blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6463120100067e88f76a9d5be84a539e55539baa | 5682dc024dd37ea0753d57819bab8a4891d6bb31 | /my_tiff_package/my_tiff_package/__init__.py | 34da447454fa182f4e7bc7d7df34feb2f0f2bde8 | [] | no_license | danielballan/reader-intake-adapter | 20424026ecc23f7aa9ab6ae2035988e60b0ca244 | f06ae1c9aef4e8277e81b5903a54d1a124590457 | refs/heads/master | 2022-04-07T05:50:38.877360 | 2020-03-05T15:12:10 | 2020-03-05T15:12:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,752 | py | import dask.array
import tifffile
class TIFFReader:
    """
    Accepts file, filepath, or filepath glob.
    """

    container = 'dask.array.core.Array'

    def __init__(self, file):
        if not isinstance(file, str):
            # file is an open file buffer
            self._tiff_files = [tifffile.TiffFile(file)]
        else:
            # file is a filepath or filepath glob
            import os
            if os.path.isfile(file):
                self._tiff_files = [tifffile.TiffFile(file)]
            else:
                import glob
                matches = glob.glob(file)
                self._tiff_files = [tifffile.TiffFile(match) for match in matches]
        self._file = file  # only used in __repr__
        self._closed = False

    def __repr__(self):
        return f"{self.__class__.__name__}({self._file!r})"

    def read(self):
        """Assemble every page of every TIFF into one lazy dask stack."""
        if self._closed:
            raise Closed(f"{self} is closed and can no longer be read.")
        lazy_pages = []
        for tiff in self._tiff_files:
            assert len(tiff.series) == 1  # should be True by construction
            series = tiff.series[0]
            dtype = series.dtype
            for page in series.pages:
                delayed_page = dask.delayed(page.asarray)()
                lazy_pages.append(dask.array.from_delayed(
                    delayed_page, shape=page.shape, dtype=dtype))
        return dask.array.stack(lazy_pages)

    def close(self):
        """Mark the reader closed and close every underlying TIFF file."""
        self._closed = True
        for tiff in self._tiff_files:
            tiff.close()

    def __enter__(self):
        return self

    def __exit__(self, *exc_details):
        self.close()
class Closed(Exception):
    """Raised by TIFFReader.read() when the reader has already been closed."""
    ...
# intake compatibility
from reader_adapter import adapt  # noqa
# Wrap TIFFReader so it can also be used as an intake DataSource class.
TIFFDataSource = adapt(TIFFReader, 'TIFFDataSource')
| [
"dallan@bnl.gov"
] | dallan@bnl.gov |
a1aea68f5189555f5d4f065f1fb96097d119efd8 | 7ac506e9890ff3295541bdd00bc3c40a13b23826 | /main_top_down_baseline_with_objcls.py | 269650272d1e6c9075b8b7539d83fe14c0eb51bc | [] | no_license | thilinicooray/CARN | dd64cde17e5c9f0e68e94b340c9271ae2a79f58a | 7f5e85c98d3178a3fe9b9f1b5a93a39ace01ccc5 | refs/heads/master | 2020-07-25T05:42:38.988848 | 2020-04-07T03:43:01 | 2020-04-07T03:43:01 | 208,182,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,887 | py | import torch
import json
import os
from sr import utils, imsitu_scorer, imsitu_loader, imsitu_encoder
from sr.model import top_down_baseline_with_objcls
def train(model, train_loader, dev_loader, optimizer, scheduler, max_epoch, model_dir, encoder, gpu_mode, clip_norm, model_name, model_saving_name, eval_frequency=4000):
    """Train the model, periodically evaluating on dev and checkpointing.

    Logs running top-1/top-5 noun metrics every ``print_freq`` steps,
    evaluates on ``dev_loader`` every ``eval_frequency`` steps, and saves
    the model whenever the dev average score improves.
    """
    model.train()
    train_loss = 0
    total_steps = 0
    print_freq = 400
    dev_score_list = []
    if gpu_mode >= 0 :
        # NOTE(review): the number of GPUs is hard-coded to 2 here -- confirm
        # this matches the intended hardware setup.
        ngpus = 2
        device_array = [i for i in range(0,ngpus)]
        pmodel = torch.nn.DataParallel(model, device_ids=device_array)
    else:
        pmodel = model
    #pmodel = model
    top1 = imsitu_scorer.imsitu_scorer(encoder, 1, 3)
    top5 = imsitu_scorer.imsitu_scorer(encoder, 5, 3)
    for epoch in range(max_epoch):
        for i, (_, img, verb, labels) in enumerate(train_loader):
            total_steps += 1
            if gpu_mode >= 0:
                img = torch.autograd.Variable(img.cuda())
                verb = torch.autograd.Variable(verb.cuda())
                labels = torch.autograd.Variable(labels.cuda())
            else:
                img = torch.autograd.Variable(img)
                verb = torch.autograd.Variable(verb)
                labels = torch.autograd.Variable(labels)
            # Forward through the (possibly data-parallel) wrapper; the loss
            # is computed on the underlying model object.
            role_predict = pmodel(img, verb)
            loss = model.calculate_loss(verb, role_predict, labels)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), clip_norm)
            optimizer.step()
            optimizer.zero_grad()
            train_loss += loss.item()
            top1.add_point_noun(verb, role_predict, labels)
            top5.add_point_noun(verb, role_predict, labels)
            if total_steps % print_freq == 0:
                top1_a = top1.get_average_results_nouns()
                top5_a = top5.get_average_results_nouns()
                # NOTE(review): the avg-loss denominator (total_steps-1) %
                # eval_frequency is never 0 with the default print_freq=400 /
                # eval_frequency=4000, but it is fragile if either changes.
                print ("{},{},{}, {} , {}, loss = {:.2f}, avg loss = {:.2f}"
                       .format(total_steps-1,epoch,i, utils.format_dict(top1_a, "{:.2f}", "1-"),
                               utils.format_dict(top5_a,"{:.2f}","5-"), loss.item(),
                               train_loss / ((total_steps-1)%eval_frequency) ))
            if total_steps % eval_frequency == 0:
                top1, top5, val_loss = eval(model, dev_loader, encoder, gpu_mode)
                model.train()
                top1_avg = top1.get_average_results_nouns()
                top5_avg = top5.get_average_results_nouns()
                # Average of 8 dev metrics; used as the checkpoint criterion.
                avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                            top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
                avg_score /= 8
                print ('Dev {} average :{:.2f} {} {}'.format(total_steps-1, avg_score*100,
                                                             utils.format_dict(top1_avg,'{:.2f}', '1-'),
                                                             utils.format_dict(top5_avg, '{:.2f}', '5-')))
                dev_score_list.append(avg_score)
                max_score = max(dev_score_list)
                if max_score == dev_score_list[-1]:
                    # New best dev score: checkpoint the model.
                    torch.save(model.state_dict(), model_dir + "/{}_{}.model".format( model_name, model_saving_name))
                    print ('New best model saved! {0}'.format(max_score))
                print('current train loss', train_loss)
                train_loss = 0
                # Reset the running scorers after each evaluation window.
                top1 = imsitu_scorer.imsitu_scorer(encoder, 1, 3)
                top5 = imsitu_scorer.imsitu_scorer(encoder, 5, 3)
            del role_predict, loss, img, verb, labels
        print('Epoch ', epoch, ' completed!')
        scheduler.step()
def eval(model, dev_loader, encoder, gpu_mode, write_to_file = False):
    """Evaluate the model and return (top1_scorer, top5_scorer, 0).

    The third element is a constant 0 placeholder: no validation loss is
    computed, it only keeps the caller's unpacking signature stable.
    When ``write_to_file`` is True, per-image predictions are logged into
    the scorers so they can later be dumped to JSON.
    """
    model.eval()
    print ('evaluating model...')
    top1 = imsitu_scorer.imsitu_scorer(encoder, 1, 3, write_to_file)
    top5 = imsitu_scorer.imsitu_scorer(encoder, 5, 3)
    with torch.no_grad():
        for i, (img_id, img, verb, labels) in enumerate(dev_loader):
            #print(img_id[0], encoder.verb2_role_dict[encoder.verb_list[verb[0]]])
            if gpu_mode >= 0:
                img = torch.autograd.Variable(img.cuda())
                verb = torch.autograd.Variable(verb.cuda())
                labels = torch.autograd.Variable(labels.cuda())
            else:
                img = torch.autograd.Variable(img)
                verb = torch.autograd.Variable(verb)
                labels = torch.autograd.Variable(labels)
            role_predict = model(img, verb)
            if write_to_file:
                # Logging variants additionally record per-image ids.
                top1.add_point_noun_log(img_id, verb, role_predict, labels)
                top5.add_point_noun_log(img_id, verb, role_predict, labels)
            else:
                top1.add_point_noun(verb, role_predict, labels)
                top5.add_point_noun(verb, role_predict, labels)
            del role_predict, img, verb, labels
            #break
    return top1, top5, 0
def main():
    """CLI entry point.

    Builds the imSitu encoder and data loaders, constructs the model via a
    ``build_<model>`` factory, then either evaluates on dev (--evaluate),
    evaluates on test (--test), or trains (default).
    """
    import argparse
    parser = argparse.ArgumentParser(description="imsitu VSRL. Training, evaluation and prediction.")
    parser.add_argument("--gpuid", default=-1, help="put GPU id > -1 in GPU mode", type=int)
    parser.add_argument('--output_dir', type=str, default='./trained_models', help='Location to output the model')
    parser.add_argument('--resume_training', action='store_true', help='Resume training from the model [resume_model]')
    parser.add_argument('--resume_model', type=str, default='', help='The model we resume')
    parser.add_argument('--evaluate', action='store_true', help='Only use the testing mode')
    parser.add_argument('--test', action='store_true', help='Only use the testing mode')
    parser.add_argument('--dataset_folder', type=str, default='./imSitu', help='Location of annotations')
    parser.add_argument('--imgset_dir', type=str, default='./resized_256', help='Location of original images')
    parser.add_argument('--train_file', default="train_freq2000.json", type=str, help='trainfile name')
    parser.add_argument('--dev_file', default="dev_freq2000.json", type=str, help='dev file name')
    parser.add_argument('--test_file', default="test_freq2000.json", type=str, help='test file name')
    parser.add_argument('--model_saving_name', type=str, help='saving name of the outpul model')
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--model', type=str, default='top_down_baseline')
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--seed', type=int, default=1111, help='random seed')
    parser.add_argument('--clip_norm', type=float, default=0.25)
    parser.add_argument('--num_workers', type=int, default=3)
    args = parser.parse_args()
    n_epoch = args.epochs
    batch_size = args.batch_size
    clip_norm = args.clip_norm
    n_worker = args.num_workers
    dataset_folder = args.dataset_folder
    imgset_folder = args.imgset_dir
    # The verb/role/label vocabularies are always built from the training
    # annotations, even when only evaluating.
    train_set = json.load(open(dataset_folder + '/' + args.train_file))
    encoder = imsitu_encoder.imsitu_encoder(train_set)
    train_set = imsitu_loader.imsitu_loader(imgset_folder, train_set, encoder,'train', encoder.train_transform)
    # Look up the model factory by name, e.g. build_top_down_baseline.
    constructor = 'build_%s' % args.model
    model = getattr(top_down_baseline_with_objcls, constructor)(encoder.get_num_roles(),encoder.get_num_verbs(), encoder.get_num_labels(), encoder)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=n_worker)
    dev_set = json.load(open(dataset_folder + '/' + args.dev_file))
    dev_set = imsitu_loader.imsitu_loader(imgset_folder, dev_set, encoder, 'val', encoder.dev_transform)
    dev_loader = torch.utils.data.DataLoader(dev_set, batch_size=batch_size, shuffle=True, num_workers=n_worker)
    test_set = json.load(open(dataset_folder + '/' + args.test_file))
    test_set = imsitu_loader.imsitu_loader(imgset_folder, test_set, encoder, 'test', encoder.dev_transform)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True, num_workers=n_worker)
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    torch.manual_seed(args.seed)
    if args.gpuid >= 0:
        model.cuda()
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.benchmark = True
    if args.resume_training:
        print('Resume training from: {}'.format(args.resume_model))
        args.train_all = True
        if len(args.resume_model) == 0:
            raise Exception('[pretrained module] not specified')
        utils.load_net(args.resume_model, [model])
        optimizer = torch.optim.Adamax(model.parameters(), lr=1e-3)
        model_name = 'resume_all'
    else:
        print('Training from the scratch.')
        model_name = 'train_full'
        utils.set_trainable(model, True)
        # Smaller learning rate for the pretrained CNN backbone (convnet);
        # the default (1e-3) applies to every newly initialised head.
        optimizer = torch.optim.Adamax([
            {'params': model.convnet.parameters(), 'lr': 5e-5},
            {'params': model.role_emb.parameters()},
            {'params': model.verb_emb.parameters()},
            {'params': model.query_composer.parameters()},
            {'params': model.v_att.parameters()},
            {'params': model.q_net.parameters()},
            {'params': model.v_net.parameters()},
            {'params': model.classifier.parameters()},
            {'params': model.obj_cls.parameters()},
        ], lr=1e-3)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
    if args.evaluate:
        # Evaluate on the dev split and dump per-role predictions to JSON.
        top1, top5, val_loss = eval(model, dev_loader, encoder, args.gpuid, write_to_file = True)
        top1_avg = top1.get_average_results_nouns()
        top5_avg = top5.get_average_results_nouns()
        avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                    top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
        avg_score /= 8
        print ('Dev average :{:.2f} {} {}'.format( avg_score*100,
                                                   utils.format_dict(top1_avg,'{:.2f}', '1-'),
                                                   utils.format_dict(top5_avg, '{:.2f}', '5-')))
        #write results to csv file
        role_dict = top1.role_dict
        fail_val_all = top1.value_all_dict
        pass_val_dict = top1.vall_all_correct
        with open(args.model_saving_name+'_role_pred_data.json', 'w') as fp:
            json.dump(role_dict, fp, indent=4)
        with open(args.model_saving_name+'_fail_val_all.json', 'w') as fp:
            json.dump(fail_val_all, fp, indent=4)
        with open(args.model_saving_name+'_pass_val_all.json', 'w') as fp:
            json.dump(pass_val_dict, fp, indent=4)
        print('Writing predictions to file completed !')
    elif args.test:
        # Evaluate on the held-out test split (no prediction dump).
        top1, top5, val_loss = eval(model, test_loader, encoder, args.gpuid, write_to_file = True)
        top1_avg = top1.get_average_results_nouns()
        top5_avg = top5.get_average_results_nouns()
        avg_score = top1_avg["verb"] + top1_avg["value"] + top1_avg["value-all"] + top5_avg["verb"] + \
                    top5_avg["value"] + top5_avg["value-all"] + top5_avg["value*"] + top5_avg["value-all*"]
        avg_score /= 8
        print ('Test average :{:.2f} {} {}'.format( avg_score*100,
                                                    utils.format_dict(top1_avg,'{:.2f}', '1-'),
                                                    utils.format_dict(top5_avg, '{:.2f}', '5-')))
    else:
        print('Model training started!')
        train(model, train_loader, dev_loader, optimizer, scheduler, n_epoch, args.output_dir, encoder, args.gpuid, clip_norm, model_name, args.model_saving_name,
              )
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()
| [
"thilinicooray.ucsc@gmail.com"
] | thilinicooray.ucsc@gmail.com |
282f9f494bed1bf028d83c8f4c05f917f6111523 | 29da2ca6def1270be13a3096685a8e5d82828dff | /CIM14/CPSM/Equipment/Wires/SeriesCompensator.py | 3d24c9da21e451acecdfd4f1a62596944c6ded43 | [
"MIT"
] | permissive | rimbendhaou/PyCIM | 75eb3bcd3729b2410c03f3d5c66d6f1e05e21df3 | d578bb0bf1af344342bd23344385ed9c06c2d0ee | refs/heads/master | 2022-04-28T01:16:12.673867 | 2020-04-16T02:19:09 | 2020-04-16T02:19:09 | 256,085,381 | 0 | 0 | MIT | 2020-04-16T02:15:20 | 2020-04-16T02:08:14 | null | UTF-8 | Python | false | false | 2,022 | py | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CPSM.Equipment.Core.ConductingEquipment import ConductingEquipment
class SeriesCompensator(ConductingEquipment):
    """A Series Compensator is a series capacitor or reactor or an AC transmission line without charging susceptance. It is a two terminal device.- [R9.3] is satisfied by navigation to ConnectivityNode and Substation
    """

    def __init__(self, r=0.0, x=0.0, *args, **kw_args):
        """Initialises a new 'SeriesCompensator' instance.

        @param r: Positive sequence resistance.
        @param x: Positive sequence reactance.
        """
        #: Positive sequence resistance.
        self.r = r

        #: Positive sequence reactance.
        self.x = x

        super(SeriesCompensator, self).__init__(*args, **kw_args)

    # Class-level metadata describing the CIM attributes of this element;
    # presumably consumed by PyCIM's (de)serialization machinery -- confirm
    # against the framework before relying on these names.
    _attrs = ["r", "x"]
    _attr_types = {"r": float, "x": float}
    _defaults = {"r": 0.0, "x": 0.0}
    _enums = {}
    _refs = []
    _many_refs = []
| [
"rwl@thinker.cable.virginmedia.net"
] | rwl@thinker.cable.virginmedia.net |
19517ca7ef64c2333ba5aa5f106fb3d0b5e76ce3 | 0b9622c6d67ddcb252a7a4dd9b38d493dfc9a25f | /HackerRank/30daysChallenge/Day21.py | c3c18c68d2565763a4359b71e362ce6b8b0e1447 | [] | no_license | d80b2t/python | eff2b19a69b55d73c4734fb9bc115be1d2193e2d | 73603b90996221e0bcd239f9b9f0458b99c6dc44 | refs/heads/master | 2020-05-21T20:43:54.501991 | 2017-12-24T12:55:59 | 2017-12-24T12:55:59 | 61,330,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | """
Nota Bene: Day 21 was about Generics.
This was actually not a Python-solvable challenge (only C++, C#, Java 7, Java 8 and Swift were offered), so no code was written here and no progress was made.
"""
| [
"npross@lbl.gov"
] | npross@lbl.gov |
b13111418e3b0d4dfe2eaa935cff789e858685ea | a983c40db193d9294ea93628c57f514e0e6e9c2a | /src/shop/api.py | 14c053a442e1c9fa5524098b2ef6d551d6b1c4fe | [
"MIT"
] | permissive | ElinSwedin/foobar-api | 80f4d1dd5e0dabefb80eab77bd92e4e8a277c9b9 | 7ab204894c7579dd3f9dec3d2cee1166eb046199 | refs/heads/develop | 2021-01-19T12:26:27.085054 | 2017-02-17T09:42:11 | 2017-02-17T11:05:38 | 82,311,799 | 2 | 0 | null | 2017-02-17T15:43:49 | 2017-02-17T15:43:49 | null | UTF-8 | Python | false | false | 7,973 | py | import logging
from django.db import transaction
from django.contrib.contenttypes.models import ContentType
from . import models, enums, suppliers, exceptions
log = logging.getLogger(__name__)
@transaction.atomic
def create_product(code, name):
    """Create a product with the given code and name; return the saved instance."""
    product_obj = models.Product(
        code=code,
        name=name
    )
    product_obj.save()
    return product_obj


@transaction.atomic
def update_product(id, **kwargs):
    """Set arbitrary attributes on the product with the given id and save it.

    Raises models.Product.DoesNotExist when no product has that id.
    """
    product_obj = models.Product.objects.get(id=id)
    for k, v in kwargs.items():
        setattr(product_obj, k, v)
    product_obj.save()
def get_product(id):
    """Return item with given id.

    Returns None if the product does not exist.
    """
    try:
        return models.Product.objects.get(id=id)
    except models.Product.DoesNotExist:
        return None


def get_product_transactions_by_ref(reference):
    """Return item transactions with given reference.

    ``reference`` may be any model instance; matching is done on its
    content type plus primary key (a generic foreign key lookup).
    """
    ct = ContentType.objects.get_for_model(reference)
    return models.ProductTransaction.objects.filter(
        reference_ct=ct,
        reference_id=reference.pk,
    )
@transaction.atomic
def create_product_transaction(product_id, trx_type, qty, reference=None):
    """
    Create item transaction for given item.

    It automagically takes care of updating the quantity for the product.
    """
    product_obj = models.Product.objects.get(id=product_id)
    # Resolve the optional generic reference into a content type + pk pair.
    ct = None
    if reference is not None:
        ct = ContentType.objects.get_for_model(reference)
    trx_obj = product_obj.transactions.create(
        trx_type=trx_type,
        qty=qty,
        reference_ct=ct,
        reference_id=reference.pk if reference is not None else None
    )
    return trx_obj


@transaction.atomic
def cancel_product_transaction(trx_id):
    """Mark the transaction as canceled; raises DoesNotExist for unknown ids."""
    trx_obj = models.ProductTransaction.objects.get(id=trx_id)
    trx_obj.trx_status = enums.TrxStatus.CANCELED
    trx_obj.save()
def list_products(start=None, limit=None, **kwargs):
    """Returns a list of products matching the criteria.

    Criteria should be passed to the function as keyword arguments.
    Criteria arguments support Django field lookups.

    NOTE(review): ``limit`` is used as an absolute slice end index, not a
    count relative to ``start`` -- confirm callers expect that.
    """
    return models.Product.objects.filter(**kwargs)[start:limit]


def list_categories():
    """Return a queryset of all product categories."""
    return models.ProductCategory.objects.all()
@transaction.atomic
def get_supplier_product(supplier_id, sku):
    """Returns supplier product for given SKU.

    If the product does not exist in the local database, fetch it from the
    supplier and cache it locally; returns None when the supplier does not
    know the SKU either.
    """
    try:
        return models.SupplierProduct.objects.get(
            supplier_id=supplier_id,
            sku=sku
        )
    except models.SupplierProduct.DoesNotExist:
        pass
    # Product has not been found in the database. Let's fetch it from
    # the supplier.
    supplier_obj = models.Supplier.objects.get(id=supplier_id)
    supplier_api = suppliers.get_supplier_api(supplier_obj.internal_name)
    product_data = supplier_api.retrieve_product(sku)
    if product_data is None:
        # Bug fix: the format string was missing its closing parenthesis.
        log.warning('Product not found (sku: %s, supplier: %s)',
                    sku, supplier_id)
        return None
    # Cache the supplier's answer locally so later lookups hit the DB.
    product_obj = models.SupplierProduct.objects.create(
        supplier_id=supplier_id,
        sku=sku,
        price=product_data.price,
        name=product_data.name
    )
    return product_obj
def parse_report(supplier_internal_name, report_path):
    """Parses a report file and returns parsed items."""
    supplier_api = suppliers.get_supplier_api(supplier_internal_name)
    return supplier_api.parse_delivery_report(report_path)


@transaction.atomic
def populate_delivery(delivery_id):
    """Populates the delivery with products based on the imported report.

    Report rows whose SKU cannot be resolved to a supplier product are
    silently skipped (get_supplier_product returned None).
    """
    delivery_obj = models.Delivery.objects.get(id=delivery_id)
    supplier_obj = delivery_obj.supplier
    items = parse_report(supplier_obj.internal_name, delivery_obj.report.path)
    for item in items:
        product_obj = get_supplier_product(supplier_obj.id, item.sku)
        if product_obj is not None:
            # Normalize pack quantities: the multiplier converts supplier
            # packs into individual stock units (price scales inversely).
            models.DeliveryItem.objects.create(
                delivery=delivery_obj,
                supplier_product_id=product_obj.id,
                qty=item.qty * product_obj.qty_multiplier,
                price=item.price / product_obj.qty_multiplier
            )
    return delivery_obj
@transaction.atomic
def process_delivery(delivery_id):
    """Adjusts the stock quantities based on the delivery data."""
    delivery_obj = models.Delivery.objects.get(id=delivery_id)
    # NOTE(review): assert statements are stripped under `python -O`;
    # consider raising an explicit exception for this validation instead.
    assert delivery_obj.valid, ('Some of the delivered items are not '
                                'associated with a product in the system.')
    for item in delivery_obj.delivery_items.all():
        supplier_product = item.supplier_product
        # One inventory transaction per delivered item; the transaction
        # layer is responsible for updating the product quantity.
        create_product_transaction(
            product_id=supplier_product.product.id,
            trx_type=enums.TrxType.INVENTORY,
            qty=item.qty,
            reference=item
        )
    # Lock the delivery so it cannot be processed a second time.
    delivery_obj.locked = True
    delivery_obj.save()
@transaction.atomic
def initiate_stocktaking(chunk_size=10):
    """Initiates a stock-taking procedure for all the products.

    Splits all products into chunks of ``chunk_size`` items each; raises
    exceptions.APIException if a stock-taking is already in progress.
    """
    stocktake_qs = models.Stocktake.objects
    # Make sure that there is no stock-taking in progress
    if not stocktake_qs.filter(locked=False).count() == 0:
        raise exceptions.APIException('Stock-taking already in progress.')
    stocktake_obj = stocktake_qs.create()
    # Order products by category, so that chunk contain mostly that share
    # category. Products in the same category are most often placed near each
    # other, which should make the process of stock-taking more effective.
    product_objs = list(models.Product.objects.all().order_by('category'))
    for i in range(0, len(product_objs), chunk_size):
        chunk_obj = stocktake_obj.chunks.create()
        chunk_products = product_objs[i:i + chunk_size]
        for p in chunk_products:
            chunk_obj.items.create(product=p)
    return stocktake_obj
@transaction.atomic
def finalize_stocktaking(stocktake_id):
    """Applies the result of stock taking to the stock quantities.

    Every counted item produces a CORRECTION transaction for the difference
    between the counted and the recorded quantity.  Raises
    exceptions.APIException when the stock-taking is already finished or
    some chunks are still open.
    """
    stocktake_obj = models.Stocktake.objects.get(id=stocktake_id)
    if stocktake_obj.locked:
        raise exceptions.APIException('Stock-taking already finished.')
    # Make sure that all the chunks are finished
    chunk_objs = stocktake_obj.chunks.all()
    if not all(obj.locked for obj in chunk_objs):
        raise exceptions.APIException('Found unfinished chunks.')
    for chunk_obj in chunk_objs:
        for item_obj in chunk_obj.items.all():
            product_obj = item_obj.product
            # Correction = counted quantity minus currently recorded quantity.
            create_product_transaction(
                product_id=product_obj.id,
                trx_type=enums.TrxType.CORRECTION,
                qty=item_obj.qty - product_obj.qty,
                reference=item_obj
            )
    stocktake_obj.locked = True
    stocktake_obj.save()
    return stocktake_obj
def finalize_stocktake_chunk(chunk_id):
    """Marks given chunk as finished.

    Releases the owner so the chunk no longer counts against anyone's
    assignment; raises exceptions.APIException if already locked.
    """
    chunk_obj = models.StocktakeChunk.objects.get(id=chunk_id)
    if chunk_obj.locked:
        raise exceptions.APIException('Chunk already locked.')
    chunk_obj.locked = True
    chunk_obj.owner = None
    chunk_obj.save()


@transaction.atomic
def assign_free_stocktake_chunk(user_id, stocktake_id):
    """Assigns a free stock-take chunk to a user, if any free left.

    If user is already assigned to a chunk, that chunk should be returned.
    Returns None when no unassigned, unlocked chunk remains.
    """
    # select_for_update row-locks the chunks for the duration of the
    # surrounding transaction, so two users cannot grab the same chunk.
    chunk_qs = models.StocktakeChunk.objects.select_for_update()
    try:
        return chunk_qs.get(
            stocktake_id=stocktake_id,
            owner_id=user_id
        )
    except models.StocktakeChunk.DoesNotExist:
        pass
    chunk_objs = chunk_qs.filter(
        stocktake_id=stocktake_id,
        locked=False,
        owner__isnull=True
    )
    if not chunk_objs:
        return None
    chunk_obj = chunk_objs.first()
    chunk_obj.owner_id = user_id
    chunk_obj.save()
    return chunk_obj
| [
"me@kjagiello.com"
] | me@kjagiello.com |
1c656ad16666594fb3d4c6f4f1e6bba48319f683 | be0e0488a46b57bf6aff46c687d2a3080053e52d | /python/programmers/level1/문자열다루기기본.py | a210099dc2f10aaf38e4017dbeb93a0c2c6db444 | [] | no_license | syo0e/Algorithm | b3f8a0df0029e4d6c9cbf19dcfcb312ba25ea939 | 1ae754d5bb37d02f28cf1d50463a494896d5026f | refs/heads/master | 2023-06-09T11:31:54.266900 | 2021-06-30T17:04:38 | 2021-06-30T17:04:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | def isnumber(s):
for i in range(len(s)):
if s[i] < '0' or s[i] > '9':
return False
return True
def solution(s):
    """True when s is a 4- or 6-character string made only of ASCII digits."""
    return len(s) in (4, 6) and all("0" <= ch <= "9" for ch in s)
| [
"kyun2dot@gmail.com"
] | kyun2dot@gmail.com |
ecb1f30725bfd84a818036a942659d29d1dfdfa4 | c7009093f1e4d5db31d7fb5d876b46f5f9ac5268 | /week2/game_hobit/game.py | 5f8e1cf9d01888c4697f3705b59e8f2cd391fb2d | [] | no_license | mileto94/HackBulgaria | 4ea52ff306c202b9207f66e218ca79082832246a | 414b37dd102a3de5e976d4d97b1b2d95bb253892 | refs/heads/master | 2016-09-06T17:31:04.618614 | 2014-06-24T18:40:27 | 2014-06-24T18:40:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,197 | py | import random
from random import randint
class Entity():
    """Base class for any combatant: a name, a health pool, and an
    optional weapon attached lazily via equip_weapon()."""

    def __init__(self, name, health):
        self.name = name
        self.health = health

    def get_name(self):
        """Return the entity's name."""
        return self.name

    def get_health(self):
        """Return the current health value."""
        return self.health

    def is_alive(self):
        """An entity is alive while its health is strictly positive."""
        return True if self.health > 0 else False

    def equip_weapon(self, weapon_for_entity):
        """Attach a weapon to this entity; always reports success."""
        self.weapon = weapon_for_entity
        return True

    def has_weapon(self):
        """True once equip_weapon() has been called on this entity."""
        return hasattr(self, "weapon")

    def take_damage(self, damage_points):
        """Subtract damage and return the (possibly negative) new health."""
        self.health = self.health - damage_points
        return self.health

    def take_healing(self, healing_points):
        """Heal only a living, not-fully-healed entity.

        Returns the new health, or None when no healing was applied.
        """
        if 0 < self.health < 100:
            self.health = self.health + healing_points
            return self.health

    def attack(self):
        """Damage dealt: the weapon's damage, or 10 with bare hands."""
        return self.weapon.damage if self.has_weapon() else 10
class Hero(Entity):
    """An Entity that additionally carries a nickname."""

    def __init__(self, name, health, nickname):
        super().__init__(name, health)
        self.nickname = nickname

    def get_nickname(self):
        """Return the hero's nickname."""
        return self.nickname

    def known_as(self):
        """Return the full title, e.g. 'Bilbo the Burglar'."""
        return "{} the {}".format(self.name, self.nickname)
class Orc(Entity):
    """An Entity whose attacks are amplified by a berserk multiplier."""

    def __init__(self, name, health, berserk_factor):
        super().__init__(name, health)
        self.berserk_factor = berserk_factor

    def attack(self):
        """Deal the base Entity damage scaled by the berserk factor."""
        base_damage = super().attack()
        return self.berserk_factor * base_damage
class Weapon():
    """A weapon with a type label, a damage value and a critical-strike chance."""

    def __init__(self, Type, damage, critical_strike_percent):
        self.Type = Type
        self.damage = damage
        self.critical_strike_percent = critical_strike_percent

    def getType(self):
        """Return the weapon's type label."""
        return self.Type

    def getCriticalStrikePercent(self):
        """Return the current critical-strike chance."""
        return self.critical_strike_percent

    def critical_hit(self):
        """Roll for a critical strike against a uniform [0, 1] draw.

        On success the chance is doubled and the doubling persists across
        calls; the (possibly updated) chance is returned either way.
        """
        roll = random.uniform(0, 1)
        if self.critical_strike_percent > roll:
            self.critical_strike_percent = self.critical_strike_percent * 2
        return self.critical_strike_percent
class Fight():
    """Pits a hero against an orc and simulates turn-based combat until
    one of them dies; simulate_fight() returns the survivor."""

    # Fallback weapon stats for a combatant that enters the fight unarmed.
    BARE_HANDS = ("his bare hands!", 5, 0.09)

    def __init__(self, new_hero, new_orc):
        self.hero = new_hero
        self.orc = new_orc

    def who_starts(self):
        """Pick the first attacker with a fair 50/50 coin flip."""
        if randint(1, 100) <= 50:
            return self.hero
        return self.orc

    def get_opponent(self, first):
        """Return whichever combatant is not ``first``."""
        if first == self.hero:
            return self.orc
        return self.hero

    def simulate_fight(self):
        """Run the fight to the death and return the surviving combatant."""
        first = self.who_starts()
        print("%s starts" % first.name)
        second = self.get_opponent(first)
        # Bug fix: the original did setattr(Entity, "weapon", ...), which
        # attached the fallback weapon to the Entity *class* (arming every
        # entity in the game).  Equip the individual combatant instead.
        for fighter in (first, second):
            if not fighter.has_weapon():
                fighter.equip_weapon(Weapon(*self.BARE_HANDS))
        while True:
            if self._strike(first, second):
                break
            if self._strike(second, first):
                break
        if first.is_alive():
            return first
        return second

    def _strike(self, attacker, defender):
        """Resolve one attack; return True when the defender dies.

        Bug fix: the original loop announced the wrong combatant as the
        attacker / the hurt party on the second half of each round.
        """
        print("%s attacks with %s" % (attacker.name, attacker.weapon.Type))
        defender.take_damage(attacker.attack())
        print("%s is hurted" % (defender.name))
        if not defender.is_alive():
            print("%s died" % defender.name)
            return True
        return False
class Dungeon():
    """Loads a dungeon map from a text file and can print it."""

    def __init__(self, file_to_read):
        # Read the whole map file into memory up front.
        with open(file_to_read, "r") as map_file:
            self.unparsed_map = map_file.read()

    def get_map(self):
        """Return the raw map text exactly as read from disk."""
        return self.unparsed_map

    def print_map(self):
        """Print the raw map to stdout."""
        print(self.get_map())
| [
"mileto94@abv.bg"
] | mileto94@abv.bg |
2ed56086a41fca06fb78b34b10bedbfee178a202 | 621ca3f68b088699c42a16051df1d64b6a5ac505 | /virtual/bin/pip3 | da45e50dbe672a6aca5e1632cfb1450927ff6f5d | [
"MIT"
] | permissive | nziokaivy/hood | 2af3bdbf1c258e504da2a4550b524319cab907bb | f82dde0f132330589aacfeefde0229d0bb909b9c | refs/heads/master | 2020-04-30T20:42:45.567319 | 2019-04-08T07:55:05 | 2019-04-08T07:55:05 | 177,075,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | #!/home/ivy/Documents/Moringa-school-projects/core-projects/python/hood/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
# Entry point of the virtualenv's pip3 wrapper script.
if __name__ == '__main__':
    # Strip a trailing "-script.py(w)"/".exe" suffix from argv[0] so pip's
    # own argument handling sees a clean program name (setuptools wrapper quirk).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"nziokaivy@gmail.com"
] | nziokaivy@gmail.com | |
c47fd8fda3a6c52e6bd550bbca685152fa026643 | 515a97129ce1b2b8eecca4b2087fde8985b82d5b | /Code-Scraps/old_modules/SpiceBot/Main/forfuckssake.py | 18ffb3c5664225b11ec0c4fd4d80c90d641ea910 | [] | no_license | SpiceBot/scraps | 3ad6e81ac75e2b6a684fea64eb7e75477b0f4f63 | 90125e1397b57ac87cae5f3e506363aa04ddffdc | refs/heads/master | 2020-05-02T21:51:01.297114 | 2019-03-28T15:38:28 | 2019-03-28T15:38:28 | 178,232,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | #!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, absolute_import, print_function, division
import sopel.module
import sys
import os
moduledir = os.path.dirname(__file__)
shareddir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(shareddir)
from BotShared import *
# author jimender2
@sopel.module.commands('forfuckssake','ffs')
def mainfunction(bot, trigger):
    """Sopel trigger for .forfuckssake / .ffs.

    Splits "&&"-chained input into separate invocations of execute_main.
    """
    enablestatus, triggerargsarray, botcom, instigator = spicebot_prerun(bot, trigger, 'forfuckssake')
    # NOTE(review): running the command body only when `enablestatus` is
    # falsy looks inverted -- confirm spicebot_prerun's return convention.
    if not enablestatus:
        # IF "&&" is in the full input, it is treated as multiple commands, and is split
        commands_array = spicemanip(bot, triggerargsarray, "split_&&")
        if commands_array == []:
            commands_array = [[]]
        for command_split_partial in commands_array:
            triggerargsarray_part = spicemanip(bot, command_split_partial, 'create')
            execute_main(bot, trigger, triggerargsarray_part, botcom, instigator)
def execute_main(bot, trigger, triggerargsarray, botcom, instigator):
    """Send the canned response back to the channel the trigger came from."""
    osd(bot, trigger.sender, 'say', "For fuck sakes lizard people, get your shit together!!")
| [
"sam@deathbybandaid.net"
] | sam@deathbybandaid.net |
2ffe78d090dc035ee3e3c38344a9e74b0442c2ec | 97758972dcbc3ad0e1e6057cdf1e0265c9e26c46 | /circularly_linked_list/tests/test_insert_cyclic_list.py | ac498d7e144659df1c5b77a38f88fb40f13cdf5f | [
"MIT"
] | permissive | ahcode0919/python-ds-algorithms | a4eea3358258e0ec3802aa9bf4470aa81b399d2a | 966565753eba5414903300379db6abac1b80a3d0 | refs/heads/main | 2022-12-23T11:56:21.142540 | 2022-12-19T16:22:24 | 2022-12-19T16:22:24 | 140,489,999 | 0 | 3 | MIT | 2022-12-19T16:22:25 | 2018-07-10T21:40:32 | Python | UTF-8 | Python | false | false | 627 | py | from circularly_linked_list.insert_cyclic_list import insert
from data_structures.singly_linked_list_node import SinglyLinkedListNode
from test_helpers.test_helpers import get_cyclic_list_values
def test_insert():
head = None
assert get_cyclic_list_values(insert(head, 1)) == [1]
head = SinglyLinkedListNode(1)
head.next = head
assert get_cyclic_list_values(insert(head, 2)) == [1, 2]
head = SinglyLinkedListNode(3)
head.next = SinglyLinkedListNode(4)
head.next.next = SinglyLinkedListNode(1)
head.next.next.next = head
assert get_cyclic_list_values(insert(head, 2)) == [3, 4, 1, 2]
| [
"noreply@github.com"
] | ahcode0919.noreply@github.com |
745cdf579469254d8a84fa40c6cc5cde7de681e1 | e5755d76e50e902246884310a7781059bd7ff222 | /mongoDB/6.Insert.py | 7dc6eba4175fdbcb45942c935ac3da1f5130d1ee | [] | no_license | Sens3ii/PP2-2020 | 4f1f9c0588476ca415b0ae2efc0f171e826dd3f8 | 3675aa4860f727ecb26360d8624e396663dfc5b2 | refs/heads/master | 2023-05-29T15:02:17.803095 | 2021-06-14T17:05:52 | 2021-06-14T17:05:52 | 236,768,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,185 | py | import pymongo
myclient = pymongo.MongoClient("mongodb+srv://students:321321@firstcluster-bxilw.gcp.mongodb.net/test?retryWrites=true&w=majority")
mydb = myclient["mydatabase"]
mycol = mydb["students"]
mylist = [
{ "_id": 1, "name": "John", "address": "Highway 37"},
{ "_id": 2, "name": "Peter", "address": "Lowstreet 27"},
{ "_id": 3, "name": "Amy", "address": "Apple st 652"},
{ "_id": 4, "name": "Hannah", "address": "Mountain 21"},
{ "_id": 5, "name": "Michael", "address": "Valley 345"},
{ "_id": 6, "name": "Sandy", "address": "Ocean blvd 2"},
{ "_id": 7, "name": "Betty", "address": "Green Grass 1"},
{ "_id": 8, "name": "Richard", "address": "Sky st 331"},
{ "_id": 9, "name": "Susan", "address": "One way 98"},
{ "_id": 10, "name": "Vicky", "address": "Yellow Garden 2"},
{ "_id": 11, "name": "Ben", "address": "Park Lane 38"},
{ "_id": 12, "name": "William", "address": "Central st 954"},
{ "_id": 13, "name": "Chuck", "address": "Main Road 989"},
{ "_id": 14, "name": "Viola", "address": "Sideway 1633"}
]
x = mycol.insert_many(mylist)
#print a list of the _id values of the inserted documents:
print(x.inserted_ids) | [
"noreply@github.com"
] | Sens3ii.noreply@github.com |
fc16106826b25ab2c3851b6b86707066454ecae6 | 4fc87c7c55d431943eba76caaa76cc889e99bd3f | /npf/core/workflow/models/workflow_mytask.py | 52b87a074885ee3a7de16a31c5cd5377c973edeb | [] | no_license | Bonasolvo/npf-dev-roles | c774359b79642ae9ca2c82daeb0591677bd8e88c | dbde9493f2d23fd238dd3a6d8771bbbc5a650724 | refs/heads/master | 2016-09-01T05:35:50.246086 | 2015-12-15T07:02:40 | 2015-12-15T07:02:40 | 48,026,149 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | from npf.core.workflow.models import WorkflowTaskInstance
class WorkflowMyTaskInstance(WorkflowTaskInstance):
"""
Прокси-модель: Экземпляр задачи. Используется для фильтрации всех задач по текущему пользователю и
вывода списка "Мои задачи".
"""
class Meta:
verbose_name = 'Задача'
verbose_name_plural = 'Мои задачи'
proxy = True
| [
"tymashh@Mac-Tymashh.local"
] | tymashh@Mac-Tymashh.local |
b35986bb150d6f28720c88f4ce694446174ca46c | 09e5cfe06e437989a2ccf2aeecb9c73eb998a36c | /modules/cctbx_project/cctbx/development/electron_density_sampling.py | bc7f0cf4ae8f29f9ce2b13518f1c8d09d8f3953c | [
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] | permissive | jorgediazjr/dials-dev20191018 | b81b19653624cee39207b7cefb8dfcb2e99b79eb | 77d66c719b5746f37af51ad593e2941ed6fbba17 | refs/heads/master | 2020-08-21T02:48:54.719532 | 2020-01-25T01:41:37 | 2020-01-25T01:41:37 | 216,089,955 | 0 | 1 | BSD-3-Clause | 2020-01-25T01:41:39 | 2019-10-18T19:03:17 | Python | UTF-8 | Python | false | false | 1,856 | py | from __future__ import absolute_import, division, print_function
from cctbx import xray
from cctbx import miller
from cctbx.command_line.structure_factor_timings import read_structure
import sys
def timings(structure, wing_cutoff=1.e-3):
print("wing_cutoff for following fft calculations: %3.1e"%wing_cutoff)
for calc_type,exp_table_one_over_step_size in (("exp function:",0),
("exp table:",-100)):
print(calc_type)
for d_min in [4,3,2,1]:
structure_ng = structure.deep_copy_scatterers()
structure_ng.scattering_type_registry(d_min=d_min, table="n_gaussian")
structure_4g = structure.deep_copy_scatterers()
structure_4g.scattering_type_registry(table="it1992")
miller_set = miller.build_set(
crystal_symmetry=structure,
d_min=d_min,
anomalous_flag=False)
miller_set.show_summary()
times = []
for structure in (structure_ng, structure_4g):
structure.scattering_type_registry().show_summary()
f_calc_object = xray.structure_factors.from_scatterers(
miller_set=miller_set,
wing_cutoff=wing_cutoff,
exp_table_one_over_step_size=exp_table_one_over_step_size)(
xray_structure=structure,
miller_set=miller_set,
algorithm="fft")
times.append(f_calc_object.manager().estimate_time_fft.time_sampling)
print(" %.2f seconds," % times[-1])
print("d_min=%d: %.2f s / %.2f s" % (d_min, times[0], times[1]), end=' ')
if (times[1] != 0):
print("= %.2f" % (times[0] / times[1]), end=' ')
print()
sys.stdout.flush()
print()
def run(args):
assert len(args) == 1
structure = read_structure(args[0])
structure.show_summary()
print()
timings(structure=structure)
if (__name__ == "__main__"):
run(sys.argv[1:])
| [
"jorge7soccer@gmail.com"
] | jorge7soccer@gmail.com |
1903fd395e39e1a5fe47d28f6a0c5d63f5ac1553 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/errors/types/ad_group_ad_error.py | 350f6684cdb369f03e6e2283d6c52be5fe489255 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 1,533 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"AdGroupAdErrorEnum",},
)
class AdGroupAdErrorEnum(proto.Message):
r"""Container for enum describing possible ad group ad errors.
"""
class AdGroupAdError(proto.Enum):
r"""Enum describing possible ad group ad errors."""
UNSPECIFIED = 0
UNKNOWN = 1
AD_GROUP_AD_LABEL_DOES_NOT_EXIST = 2
AD_GROUP_AD_LABEL_ALREADY_EXISTS = 3
AD_NOT_UNDER_ADGROUP = 4
CANNOT_OPERATE_ON_REMOVED_ADGROUPAD = 5
CANNOT_CREATE_DEPRECATED_ADS = 6
CANNOT_CREATE_TEXT_ADS = 7
EMPTY_FIELD = 8
RESOURCE_REFERENCED_IN_MULTIPLE_OPS = 9
AD_TYPE_CANNOT_BE_PAUSED = 10
AD_TYPE_CANNOT_BE_REMOVED = 11
CANNOT_UPDATE_DEPRECATED_ADS = 12
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
b2303e8d7506b83777e1e38c909ccd85fcbcbcc9 | 0725d2a93c9058113d0127501fa7bbea28b4f8b1 | /venv/Lib/site-packages/tencentcloud/emr/v20190103/models.py | a26889450a0561c47cc5ae75a501f5035ed2594e | [] | no_license | liugngg/liug-001 | 8c9d39fab49b1895c213814dfec4b1bff649671b | 9ae093a3f7e042b29e756620311f0a57ad083f5c | refs/heads/master | 2023-06-06T00:14:52.548147 | 2021-06-25T07:30:26 | 2021-06-25T07:30:26 | 380,157,121 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140,185 | py | # -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class BootstrapAction(AbstractModel):
    """Bootstrap script configuration.
    """

    def __init__(self):
        """
        :param Path: Script location. Only files on COS are supported, and
            only over the https protocol.
        :type Path: str
        :param WhenRun: When to run the script.
            resourceAfter - run after machine resources are provisioned.
            clusterBefore - run before cluster initialization.
            clusterAfter - run after cluster initialization.
        :type WhenRun: str
        :param Args: Arguments passed to the script.
        :type Args: list of str
        """
        self.Path = None
        self.WhenRun = None
        self.Args = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Path = params.get("Path")
        self.WhenRun = params.get("WhenRun")
        self.Args = params.get("Args")
        # Any key not mirrored by an attribute is ignored by this model;
        # surface those to the caller instead of dropping them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class COSSettings(AbstractModel):
    """COS-related configuration.
    """

    def __init__(self):
        """
        :param CosSecretId: COS SecretId.
        :type CosSecretId: str
        :param CosSecretKey: COS SecretKey.
        :type CosSecretKey: str
        :param LogOnCosPath: Path on COS where logs are stored.
        :type LogOnCosPath: str
        """
        self.CosSecretId = None
        self.CosSecretKey = None
        self.LogOnCosPath = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.CosSecretId = params.get("CosSecretId")
        self.CosSecretKey = params.get("CosSecretKey")
        self.LogOnCosPath = params.get("LogOnCosPath")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class CdbInfo(AbstractModel):
    """Output parameter: CDB (cloud database) instance description.
    """

    def __init__(self):
        """
        :param InstanceName: Database instance name.
            Note: this field may return null, indicating no valid value.
        :type InstanceName: str
        :param Ip: Database IP address.
            Note: this field may return null, indicating no valid value.
        :type Ip: str
        :param Port: Database port.
            Note: this field may return null, indicating no valid value.
        :type Port: int
        :param MemSize: Database memory specification.
            Note: this field may return null, indicating no valid value.
        :type MemSize: int
        :param Volume: Database disk specification.
            Note: this field may return null, indicating no valid value.
        :type Volume: int
        :param Service: Service identifier.
            Note: this field may return null, indicating no valid value.
        :type Service: str
        :param ExpireTime: Expiration time.
            Note: this field may return null, indicating no valid value.
        :type ExpireTime: str
        :param ApplyTime: Application (request) time.
            Note: this field may return null, indicating no valid value.
        :type ApplyTime: str
        :param PayType: Billing type.
            Note: this field may return null, indicating no valid value.
        :type PayType: int
        :param ExpireFlag: Expiration flag.
            Note: this field may return null, indicating no valid value.
        :type ExpireFlag: bool
        :param Status: Database status.
            Note: this field may return null, indicating no valid value.
        :type Status: int
        :param IsAutoRenew: Auto-renewal flag.
            Note: this field may return null, indicating no valid value.
        :type IsAutoRenew: int
        :param SerialNo: Database serial string.
            Note: this field may return null, indicating no valid value.
        :type SerialNo: str
        :param ZoneId: Zone ID.
            Note: this field may return null, indicating no valid value.
        :type ZoneId: int
        :param RegionId: Region ID.
            Note: this field may return null, indicating no valid value.
        :type RegionId: int
        """
        self.InstanceName = None
        self.Ip = None
        self.Port = None
        self.MemSize = None
        self.Volume = None
        self.Service = None
        self.ExpireTime = None
        self.ApplyTime = None
        self.PayType = None
        self.ExpireFlag = None
        self.Status = None
        self.IsAutoRenew = None
        self.SerialNo = None
        self.ZoneId = None
        self.RegionId = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.InstanceName = params.get("InstanceName")
        self.Ip = params.get("Ip")
        self.Port = params.get("Port")
        self.MemSize = params.get("MemSize")
        self.Volume = params.get("Volume")
        self.Service = params.get("Service")
        self.ExpireTime = params.get("ExpireTime")
        self.ApplyTime = params.get("ApplyTime")
        self.PayType = params.get("PayType")
        self.ExpireFlag = params.get("ExpireFlag")
        self.Status = params.get("Status")
        self.IsAutoRenew = params.get("IsAutoRenew")
        self.SerialNo = params.get("SerialNo")
        self.ZoneId = params.get("ZoneId")
        self.RegionId = params.get("RegionId")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class ClusterInstancesInfo(AbstractModel):
    """Cluster instance information.
    """

    def __init__(self):
        """
        :param Id: Numeric ID.
            Note: this field may return null, indicating no valid value.
        :type Id: int
        :param ClusterId: Cluster ID.
            Note: this field may return null, indicating no valid value.
        :type ClusterId: str
        :param Ftitle: Title.
            Note: this field may return null, indicating no valid value.
        :type Ftitle: str
        :param ClusterName: Cluster name.
            Note: this field may return null, indicating no valid value.
        :type ClusterName: str
        :param RegionId: Region ID.
            Note: this field may return null, indicating no valid value.
        :type RegionId: int
        :param ZoneId: Zone ID.
            Note: this field may return null, indicating no valid value.
        :type ZoneId: int
        :param AppId: User APPID.
            Note: this field may return null, indicating no valid value.
        :type AppId: int
        :param Uin: User UIN.
            Note: this field may return null, indicating no valid value.
        :type Uin: str
        :param ProjectId: Project ID.
            Note: this field may return null, indicating no valid value.
        :type ProjectId: int
        :param VpcId: Cluster VPC ID.
            Note: this field may return null, indicating no valid value.
        :type VpcId: int
        :param SubnetId: Subnet ID.
            Note: this field may return null, indicating no valid value.
        :type SubnetId: int
        :param Status: Instance status code. Value range:
            <li>2: cluster running.</li>
            <li>3: cluster being created.</li>
            <li>4: cluster scaling out.</li>
            <li>5: adding router nodes to the cluster.</li>
            <li>6: installing components on the cluster.</li>
            <li>7: cluster executing a command.</li>
            <li>8: restarting services.</li>
            <li>9: entering maintenance.</li>
            <li>10: suspending services.</li>
            <li>11: exiting maintenance.</li>
            <li>12: exiting suspension.</li>
            <li>13: delivering configuration.</li>
            <li>14: destroying cluster.</li>
            <li>15: destroying core nodes.</li>
            <li>16: destroying task nodes.</li>
            <li>17: destroying router nodes.</li>
            <li>18: changing webproxy password.</li>
            <li>19: cluster being isolated.</li>
            <li>20: cluster charge correction in progress.</li>
            <li>21: cluster being reclaimed.</li>
            <li>22: waiting for configuration change.</li>
            <li>23: cluster isolated.</li>
            <li>24: scaling in nodes.</li>
            <li>33: cluster waiting for refund.</li>
            <li>34: cluster refunded.</li>
            <li>301: creation failed.</li>
            <li>302: scale-out failed.</li>
            Note: this field may return null, indicating no valid value.
        :type Status: int
        :param AddTime: Creation time.
            Note: this field may return null, indicating no valid value.
        :type AddTime: str
        :param RunTime: Elapsed running time.
            Note: this field may return null, indicating no valid value.
        :type RunTime: str
        :param Config: Cluster product configuration.
            Note: this field may return null, indicating no valid value.
        :type Config: :class:`tencentcloud.emr.v20190103.models.EmrProductConfigOutter`
        :param MasterIp: Public IP of the master node.
            Note: this field may return null, indicating no valid value.
        :type MasterIp: str
        :param EmrVersion: EMR version.
            Note: this field may return null, indicating no valid value.
        :type EmrVersion: str
        :param ChargeType: Billing type.
            Note: this field may return null, indicating no valid value.
        :type ChargeType: int
        :param TradeVersion: Trade version.
            Note: this field may return null, indicating no valid value.
        :type TradeVersion: int
        :param ResourceOrderId: Resource order ID.
            Note: this field may return null, indicating no valid value.
        :type ResourceOrderId: int
        :param IsTradeCluster: Whether this is a billed cluster.
            Note: this field may return null, indicating no valid value.
        :type IsTradeCluster: int
        :param AlarmInfo: Alarm information for cluster error states.
            Note: this field may return null, indicating no valid value.
        :type AlarmInfo: str
        :param IsWoodpeckerCluster: Whether the new architecture is used.
            Note: this field may return null, indicating no valid value.
        :type IsWoodpeckerCluster: int
        :param MetaDb: Metadata database information.
            Note: this field may return null, indicating no valid value.
        :type MetaDb: str
        :param Tags: Tag information.
            Note: this field may return null, indicating no valid value.
        :type Tags: list of Tag
        :param HiveMetaDb: Hive metadata information.
            Note: this field may return null, indicating no valid value.
        :type HiveMetaDb: str
        :param ServiceClass: Cluster type: EMR, CLICKHOUSE, DRUID.
            Note: this field may return null, indicating no valid value.
        :type ServiceClass: str
        :param AliasInfo: Serialized aliases of all cluster nodes.
            Note: this field may return null, indicating no valid value.
        :type AliasInfo: str
        :param ProductId: Cluster version ID.
            Note: this field may return null, indicating no valid value.
        :type ProductId: int
        """
        self.Id = None
        self.ClusterId = None
        self.Ftitle = None
        self.ClusterName = None
        self.RegionId = None
        self.ZoneId = None
        self.AppId = None
        self.Uin = None
        self.ProjectId = None
        self.VpcId = None
        self.SubnetId = None
        self.Status = None
        self.AddTime = None
        self.RunTime = None
        self.Config = None
        self.MasterIp = None
        self.EmrVersion = None
        self.ChargeType = None
        self.TradeVersion = None
        self.ResourceOrderId = None
        self.IsTradeCluster = None
        self.AlarmInfo = None
        self.IsWoodpeckerCluster = None
        self.MetaDb = None
        self.Tags = None
        self.HiveMetaDb = None
        self.ServiceClass = None
        self.AliasInfo = None
        self.ProductId = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Id = params.get("Id")
        self.ClusterId = params.get("ClusterId")
        self.Ftitle = params.get("Ftitle")
        self.ClusterName = params.get("ClusterName")
        self.RegionId = params.get("RegionId")
        self.ZoneId = params.get("ZoneId")
        self.AppId = params.get("AppId")
        self.Uin = params.get("Uin")
        self.ProjectId = params.get("ProjectId")
        self.VpcId = params.get("VpcId")
        self.SubnetId = params.get("SubnetId")
        self.Status = params.get("Status")
        self.AddTime = params.get("AddTime")
        self.RunTime = params.get("RunTime")
        # Nested model: recursively deserialized.
        if params.get("Config") is not None:
            self.Config = EmrProductConfigOutter()
            self.Config._deserialize(params.get("Config"))
        self.MasterIp = params.get("MasterIp")
        self.EmrVersion = params.get("EmrVersion")
        self.ChargeType = params.get("ChargeType")
        self.TradeVersion = params.get("TradeVersion")
        self.ResourceOrderId = params.get("ResourceOrderId")
        self.IsTradeCluster = params.get("IsTradeCluster")
        self.AlarmInfo = params.get("AlarmInfo")
        self.IsWoodpeckerCluster = params.get("IsWoodpeckerCluster")
        self.MetaDb = params.get("MetaDb")
        # Nested list of Tag models.
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = Tag()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.HiveMetaDb = params.get("HiveMetaDb")
        self.ServiceClass = params.get("ServiceClass")
        self.AliasInfo = params.get("AliasInfo")
        self.ProductId = params.get("ProductId")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class ClusterSetting(AbstractModel):
    """Cluster configuration.
    """

    def __init__(self):
        """
        :param InstanceChargeType: Billing mode.
            PREPAID - monthly/yearly subscription.
            POSTPAID_BY_HOUR - pay-as-you-go by the hour (default).
        :type InstanceChargeType: str
        :param SupportHA: Whether this is an HA cluster.
        :type SupportHA: bool
        :param SecurityGroupIds: Security groups used by the cluster;
            currently only one is supported.
        :type SecurityGroupIds: list of str
        :param Placement: Instance placement.
        :type Placement: :class:`tencentcloud.emr.v20190103.models.Placement`
        :param VPCSettings: VPC the instance belongs to.
        :type VPCSettings: :class:`tencentcloud.emr.v20190103.models.VPCSettings`
        :param LoginSettings: Instance login configuration.
        :type LoginSettings: :class:`tencentcloud.emr.v20190103.models.LoginSettings`
        :param TagSpecification: Instance tags.
        :type TagSpecification: list of str
        :param MetaDB: Metadata database configuration.
        :type MetaDB: :class:`tencentcloud.emr.v20190103.models.MetaDbInfo`
        :param ResourceSpec: Instance hardware configuration.
        :type ResourceSpec: :class:`tencentcloud.emr.v20190103.models.JobFlowResourceSpec`
        :param PublicIpAssigned: Whether to request a public IP; defaults to false.
        :type PublicIpAssigned: bool
        :param InstanceChargePrepaid: Prepaid configuration; only effective
            for monthly/yearly subscription clusters.
        :type InstanceChargePrepaid: :class:`tencentcloud.emr.v20190103.models.InstanceChargePrepaid`
        :param DisasterRecoverGroupIds: Cluster placement group.
        :type DisasterRecoverGroupIds: str
        :param CbsEncryptFlag: Whether to use CBS encryption.
        :type CbsEncryptFlag: bool
        :param RemoteTcpDefaultPort: Whether to use remote login; defaults to false.
        :type RemoteTcpDefaultPort: bool
        """
        self.InstanceChargeType = None
        self.SupportHA = None
        self.SecurityGroupIds = None
        self.Placement = None
        self.VPCSettings = None
        self.LoginSettings = None
        self.TagSpecification = None
        self.MetaDB = None
        self.ResourceSpec = None
        self.PublicIpAssigned = None
        self.InstanceChargePrepaid = None
        self.DisasterRecoverGroupIds = None
        self.CbsEncryptFlag = None
        self.RemoteTcpDefaultPort = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.InstanceChargeType = params.get("InstanceChargeType")
        self.SupportHA = params.get("SupportHA")
        self.SecurityGroupIds = params.get("SecurityGroupIds")
        # Nested models: each is recursively deserialized when present.
        if params.get("Placement") is not None:
            self.Placement = Placement()
            self.Placement._deserialize(params.get("Placement"))
        if params.get("VPCSettings") is not None:
            self.VPCSettings = VPCSettings()
            self.VPCSettings._deserialize(params.get("VPCSettings"))
        if params.get("LoginSettings") is not None:
            self.LoginSettings = LoginSettings()
            self.LoginSettings._deserialize(params.get("LoginSettings"))
        self.TagSpecification = params.get("TagSpecification")
        if params.get("MetaDB") is not None:
            self.MetaDB = MetaDbInfo()
            self.MetaDB._deserialize(params.get("MetaDB"))
        if params.get("ResourceSpec") is not None:
            self.ResourceSpec = JobFlowResourceSpec()
            self.ResourceSpec._deserialize(params.get("ResourceSpec"))
        self.PublicIpAssigned = params.get("PublicIpAssigned")
        if params.get("InstanceChargePrepaid") is not None:
            self.InstanceChargePrepaid = InstanceChargePrepaid()
            self.InstanceChargePrepaid._deserialize(params.get("InstanceChargePrepaid"))
        self.DisasterRecoverGroupIds = params.get("DisasterRecoverGroupIds")
        self.CbsEncryptFlag = params.get("CbsEncryptFlag")
        self.RemoteTcpDefaultPort = params.get("RemoteTcpDefaultPort")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class Configuration(AbstractModel):
    """Custom configuration parameters.
    """

    def __init__(self):
        """
        :param Classification: Configuration file name. Customization is
            supported for a subset of SPARK, HIVE, HDFS and YARN files.
        :type Classification: str
        :param Properties: Configuration parameters passed as KV pairs.
            For files that support it, the whole content can be passed
            through the special key "content".
        :type Properties: str
        """
        self.Classification = None
        self.Properties = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.Classification = params.get("Classification")
        self.Properties = params.get("Properties")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class CreateInstanceRequest(AbstractModel):
    """CreateInstance request structure.
    """

    def __init__(self):
        """
        :param ProductId: Product ID; different IDs denote different EMR
            product versions. Value range:
            <li>1: EMR-V1.3.1.</li>
            <li>2: EMR-V2.0.1.</li>
            <li>4: EMR-V2.1.0.</li>
            <li>7: EMR-V3.0.0.</li>
        :type ProductId: int
        :param VPCSettings: VPC configuration, specifying the VPC ID,
            subnet ID and related information.
        :type VPCSettings: :class:`tencentcloud.emr.v20190103.models.VPCSettings`
        :param Software: List of components to deploy. Required components
            differ per product ID (see ProductId above):
            <li>ProductId 1: hadoop-2.7.3, knox-1.2.0, zookeeper-3.4.9</li>
            <li>ProductId 2: hadoop-2.7.3, knox-1.2.0, zookeeper-3.4.9</li>
            <li>ProductId 4: hadoop-2.8.4, knox-1.2.0, zookeeper-3.4.9</li>
            <li>ProductId 7: hadoop-3.1.2, knox-1.2.0, zookeeper-3.4.9</li>
        :type Software: list of str
        :param ResourceSpec: Node resource specification.
        :type ResourceSpec: :class:`tencentcloud.emr.v20190103.models.NewResourceSpec`
        :param SupportHA: Whether to enable node high availability:
            <li>0: disabled.</li>
            <li>1: enabled.</li>
        :type SupportHA: int
        :param InstanceName: Instance name.
            <li>6-36 characters.</li>
            <li>Only Chinese characters, letters, digits, - and _ allowed.</li>
        :type InstanceName: str
        :param PayMode: Instance billing mode:
            <li>0: pay-as-you-go.</li>
            <li>1: monthly/yearly subscription.</li>
        :type PayMode: int
        :param Placement: Instance location, specifying availability zone,
            project and other placement attributes.
        :type Placement: :class:`tencentcloud.emr.v20190103.models.Placement`
        :param TimeSpan: Purchase duration, used together with TimeUnit.
            <li>With TimeUnit "s", only 3600 is allowed (pay-as-you-go).</li>
            <li>With TimeUnit "m", the value is the subscription length in
            months, e.g. 1 for one month.</li>
        :type TimeSpan: int
        :param TimeUnit: Purchase time unit:
            <li>s: seconds. Required when PayMode is 0.</li>
            <li>m: months. Required when PayMode is 1.</li>
        :type TimeUnit: str
        :param LoginSettings: Instance login settings (password or key pair
            for the purchased nodes).
            <li>With a key pair, the password is only used for native
            component WebUI quick-entry login.</li>
            <li>Without a key pair, the password is used both for node login
            and native component WebUI quick-entry login.</li>
        :type LoginSettings: :class:`tencentcloud.emr.v20190103.models.LoginSettings`
        :param COSSettings: Parameters required to enable COS access.
        :type COSSettings: :class:`tencentcloud.emr.v20190103.models.COSSettings`
        :param SgId: Security group ID of the instance, e.g. sg-xxxxxxxx.
            Can be obtained from the SecurityGroupId field returned by
            DescribeSecurityGroups (https://cloud.tencent.com/document/api/215/15808).
        :type SgId: str
        :param PreExecutedFileSettings: Bootstrap script settings.
        :type PreExecutedFileSettings: list of PreExecuteFileSettings
        :param AutoRenew: Whether a subscription instance auto-renews:
            <li>0: no auto-renewal.</li>
            <li>1: auto-renewal.</li>
        :type AutoRenew: int
        :param ClientToken: Client token.
        :type ClientToken: str
        :param NeedMasterWan: Whether to enable public network access on the
            cluster master node:
            <li>NEED_MASTER_WAN: enable.</li>
            <li>NOT_NEED_MASTER_WAN: disable.</li>
            Enabled by default.
        :type NeedMasterWan: str
        :param RemoteLoginAtCreate: Whether to open external remote login
            (port 22). Ignored when SgId is set.
        :type RemoteLoginAtCreate: int
        :param CheckSecurity: Whether to enable a secure cluster.
            0 means disabled; any non-zero value means enabled.
        :type CheckSecurity: int
        :param ExtendFsField: Access to external file systems.
        :type ExtendFsField: str
        :param Tags: Tag list; tags are bound to the instance on creation.
        :type Tags: list of Tag
        :param DisasterRecoverGroupIds: Spread placement group ID list;
            currently only one may be specified.
        :type DisasterRecoverGroupIds: list of str
        :param CbsEncrypt: Cluster-level CBS disk encryption.
            Default 0 means unencrypted; 1 means encrypted.
        :type CbsEncrypt: int
        :param MetaType: Hive shared metadata database type. Value range:
            <li>EMR_NEW_META: created by the cluster by default.</li>
            <li>EMR_EXIT_METE: cluster uses the specified EMR-MetaDB.</li>
            <li>USER_CUSTOM_META: cluster uses a custom MetaDB.</li>
        :type MetaType: str
        :param UnifyMetaInstanceId: EMR-MetaDB instance.
        :type UnifyMetaInstanceId: str
        :param MetaDBInfo: Custom MetaDB information.
        :type MetaDBInfo: :class:`tencentcloud.emr.v20190103.models.CustomMetaInfo`
        :param ApplicationRole: Custom application role.
        :type ApplicationRole: str
        """
        self.ProductId = None
        self.VPCSettings = None
        self.Software = None
        self.ResourceSpec = None
        self.SupportHA = None
        self.InstanceName = None
        self.PayMode = None
        self.Placement = None
        self.TimeSpan = None
        self.TimeUnit = None
        self.LoginSettings = None
        self.COSSettings = None
        self.SgId = None
        self.PreExecutedFileSettings = None
        self.AutoRenew = None
        self.ClientToken = None
        self.NeedMasterWan = None
        self.RemoteLoginAtCreate = None
        self.CheckSecurity = None
        self.ExtendFsField = None
        self.Tags = None
        self.DisasterRecoverGroupIds = None
        self.CbsEncrypt = None
        self.MetaType = None
        self.UnifyMetaInstanceId = None
        self.MetaDBInfo = None
        self.ApplicationRole = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.ProductId = params.get("ProductId")
        # Nested models: each is recursively deserialized when present.
        if params.get("VPCSettings") is not None:
            self.VPCSettings = VPCSettings()
            self.VPCSettings._deserialize(params.get("VPCSettings"))
        self.Software = params.get("Software")
        if params.get("ResourceSpec") is not None:
            self.ResourceSpec = NewResourceSpec()
            self.ResourceSpec._deserialize(params.get("ResourceSpec"))
        self.SupportHA = params.get("SupportHA")
        self.InstanceName = params.get("InstanceName")
        self.PayMode = params.get("PayMode")
        if params.get("Placement") is not None:
            self.Placement = Placement()
            self.Placement._deserialize(params.get("Placement"))
        self.TimeSpan = params.get("TimeSpan")
        self.TimeUnit = params.get("TimeUnit")
        if params.get("LoginSettings") is not None:
            self.LoginSettings = LoginSettings()
            self.LoginSettings._deserialize(params.get("LoginSettings"))
        if params.get("COSSettings") is not None:
            self.COSSettings = COSSettings()
            self.COSSettings._deserialize(params.get("COSSettings"))
        self.SgId = params.get("SgId")
        # Nested list of PreExecuteFileSettings models.
        if params.get("PreExecutedFileSettings") is not None:
            self.PreExecutedFileSettings = []
            for item in params.get("PreExecutedFileSettings"):
                obj = PreExecuteFileSettings()
                obj._deserialize(item)
                self.PreExecutedFileSettings.append(obj)
        self.AutoRenew = params.get("AutoRenew")
        self.ClientToken = params.get("ClientToken")
        self.NeedMasterWan = params.get("NeedMasterWan")
        self.RemoteLoginAtCreate = params.get("RemoteLoginAtCreate")
        self.CheckSecurity = params.get("CheckSecurity")
        self.ExtendFsField = params.get("ExtendFsField")
        # Nested list of Tag models.
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = Tag()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.DisasterRecoverGroupIds = params.get("DisasterRecoverGroupIds")
        self.CbsEncrypt = params.get("CbsEncrypt")
        self.MetaType = params.get("MetaType")
        self.UnifyMetaInstanceId = params.get("UnifyMetaInstanceId")
        if params.get("MetaDBInfo") is not None:
            self.MetaDBInfo = CustomMetaInfo()
            self.MetaDBInfo._deserialize(params.get("MetaDBInfo"))
        self.ApplicationRole = params.get("ApplicationRole")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class CreateInstanceResponse(AbstractModel):
    """CreateInstance response structure.
    """

    def __init__(self):
        """
        :param RequestId: Unique request ID, returned for every request.
            Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.RequestId = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.RequestId = params.get("RequestId")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class CustomMetaInfo(AbstractModel):
    """User-managed Hive-MetaDB information.
    """

    def __init__(self):
        """
        :param MetaDataJdbcUrl: JDBC connection string of the custom MetaDB;
            must start with jdbc:mysql://
        :type MetaDataJdbcUrl: str
        :param MetaDataUser: Custom MetaDB user name.
        :type MetaDataUser: str
        :param MetaDataPass: Custom MetaDB password.
        :type MetaDataPass: str
        """
        self.MetaDataJdbcUrl = None
        self.MetaDataUser = None
        self.MetaDataPass = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.MetaDataJdbcUrl = params.get("MetaDataJdbcUrl")
        self.MetaDataUser = params.get("MetaDataUser")
        self.MetaDataPass = params.get("MetaDataPass")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class DescribeClusterNodesRequest(AbstractModel):
    """DescribeClusterNodes request structure.
    """

    def __init__(self):
        """
        :param InstanceId: Cluster instance ID, in the form emr-xxxxxxxx.
        :type InstanceId: str
        :param NodeFlag: Node flag. Valid values:
            <li>all: all node types, excluding cdb information.</li>
            <li>master: master node information.</li>
            <li>core: core node information.</li>
            <li>task: task node information.</li>
            <li>common: common node information.</li>
            <li>router: router node information.</li>
            <li>db: cdb information for normal-status databases.</li>
            <li>recyle: nodes isolated in the recycle bin, including cdb.</li>
            <li>renew: all nodes awaiting renewal, including cdb;
            auto-renewing nodes are not returned.</li>
            Note: only the above values are supported; any other value
            causes an error.
        :type NodeFlag: str
        :param Offset: Page number; defaults to 0 (first page).
        :type Offset: int
        :param Limit: Number of entries per page; default 100, maximum 100.
        :type Limit: int
        :param HardwareResourceType: Resource type: all/host/pod; default all.
        :type HardwareResourceType: str
        :param SearchFields: Searchable fields.
        :type SearchFields: list of SearchItem
        """
        self.InstanceId = None
        self.NodeFlag = None
        self.Offset = None
        self.Limit = None
        self.HardwareResourceType = None
        self.SearchFields = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        self.NodeFlag = params.get("NodeFlag")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.HardwareResourceType = params.get("HardwareResourceType")
        # Nested list of SearchItem models.
        if params.get("SearchFields") is not None:
            self.SearchFields = []
            for item in params.get("SearchFields"):
                obj = SearchItem()
                obj._deserialize(item)
                self.SearchFields.append(obj)
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class DescribeClusterNodesResponse(AbstractModel):
    """DescribeClusterNodes response structure.
    """

    def __init__(self):
        """
        :param TotalCnt: Total number of nodes found.
        :type TotalCnt: int
        :param NodeList: Detailed node information list.
            Note: this field may return null, indicating no valid value.
        :type NodeList: list of NodeHardwareInfo
        :param TagKeys: List of all tag keys owned by the user.
            Note: this field may return null, indicating no valid value.
        :type TagKeys: list of str
        :param HardwareResourceTypeList: Resource type list.
            Note: this field may return null, indicating no valid value.
        :type HardwareResourceTypeList: list of str
        :param RequestId: Unique request ID, returned for every request.
            Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.TotalCnt = None
        self.NodeList = None
        self.TagKeys = None
        self.HardwareResourceTypeList = None
        self.RequestId = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.TotalCnt = params.get("TotalCnt")
        # Nested list of NodeHardwareInfo models.
        if params.get("NodeList") is not None:
            self.NodeList = []
            for item in params.get("NodeList"):
                obj = NodeHardwareInfo()
                obj._deserialize(item)
                self.NodeList.append(obj)
        self.TagKeys = params.get("TagKeys")
        self.HardwareResourceTypeList = params.get("HardwareResourceTypeList")
        self.RequestId = params.get("RequestId")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class DescribeInstanceRenewNodesRequest(AbstractModel):
    """DescribeInstanceRenewNodes request structure.
    """

    def __init__(self):
        """
        :param InstanceId: Cluster instance ID, in the form emr-xxxxxxxx.
        :type InstanceId: str
        """
        self.InstanceId = None


    def _deserialize(self, params):
        """Populate attributes from *params*; warn about unrecognized keys."""
        self.InstanceId = params.get("InstanceId")
        # Warn about keys this model does not know, rather than drop them silently.
        extra_keys = set(params.keys()) - set(vars(self))
        if extra_keys:
            warnings.warn("%s fields are useless." % ",".join(extra_keys), Warning)
class DescribeInstanceRenewNodesResponse(AbstractModel):
    """DescribeInstanceRenewNodes response structure."""

    def __init__(self):
        """
        :param TotalCnt: Total number of nodes matching the query.
        :type TotalCnt: int
        :param NodeList: Detailed node information list.
        Note: this field may return null, indicating that no valid value was found.
        :type NodeList: list of RenewInstancesInfo
        :param MetaInfo: All tag keys owned by the user.
        Note: this field may return null, indicating that no valid value was found.
        :type MetaInfo: list of str
        :param RequestId: Unique request ID, returned with every request.
        Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.TotalCnt = None
        self.NodeList = None
        self.MetaInfo = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.TotalCnt = params.get("TotalCnt")
        if params.get("NodeList") is not None:
            self.NodeList = []
            for item in params.get("NodeList"):
                obj = RenewInstancesInfo()
                obj._deserialize(item)
                self.NodeList.append(obj)
        self.MetaInfo = params.get("MetaInfo")
        self.RequestId = params.get("RequestId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeInstancesRequest(AbstractModel):
    """DescribeInstances request structure."""

    def __init__(self):
        """
        :param DisplayStrategy: Cluster filtering strategy. Valid values:
        <li>clusterList: list clusters except those already destroyed.</li>
        <li>monitorManage: list clusters except those destroyed, being created,
        or whose creation failed.</li>
        <li>cloudHardwareManage/componentManage: reserved values, currently the
        same meaning as monitorManage.</li>
        :type DisplayStrategy: str
        :param InstanceIds: Query by one or more instance IDs, in the form
        emr-xxxxxxxx. If no instance ID is given, all instances under the
        APPID are returned.
        :type InstanceIds: list of str
        :param Offset: Page number; default 0, meaning the first page.
        :type Offset: int
        :param Limit: Number of entries per page; default 10, maximum 100.
        :type Limit: int
        :param ProjectId: Project ID the instances belong to. -1 (recommended)
        pulls clusters from all projects; if omitted, defaults to 0 (default
        project). Obtainable from the projectId field returned by
        DescribeProject.
        :type ProjectId: int
        :param OrderField: Sort field. Valid values:
        <li>clusterId: sort by instance ID.</li>
        <li>addTime: sort by instance creation time.</li>
        <li>status: sort by instance status code.</li>
        :type OrderField: str
        :param Asc: Sort order on OrderField. Valid values:
        <li>0: descending (default).</li>
        <li>1: ascending.</li>
        :type Asc: int
        """
        self.DisplayStrategy = None
        self.InstanceIds = None
        self.Offset = None
        self.Limit = None
        self.ProjectId = None
        self.OrderField = None
        self.Asc = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.DisplayStrategy = params.get("DisplayStrategy")
        self.InstanceIds = params.get("InstanceIds")
        self.Offset = params.get("Offset")
        self.Limit = params.get("Limit")
        self.ProjectId = params.get("ProjectId")
        self.OrderField = params.get("OrderField")
        self.Asc = params.get("Asc")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeInstancesResponse(AbstractModel):
    """DescribeInstances response structure."""

    def __init__(self):
        """
        :param TotalCnt: Total number of instances matching the query.
        :type TotalCnt: int
        :param ClusterList: Detailed EMR instance information list.
        Note: this field may return null, indicating that no valid value was found.
        :type ClusterList: list of ClusterInstancesInfo
        :param TagKeys: Tag keys associated with the instances.
        Note: this field may return null, indicating that no valid value was found.
        :type TagKeys: list of str
        :param RequestId: Unique request ID, returned with every request.
        Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.TotalCnt = None
        self.ClusterList = None
        self.TagKeys = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.TotalCnt = params.get("TotalCnt")
        if params.get("ClusterList") is not None:
            self.ClusterList = []
            for item in params.get("ClusterList"):
                obj = ClusterInstancesInfo()
                obj._deserialize(item)
                self.ClusterList.append(obj)
        self.TagKeys = params.get("TagKeys")
        self.RequestId = params.get("RequestId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeJobFlowRequest(AbstractModel):
    """DescribeJobFlow request structure."""

    def __init__(self):
        """
        :param JobFlowId: Job-flow task ID, as returned by the RunJobFlow API.
        :type JobFlowId: int
        """
        self.JobFlowId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.JobFlowId = params.get("JobFlowId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DescribeJobFlowResponse(AbstractModel):
    """DescribeJobFlow response structure."""

    def __init__(self):
        """
        :param State: Job-flow task state. One of:
        JobFlowInit: job flow is being initialized.
        JobFlowResourceApplied: resources are being requested (typically when
        the job flow needs to create a new cluster).
        JobFlowResourceReady: resources for the job flow are ready.
        JobFlowStepsRunning: job-flow steps have been submitted.
        JobFlowStepsComplete: job-flow steps have completed.
        JobFlowTerminating: resources used by the job flow are being destroyed.
        JobFlowFinish: job flow has finished.
        :type State: str
        :param Details: Results of the job-flow steps.
        :type Details: list of JobResult
        :param RequestId: Unique request ID, returned with every request.
        Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.State = None
        self.Details = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.State = params.get("State")
        if params.get("Details") is not None:
            self.Details = []
            for item in params.get("Details"):
                obj = JobResult()
                obj._deserialize(item)
                self.Details.append(obj)
        self.RequestId = params.get("RequestId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DiskGroup(AbstractModel):
    """A group of disks of one specification."""

    def __init__(self):
        """
        :param Spec: Disk specification.
        :type Spec: :class:`tencentcloud.emr.v20190103.models.DiskSpec`
        :param Count: Number of disks of this specification.
        :type Count: int
        """
        self.Spec = None
        self.Count = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        if params.get("Spec") is not None:
            self.Spec = DiskSpec()
            self.Spec._deserialize(params.get("Spec"))
        self.Count = params.get("Count")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DiskSpec(AbstractModel):
    """Disk description."""

    def __init__(self):
        """
        :param DiskType: Disk type. One of:
        LOCAL_BASIC: local disk.
        CLOUD_BASIC: cloud disk.
        LOCAL_SSD: local SSD.
        CLOUD_SSD: cloud SSD.
        CLOUD_PREMIUM: premium cloud disk.
        CLOUD_HSSD: enhanced cloud SSD.
        :type DiskType: str
        :param DiskSize: Disk size, in GB.
        :type DiskSize: int
        """
        self.DiskType = None
        self.DiskSize = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.DiskType = params.get("DiskType")
        self.DiskSize = params.get("DiskSize")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class DynamicPodSpec(AbstractModel):
    """Floating (dynamic) specification for a pod."""

    def __init__(self):
        """
        :param RequestCpu: Minimum number of CPU cores requested.
        :type RequestCpu: float
        :param LimitCpu: Maximum number of CPU cores allowed.
        :type LimitCpu: float
        :param RequestMemory: Minimum memory requested, in MB.
        :type RequestMemory: float
        :param LimitMemory: Maximum memory allowed, in MB.
        :type LimitMemory: float
        """
        self.RequestCpu = None
        self.LimitCpu = None
        self.RequestMemory = None
        self.LimitMemory = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.RequestCpu = params.get("RequestCpu")
        self.LimitCpu = params.get("LimitCpu")
        self.RequestMemory = params.get("RequestMemory")
        self.LimitMemory = params.get("LimitMemory")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class EmrProductConfigOutter(AbstractModel):
    """EMR product configuration.

    Note: every field below may return null, indicating that no valid value
    was found.
    """

    def __init__(self):
        """
        :param SoftInfo: Software information.
        :type SoftInfo: list of str
        :param MasterNodeSize: Number of Master nodes.
        :type MasterNodeSize: int
        :param CoreNodeSize: Number of Core nodes.
        :type CoreNodeSize: int
        :param TaskNodeSize: Number of Task nodes.
        :type TaskNodeSize: int
        :param ComNodeSize: Number of Common nodes.
        :type ComNodeSize: int
        :param MasterResource: Master node resources.
        :type MasterResource: :class:`tencentcloud.emr.v20190103.models.OutterResource`
        :param CoreResource: Core node resources.
        :type CoreResource: :class:`tencentcloud.emr.v20190103.models.OutterResource`
        :param TaskResource: Task node resources.
        :type TaskResource: :class:`tencentcloud.emr.v20190103.models.OutterResource`
        :param ComResource: Common node resources.
        :type ComResource: :class:`tencentcloud.emr.v20190103.models.OutterResource`
        :param OnCos: Whether COS is used.
        :type OnCos: bool
        :param ChargeType: Billing type.
        :type ChargeType: int
        :param RouterNodeSize: Number of Router nodes.
        :type RouterNodeSize: int
        :param SupportHA: Whether high availability is supported.
        :type SupportHA: bool
        :param SecurityOn: Whether secure mode is supported.
        :type SecurityOn: bool
        :param SecurityGroup: Security group name.
        :type SecurityGroup: str
        :param CbsEncrypt: Whether CBS encryption is enabled.
        :type CbsEncrypt: int
        """
        self.SoftInfo = None
        self.MasterNodeSize = None
        self.CoreNodeSize = None
        self.TaskNodeSize = None
        self.ComNodeSize = None
        self.MasterResource = None
        self.CoreResource = None
        self.TaskResource = None
        self.ComResource = None
        self.OnCos = None
        self.ChargeType = None
        self.RouterNodeSize = None
        self.SupportHA = None
        self.SecurityOn = None
        self.SecurityGroup = None
        self.CbsEncrypt = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.SoftInfo = params.get("SoftInfo")
        self.MasterNodeSize = params.get("MasterNodeSize")
        self.CoreNodeSize = params.get("CoreNodeSize")
        self.TaskNodeSize = params.get("TaskNodeSize")
        self.ComNodeSize = params.get("ComNodeSize")
        if params.get("MasterResource") is not None:
            self.MasterResource = OutterResource()
            self.MasterResource._deserialize(params.get("MasterResource"))
        if params.get("CoreResource") is not None:
            self.CoreResource = OutterResource()
            self.CoreResource._deserialize(params.get("CoreResource"))
        if params.get("TaskResource") is not None:
            self.TaskResource = OutterResource()
            self.TaskResource._deserialize(params.get("TaskResource"))
        if params.get("ComResource") is not None:
            self.ComResource = OutterResource()
            self.ComResource._deserialize(params.get("ComResource"))
        self.OnCos = params.get("OnCos")
        self.ChargeType = params.get("ChargeType")
        self.RouterNodeSize = params.get("RouterNodeSize")
        self.SupportHA = params.get("SupportHA")
        self.SecurityOn = params.get("SecurityOn")
        self.SecurityGroup = params.get("SecurityGroup")
        self.CbsEncrypt = params.get("CbsEncrypt")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class Execution(AbstractModel):
    """An action to execute."""

    def __init__(self):
        """
        :param JobType: Job type. Currently supported values:
        1. "MR": submitted via hadoop jar.
        2. "HIVE": submitted via hive -f.
        3. "SPARK": submitted via spark-submit.
        :type JobType: str
        :param Args: Job arguments; everything except the submit command itself.
        :type Args: list of str
        """
        self.JobType = None
        self.Args = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.JobType = params.get("JobType")
        self.Args = params.get("Args")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class HostVolumeContext(AbstractModel):
    """Description of a pod HostPath mount."""

    def __init__(self):
        """
        :param VolumePath: Host directory mounted into the pod. The mount
        point corresponds to a path on the host and is used as the data
        storage directory inside the pod.
        Note: this field may return null, indicating that no valid value was found.
        :type VolumePath: str
        """
        self.VolumePath = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.VolumePath = params.get("VolumePath")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquirePriceRenewEmrRequest(AbstractModel):
    """InquirePriceRenewEmr request structure."""

    def __init__(self):
        """
        :param TimeSpan: Renewal duration, used together with TimeUnit;
        1 means renew for one month.
        :type TimeSpan: int
        :param InstanceId: ID of the cluster to renew.
        :type InstanceId: str
        :param Placement: Location of the instance: availability zone,
        project, and other placement attributes.
        :type Placement: :class:`tencentcloud.emr.v20190103.models.Placement`
        :param PayMode: Billing mode. Only 1 (monthly subscription) is
        supported here.
        :type PayMode: int
        :param TimeUnit: Renewal time unit. Valid values:
        <li>m: month.</li>
        :type TimeUnit: str
        :param Currency: Currency. Valid values:
        <li>CNY: Chinese yuan.</li>
        :type Currency: str
        """
        self.TimeSpan = None
        self.InstanceId = None
        self.Placement = None
        self.PayMode = None
        self.TimeUnit = None
        self.Currency = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.TimeSpan = params.get("TimeSpan")
        self.InstanceId = params.get("InstanceId")
        if params.get("Placement") is not None:
            self.Placement = Placement()
            self.Placement._deserialize(params.get("Placement"))
        self.PayMode = params.get("PayMode")
        self.TimeUnit = params.get("TimeUnit")
        self.Currency = params.get("Currency")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquirePriceRenewEmrResponse(AbstractModel):
    """InquirePriceRenewEmr response structure."""

    def __init__(self):
        """
        :param OriginalCost: Original price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type OriginalCost: float
        :param DiscountCost: Discounted price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type DiscountCost: float
        :param TimeUnit: Renewal time unit. Valid values:
        <li>m: month.</li>
        Note: this field may return null, indicating that no valid value was found.
        :type TimeUnit: str
        :param TimeSpan: Renewal duration.
        Note: this field may return null, indicating that no valid value was found.
        :type TimeSpan: int
        :param RequestId: Unique request ID, returned with every request.
        Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.OriginalCost = None
        self.DiscountCost = None
        self.TimeUnit = None
        self.TimeSpan = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.OriginalCost = params.get("OriginalCost")
        self.DiscountCost = params.get("DiscountCost")
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
        self.RequestId = params.get("RequestId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquiryPriceCreateInstanceRequest(AbstractModel):
    """InquiryPriceCreateInstance request structure."""

    def __init__(self):
        """
        :param TimeUnit: Purchase time unit. Valid values:
        <li>s: second. When PayMode is 0, TimeUnit can only be s.</li>
        <li>m: month. When PayMode is 1, TimeUnit can only be m.</li>
        :type TimeUnit: str
        :param TimeSpan: Purchase duration, used together with TimeUnit.
        <li>When TimeUnit is s, this can only be 3600 (pay-as-you-go).</li>
        <li>When TimeUnit is m, this is the number of months of the monthly
        subscription, e.g. 1 means one month.</li>
        :type TimeSpan: int
        :param ResourceSpec: Node specification to get a quote for.
        :type ResourceSpec: :class:`tencentcloud.emr.v20190103.models.NewResourceSpec`
        :param Currency: Currency. Valid values:
        <li>CNY: Chinese yuan.</li>
        :type Currency: str
        :param PayMode: Billing mode. Valid values:
        <li>0: pay-as-you-go.</li>
        <li>1: monthly subscription.</li>
        :type PayMode: int
        :param SupportHA: Whether node high availability is enabled. Valid values:
        <li>0: disabled.</li>
        <li>1: enabled.</li>
        :type SupportHA: int
        :param Software: List of components to deploy. Required components
        depend on the EMR product ID (ProductId; see that field):
        <li>ProductId 1: hadoop-2.7.3, knox-1.2.0, zookeeper-3.4.9</li>
        <li>ProductId 2: hadoop-2.7.3, knox-1.2.0, zookeeper-3.4.9</li>
        <li>ProductId 4: hadoop-2.8.4, knox-1.2.0, zookeeper-3.4.9</li>
        <li>ProductId 7: hadoop-3.1.2, knox-1.2.0, zookeeper-3.4.9</li>
        :type Software: list of str
        :param Placement: Location of the instance: availability zone,
        project, and other placement attributes.
        :type Placement: :class:`tencentcloud.emr.v20190103.models.Placement`
        :param VPCSettings: VPC configuration: VPC ID, subnet ID, etc.
        :type VPCSettings: :class:`tencentcloud.emr.v20190103.models.VPCSettings`
        :param MetaType: Type of the shared Hive metastore. Valid values:
        <li>EMR_NEW_META: created by the cluster by default.</li>
        <li>EMR_EXIT_METE: the cluster uses a specified EMR-MetaDB.</li>
        <li>USER_CUSTOM_META: the cluster uses a custom MetaDB.</li>
        :type MetaType: str
        :param UnifyMetaInstanceId: EMR-MetaDB instance.
        :type UnifyMetaInstanceId: str
        :param MetaDBInfo: Custom MetaDB information.
        :type MetaDBInfo: :class:`tencentcloud.emr.v20190103.models.CustomMetaInfo`
        :param ProductId: Product ID; identifies the EMR product version.
        Valid values:
        <li>1: EMR-V1.3.1.</li>
        <li>2: EMR-V2.0.1.</li>
        <li>4: EMR-V2.1.0.</li>
        <li>7: EMR-V3.0.0.</li>
        :type ProductId: int
        """
        self.TimeUnit = None
        self.TimeSpan = None
        self.ResourceSpec = None
        self.Currency = None
        self.PayMode = None
        self.SupportHA = None
        self.Software = None
        self.Placement = None
        self.VPCSettings = None
        self.MetaType = None
        self.UnifyMetaInstanceId = None
        self.MetaDBInfo = None
        self.ProductId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
        if params.get("ResourceSpec") is not None:
            self.ResourceSpec = NewResourceSpec()
            self.ResourceSpec._deserialize(params.get("ResourceSpec"))
        self.Currency = params.get("Currency")
        self.PayMode = params.get("PayMode")
        self.SupportHA = params.get("SupportHA")
        self.Software = params.get("Software")
        if params.get("Placement") is not None:
            self.Placement = Placement()
            self.Placement._deserialize(params.get("Placement"))
        if params.get("VPCSettings") is not None:
            self.VPCSettings = VPCSettings()
            self.VPCSettings._deserialize(params.get("VPCSettings"))
        self.MetaType = params.get("MetaType")
        self.UnifyMetaInstanceId = params.get("UnifyMetaInstanceId")
        if params.get("MetaDBInfo") is not None:
            self.MetaDBInfo = CustomMetaInfo()
            self.MetaDBInfo._deserialize(params.get("MetaDBInfo"))
        self.ProductId = params.get("ProductId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquiryPriceCreateInstanceResponse(AbstractModel):
    """InquiryPriceCreateInstance response structure."""

    def __init__(self):
        """
        :param OriginalCost: Original price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type OriginalCost: float
        :param DiscountCost: Discounted price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type DiscountCost: float
        :param TimeUnit: Purchase time unit. Valid values:
        <li>s: second.</li>
        <li>m: month.</li>
        Note: this field may return null, indicating that no valid value was found.
        :type TimeUnit: str
        :param TimeSpan: Purchase duration.
        Note: this field may return null, indicating that no valid value was found.
        :type TimeSpan: int
        :param RequestId: Unique request ID, returned with every request.
        Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.OriginalCost = None
        self.DiscountCost = None
        self.TimeUnit = None
        self.TimeSpan = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.OriginalCost = params.get("OriginalCost")
        self.DiscountCost = params.get("DiscountCost")
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
        self.RequestId = params.get("RequestId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquiryPriceRenewInstanceRequest(AbstractModel):
    """InquiryPriceRenewInstance request structure."""

    def __init__(self):
        """
        :param TimeSpan: Renewal duration, used together with TimeUnit;
        1 means renew for one month.
        :type TimeSpan: int
        :param ResourceIds: Resource IDs of the nodes to renew, in the form
        emr-vm-xxxxxxxx. Valid resource IDs can be looked up in the console
        at https://console.cloud.tencent.com/emr/static/hardware.
        :type ResourceIds: list of str
        :param Placement: Location of the instance: availability zone,
        project, and other placement attributes.
        :type Placement: :class:`tencentcloud.emr.v20190103.models.Placement`
        :param PayMode: Billing mode. Only 1 (monthly subscription) is
        supported here.
        :type PayMode: int
        :param TimeUnit: Renewal time unit. Valid values:
        <li>m: month.</li>
        :type TimeUnit: str
        :param Currency: Currency. Valid values:
        <li>CNY: Chinese yuan.</li>
        :type Currency: str
        """
        self.TimeSpan = None
        self.ResourceIds = None
        self.Placement = None
        self.PayMode = None
        self.TimeUnit = None
        self.Currency = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.TimeSpan = params.get("TimeSpan")
        self.ResourceIds = params.get("ResourceIds")
        if params.get("Placement") is not None:
            self.Placement = Placement()
            self.Placement._deserialize(params.get("Placement"))
        self.PayMode = params.get("PayMode")
        self.TimeUnit = params.get("TimeUnit")
        self.Currency = params.get("Currency")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquiryPriceRenewInstanceResponse(AbstractModel):
    """InquiryPriceRenewInstance response structure."""

    def __init__(self):
        """
        :param OriginalCost: Original price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type OriginalCost: float
        :param DiscountCost: Discounted price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type DiscountCost: float
        :param TimeUnit: Renewal time unit. Valid values:
        <li>m: month.</li>
        Note: this field may return null, indicating that no valid value was found.
        :type TimeUnit: str
        :param TimeSpan: Renewal duration.
        Note: this field may return null, indicating that no valid value was found.
        :type TimeSpan: int
        :param RequestId: Unique request ID, returned with every request.
        Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.OriginalCost = None
        self.DiscountCost = None
        self.TimeUnit = None
        self.TimeSpan = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.OriginalCost = params.get("OriginalCost")
        self.DiscountCost = params.get("DiscountCost")
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
        self.RequestId = params.get("RequestId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquiryPriceScaleOutInstanceRequest(AbstractModel):
    """InquiryPriceScaleOutInstance request structure."""

    def __init__(self):
        """
        :param TimeUnit: Scale-out time unit. Valid values:
        <li>s: second. When PayMode is 0, TimeUnit can only be s.</li>
        <li>m: month. When PayMode is 1, TimeUnit can only be m.</li>
        :type TimeUnit: str
        :param TimeSpan: Scale-out duration, used together with TimeUnit.
        <li>When TimeUnit is s, this can only be 3600 (pay-as-you-go).</li>
        <li>When TimeUnit is m, this is the number of months of the monthly
        subscription, e.g. 1 means one month.</li>
        :type TimeSpan: int
        :param ZoneId: Availability-zone ID of the instance, e.g. 100003.
        Obtainable from the ZoneId field returned by DescribeZones.
        :type ZoneId: int
        :param PayMode: Billing mode. Valid values:
        <li>0: pay-as-you-go.</li>
        <li>1: monthly subscription.</li>
        :type PayMode: int
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param CoreCount: Number of Core nodes to add.
        :type CoreCount: int
        :param TaskCount: Number of Task nodes to add.
        :type TaskCount: int
        :param Currency: Currency. Valid values:
        <li>CNY: Chinese yuan.</li>
        :type Currency: str
        :param RouterCount: Number of Router nodes to add.
        :type RouterCount: int
        :param MasterCount: Number of Master nodes to add.
        :type MasterCount: int
        """
        self.TimeUnit = None
        self.TimeSpan = None
        self.ZoneId = None
        self.PayMode = None
        self.InstanceId = None
        self.CoreCount = None
        self.TaskCount = None
        self.Currency = None
        self.RouterCount = None
        self.MasterCount = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
        self.ZoneId = params.get("ZoneId")
        self.PayMode = params.get("PayMode")
        self.InstanceId = params.get("InstanceId")
        self.CoreCount = params.get("CoreCount")
        self.TaskCount = params.get("TaskCount")
        self.Currency = params.get("Currency")
        self.RouterCount = params.get("RouterCount")
        self.MasterCount = params.get("MasterCount")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquiryPriceScaleOutInstanceResponse(AbstractModel):
    """InquiryPriceScaleOutInstance response structure."""

    def __init__(self):
        """
        :param OriginalCost: Original price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type OriginalCost: str
        :param DiscountCost: Discounted price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type DiscountCost: str
        :param Unit: Scale-out time unit. Valid values:
        <li>s: second.</li>
        <li>m: month.</li>
        Note: this field may return null, indicating that no valid value was found.
        :type Unit: str
        :param PriceSpec: Node specification that was quoted.
        Note: this field may return null, indicating that no valid value was found.
        :type PriceSpec: :class:`tencentcloud.emr.v20190103.models.PriceResource`
        :param RequestId: Unique request ID, returned with every request.
        Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.OriginalCost = None
        self.DiscountCost = None
        self.Unit = None
        self.PriceSpec = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.OriginalCost = params.get("OriginalCost")
        self.DiscountCost = params.get("DiscountCost")
        self.Unit = params.get("Unit")
        if params.get("PriceSpec") is not None:
            self.PriceSpec = PriceResource()
            self.PriceSpec._deserialize(params.get("PriceSpec"))
        self.RequestId = params.get("RequestId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquiryPriceUpdateInstanceRequest(AbstractModel):
    """InquiryPriceUpdateInstance request structure."""

    def __init__(self):
        """
        :param TimeUnit: Reconfiguration time unit. Valid values:
        <li>s: second. When PayMode is 0, TimeUnit can only be s.</li>
        <li>m: month. When PayMode is 1, TimeUnit can only be m.</li>
        :type TimeUnit: str
        :param TimeSpan: Reconfiguration duration, used together with TimeUnit.
        <li>When TimeUnit is s, this can only be 3600 (pay-as-you-go).</li>
        <li>When TimeUnit is m, this is the number of months of the monthly
        subscription, e.g. 1 means one month.</li>
        :type TimeSpan: int
        :param UpdateSpec: Target node configuration.
        :type UpdateSpec: :class:`tencentcloud.emr.v20190103.models.UpdateInstanceSettings`
        :param PayMode: Billing mode. Valid values:
        <li>0: pay-as-you-go.</li>
        <li>1: monthly subscription.</li>
        :type PayMode: int
        :param Placement: Location of the instance: availability zone,
        project, and other placement attributes.
        :type Placement: :class:`tencentcloud.emr.v20190103.models.Placement`
        :param Currency: Currency. Valid values:
        <li>CNY: Chinese yuan.</li>
        :type Currency: str
        """
        self.TimeUnit = None
        self.TimeSpan = None
        self.UpdateSpec = None
        self.PayMode = None
        self.Placement = None
        self.Currency = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
        if params.get("UpdateSpec") is not None:
            self.UpdateSpec = UpdateInstanceSettings()
            self.UpdateSpec._deserialize(params.get("UpdateSpec"))
        self.PayMode = params.get("PayMode")
        if params.get("Placement") is not None:
            self.Placement = Placement()
            self.Placement._deserialize(params.get("Placement"))
        self.Currency = params.get("Currency")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InquiryPriceUpdateInstanceResponse(AbstractModel):
    """InquiryPriceUpdateInstance response structure."""

    def __init__(self):
        """
        :param OriginalCost: Original price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type OriginalCost: float
        :param DiscountCost: Discounted price, in yuan.
        Note: this field may return null, indicating that no valid value was found.
        :type DiscountCost: float
        :param TimeUnit: Reconfiguration time unit. Valid values:
        <li>s: second.</li>
        <li>m: month.</li>
        Note: this field may return null, indicating that no valid value was found.
        :type TimeUnit: str
        :param TimeSpan: Reconfiguration duration.
        Note: this field may return null, indicating that no valid value was found.
        :type TimeSpan: int
        :param RequestId: Unique request ID, returned with every request.
        Provide it when locating a problem with this request.
        :type RequestId: str
        """
        self.OriginalCost = None
        self.DiscountCost = None
        self.TimeUnit = None
        self.TimeSpan = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.OriginalCost = params.get("OriginalCost")
        self.DiscountCost = params.get("DiscountCost")
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
        self.RequestId = params.get("RequestId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class InstanceChargePrepaid(AbstractModel):
    """Prepaid (monthly-subscription) parameters.

    Only takes effect when the billing type is PREPAID.
    """

    def __init__(self):
        """
        :param Period: Subscription period in months; default 1.
        Valid values: 1-12, 24, 36, 48, 60.
        :type Period: int
        :param RenewFlag: Whether to auto-renew; default False.
        :type RenewFlag: bool
        """
        self.Period = None
        self.RenewFlag = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.Period = params.get("Period")
        self.RenewFlag = params.get("RenewFlag")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class JobFlowResource(AbstractModel):
    """Machine resource description."""

    def __init__(self):
        """
        :param Spec: Machine type description.
        :type Spec: str
        :param InstanceType: Machine instance type, as defined by CVM.
        :type InstanceType: str
        :param Tags: Tag key-value pairs.
        :type Tags: list of Tag
        :param DiskGroups: List of disk descriptions.
        :type DiskGroups: list of DiskGroup
        """
        self.Spec = None
        self.InstanceType = None
        self.Tags = None
        self.DiskGroups = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.Spec = params.get("Spec")
        self.InstanceType = params.get("InstanceType")
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = Tag()
                obj._deserialize(item)
                self.Tags.append(obj)
        if params.get("DiskGroups") is not None:
            self.DiskGroups = []
            for item in params.get("DiskGroups"):
                obj = DiskGroup()
                obj._deserialize(item)
                self.DiskGroups.append(obj)
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class JobFlowResourceSpec(AbstractModel):
    """Resource description for a job flow."""

    def __init__(self):
        """
        :param MasterCount: Number of Master nodes.
        :type MasterCount: int
        :param MasterResourceSpec: Master node configuration.
        :type MasterResourceSpec: :class:`tencentcloud.emr.v20190103.models.JobFlowResource`
        :param CoreCount: Number of Core nodes.
        :type CoreCount: int
        :param CoreResourceSpec: Core node configuration.
        :type CoreResourceSpec: :class:`tencentcloud.emr.v20190103.models.JobFlowResource`
        :param TaskCount: Number of Task nodes.
        :type TaskCount: int
        :param CommonCount: Number of Common nodes.
        :type CommonCount: int
        :param TaskResourceSpec: Task node configuration.
        :type TaskResourceSpec: :class:`tencentcloud.emr.v20190103.models.JobFlowResource`
        :param CommonResourceSpec: Common node configuration.
        :type CommonResourceSpec: :class:`tencentcloud.emr.v20190103.models.JobFlowResource`
        """
        self.MasterCount = None
        self.MasterResourceSpec = None
        self.CoreCount = None
        self.CoreResourceSpec = None
        self.TaskCount = None
        self.CommonCount = None
        self.TaskResourceSpec = None
        self.CommonResourceSpec = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.MasterCount = params.get("MasterCount")
        if params.get("MasterResourceSpec") is not None:
            self.MasterResourceSpec = JobFlowResource()
            self.MasterResourceSpec._deserialize(params.get("MasterResourceSpec"))
        self.CoreCount = params.get("CoreCount")
        if params.get("CoreResourceSpec") is not None:
            self.CoreResourceSpec = JobFlowResource()
            self.CoreResourceSpec._deserialize(params.get("CoreResourceSpec"))
        self.TaskCount = params.get("TaskCount")
        self.CommonCount = params.get("CommonCount")
        if params.get("TaskResourceSpec") is not None:
            self.TaskResourceSpec = JobFlowResource()
            self.TaskResourceSpec._deserialize(params.get("TaskResourceSpec"))
        if params.get("CommonResourceSpec") is not None:
            self.CommonResourceSpec = JobFlowResource()
            self.CommonResourceSpec._deserialize(params.get("CommonResourceSpec"))
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class JobResult(AbstractModel):
    """Result of a job-flow step.

    Note: every field below may return null, indicating that no valid value
    was found.
    """

    def __init__(self):
        """
        :param Name: Name of the job step.
        :type Name: str
        :param ActionOnFailure: Policy applied when the step fails. One of:
        "CONTINUE": skip the failed step and continue with later steps.
        "TERMINATE_CLUSTER": terminate the current and later steps and
        destroy the cluster.
        "CANCEL_AND_WAIT": cancel the current step and block, waiting for
        handling.
        :type ActionOnFailure: str
        :param JobState: State of the current step. One of:
        "JobFlowStepStatusInit": initialized, waiting to run.
        "JobFlowStepStatusRunning": step is running.
        "JobFlowStepStatusFailed": step failed.
        "JobFlowStepStatusSucceed": step succeeded.
        :type JobState: str
        """
        self.Name = None
        self.ActionOnFailure = None
        self.JobState = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.Name = params.get("Name")
        self.ActionOnFailure = params.get("ActionOnFailure")
        self.JobState = params.get("JobState")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class LoginSettings(AbstractModel):
    """Login settings."""

    def __init__(self):
        """
        :param Password: Password.
        :type Password: str
        :param PublicKeyId: Public key ID.
        :type PublicKeyId: str
        """
        self.Password = None
        self.PublicKeyId = None

    def _deserialize(self, params):
        """Populate the model's attributes from *params* (a dict)."""
        self.Password = params.get("Password")
        self.PublicKeyId = params.get("PublicKeyId")
        # Warn about keys that do not correspond to any declared attribute.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class MetaDbInfo(AbstractModel):
    """Metadata database information."""

    def __init__(self):
        """
        :param MetaType: Metadata type.
        :type MetaType: str
        :param UnifyMetaInstanceId: Unified metadata database instance ID.
        :type UnifyMetaInstanceId: str
        :param MetaDBInfo: Self-built metadata database information.
        :type MetaDBInfo: :class:`tencentcloud.emr.v20190103.models.CustomMetaInfo`
        """
        self.MetaType = None
        self.UnifyMetaInstanceId = None
        self.MetaDBInfo = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.MetaType = params.get("MetaType")
        self.UnifyMetaInstanceId = params.get("UnifyMetaInstanceId")
        if params.get("MetaDBInfo") is not None:
            self.MetaDBInfo = CustomMetaInfo()
            self.MetaDBInfo._deserialize(params.get("MetaDBInfo"))
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class MultiDisk(AbstractModel):
    """Multi cloud-disk parameters."""

    def __init__(self):
        """
        :param DiskType: Cloud disk type, one of "CLOUD_PREMIUM", "CLOUD_SSD", "CLOUD_BASIC".
        :type DiskType: str
        :param Volume: Cloud disk size.
        :type Volume: int
        :param Count: Number of disks of this type.
        :type Count: int
        """
        self.DiskType = None
        self.Volume = None
        self.Count = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.DiskType = params.get("DiskType")
        self.Volume = params.get("Volume")
        self.Count = params.get("Count")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class MultiDiskMC(AbstractModel):
    """Multi cloud-disk parameters (MC variant)."""

    def __init__(self):
        """
        :param Count: Number of disks of this type. May be null.
        :type Count: int
        :param Type: Disk type. May be null.
        :type Type: int
        :param Volume: Cloud disk size. May be null.
        :type Volume: int
        """
        self.Count = None
        self.Type = None
        self.Volume = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.Count = params.get("Count")
        self.Type = params.get("Type")
        self.Volume = params.get("Volume")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class NewResourceSpec(AbstractModel):
    """Resource specification for a new cluster."""

    def __init__(self):
        """
        :param MasterResourceSpec: Resource spec for Master nodes.
        :type MasterResourceSpec: :class:`tencentcloud.emr.v20190103.models.Resource`
        :param CoreResourceSpec: Resource spec for Core nodes.
        :type CoreResourceSpec: :class:`tencentcloud.emr.v20190103.models.Resource`
        :param TaskResourceSpec: Resource spec for Task nodes.
        :type TaskResourceSpec: :class:`tencentcloud.emr.v20190103.models.Resource`
        :param MasterCount: Number of Master nodes.
        :type MasterCount: int
        :param CoreCount: Number of Core nodes.
        :type CoreCount: int
        :param TaskCount: Number of Task nodes.
        :type TaskCount: int
        :param CommonResourceSpec: Resource spec for Common nodes.
        :type CommonResourceSpec: :class:`tencentcloud.emr.v20190103.models.Resource`
        :param CommonCount: Number of Common nodes.
        :type CommonCount: int
        """
        self.MasterResourceSpec = None
        self.CoreResourceSpec = None
        self.TaskResourceSpec = None
        self.MasterCount = None
        self.CoreCount = None
        self.TaskCount = None
        self.CommonResourceSpec = None
        self.CommonCount = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        if params.get("MasterResourceSpec") is not None:
            self.MasterResourceSpec = Resource()
            self.MasterResourceSpec._deserialize(params.get("MasterResourceSpec"))
        if params.get("CoreResourceSpec") is not None:
            self.CoreResourceSpec = Resource()
            self.CoreResourceSpec._deserialize(params.get("CoreResourceSpec"))
        if params.get("TaskResourceSpec") is not None:
            self.TaskResourceSpec = Resource()
            self.TaskResourceSpec._deserialize(params.get("TaskResourceSpec"))
        self.MasterCount = params.get("MasterCount")
        self.CoreCount = params.get("CoreCount")
        self.TaskCount = params.get("TaskCount")
        if params.get("CommonResourceSpec") is not None:
            self.CommonResourceSpec = Resource()
            self.CommonResourceSpec._deserialize(params.get("CommonResourceSpec"))
        self.CommonCount = params.get("CommonCount")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class NodeHardwareInfo(AbstractModel):
    """Node hardware information.

    All fields may be null when the backend cannot provide a value.
    """

    def __init__(self):
        """
        :param AppId: User APPID.
        :type AppId: int
        :param SerialNo: Serial number.
        :type SerialNo: str
        :param OrderNo: Machine instance ID.
        :type OrderNo: str
        :param WanIp: Public IP bound to the master node.
        :type WanIp: str
        :param Flag: Node type. 0: common; 1: master; 2: core; 3: task.
        :type Flag: int
        :param Spec: Node spec.
        :type Spec: str
        :param CpuNum: Number of CPU cores.
        :type CpuNum: int
        :param MemSize: Memory size.
        :type MemSize: int
        :param MemDesc: Memory description.
        :type MemDesc: str
        :param RegionId: Region of the node.
        :type RegionId: int
        :param ZoneId: Zone of the node.
        :type ZoneId: int
        :param ApplyTime: Creation (apply) time.
        :type ApplyTime: str
        :param FreeTime: Release time.
        :type FreeTime: str
        :param DiskSize: Disk size.
        :type DiskSize: str
        :param NameTag: Node description.
        :type NameTag: str
        :param Services: Services deployed on the node.
        :type Services: str
        :param StorageType: Disk type.
        :type StorageType: int
        :param RootSize: System disk size.
        :type RootSize: int
        :param ChargeType: Billing type.
        :type ChargeType: int
        :param CdbIp: Database IP.
        :type CdbIp: str
        :param CdbPort: Database port.
        :type CdbPort: int
        :param HwDiskSize: Hardware disk capacity.
        :type HwDiskSize: int
        :param HwDiskSizeDesc: Hardware disk capacity description.
        :type HwDiskSizeDesc: str
        :param HwMemSize: Hardware memory capacity.
        :type HwMemSize: int
        :param HwMemSizeDesc: Hardware memory capacity description.
        :type HwMemSizeDesc: str
        :param ExpireTime: Expiration time.
        :type ExpireTime: str
        :param EmrResourceId: Node resource ID.
        :type EmrResourceId: str
        :param IsAutoRenew: Auto-renewal flag.
        :type IsAutoRenew: int
        :param DeviceClass: Device class identifier.
        :type DeviceClass: str
        :param Mutable: Whether reconfiguration is supported.
        :type Mutable: int
        :param MCMultiDisk: Multiple cloud disks.
        :type MCMultiDisk: list of MultiDiskMC
        :param CdbNodeInfo: Database information.
        :type CdbNodeInfo: :class:`tencentcloud.emr.v20190103.models.CdbInfo`
        :param Ip: Private IP.
        :type Ip: str
        :param Destroyable: Whether the node can be destroyed. 1: yes; 0: no.
        :type Destroyable: int
        :param Tags: Tags bound to the node.
        :type Tags: list of Tag
        :param AutoFlag: Whether this is an auto-scaling node. 0: normal node; 1: auto-scaling node.
        :type AutoFlag: int
        :param HardwareResourceType: Resource type, "host" or "pod".
        :type HardwareResourceType: str
        :param IsDynamicSpec: Whether the spec is floating. 1: yes; 0: no.
        :type IsDynamicSpec: int
        :param DynamicPodSpec: Floating spec as a JSON string.
        :type DynamicPodSpec: str
        """
        self.AppId = None
        self.SerialNo = None
        self.OrderNo = None
        self.WanIp = None
        self.Flag = None
        self.Spec = None
        self.CpuNum = None
        self.MemSize = None
        self.MemDesc = None
        self.RegionId = None
        self.ZoneId = None
        self.ApplyTime = None
        self.FreeTime = None
        self.DiskSize = None
        self.NameTag = None
        self.Services = None
        self.StorageType = None
        self.RootSize = None
        self.ChargeType = None
        self.CdbIp = None
        self.CdbPort = None
        self.HwDiskSize = None
        self.HwDiskSizeDesc = None
        self.HwMemSize = None
        self.HwMemSizeDesc = None
        self.ExpireTime = None
        self.EmrResourceId = None
        self.IsAutoRenew = None
        self.DeviceClass = None
        self.Mutable = None
        self.MCMultiDisk = None
        self.CdbNodeInfo = None
        self.Ip = None
        self.Destroyable = None
        self.Tags = None
        self.AutoFlag = None
        self.HardwareResourceType = None
        self.IsDynamicSpec = None
        self.DynamicPodSpec = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.AppId = params.get("AppId")
        self.SerialNo = params.get("SerialNo")
        self.OrderNo = params.get("OrderNo")
        self.WanIp = params.get("WanIp")
        self.Flag = params.get("Flag")
        self.Spec = params.get("Spec")
        self.CpuNum = params.get("CpuNum")
        self.MemSize = params.get("MemSize")
        self.MemDesc = params.get("MemDesc")
        self.RegionId = params.get("RegionId")
        self.ZoneId = params.get("ZoneId")
        self.ApplyTime = params.get("ApplyTime")
        self.FreeTime = params.get("FreeTime")
        self.DiskSize = params.get("DiskSize")
        self.NameTag = params.get("NameTag")
        self.Services = params.get("Services")
        self.StorageType = params.get("StorageType")
        self.RootSize = params.get("RootSize")
        self.ChargeType = params.get("ChargeType")
        self.CdbIp = params.get("CdbIp")
        self.CdbPort = params.get("CdbPort")
        self.HwDiskSize = params.get("HwDiskSize")
        self.HwDiskSizeDesc = params.get("HwDiskSizeDesc")
        self.HwMemSize = params.get("HwMemSize")
        self.HwMemSizeDesc = params.get("HwMemSizeDesc")
        self.ExpireTime = params.get("ExpireTime")
        self.EmrResourceId = params.get("EmrResourceId")
        self.IsAutoRenew = params.get("IsAutoRenew")
        self.DeviceClass = params.get("DeviceClass")
        self.Mutable = params.get("Mutable")
        if params.get("MCMultiDisk") is not None:
            self.MCMultiDisk = []
            for item in params.get("MCMultiDisk"):
                obj = MultiDiskMC()
                obj._deserialize(item)
                self.MCMultiDisk.append(obj)
        if params.get("CdbNodeInfo") is not None:
            self.CdbNodeInfo = CdbInfo()
            self.CdbNodeInfo._deserialize(params.get("CdbNodeInfo"))
        self.Ip = params.get("Ip")
        self.Destroyable = params.get("Destroyable")
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = Tag()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.AutoFlag = params.get("AutoFlag")
        self.HardwareResourceType = params.get("HardwareResourceType")
        self.IsDynamicSpec = params.get("IsDynamicSpec")
        self.DynamicPodSpec = params.get("DynamicPodSpec")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class OutterResource(AbstractModel):
    """Resource details.

    All fields may be null when the backend cannot provide a value.
    """

    def __init__(self):
        """
        :param Spec: Spec.
        :type Spec: str
        :param SpecName: Spec name.
        :type SpecName: str
        :param StorageType: Disk type (numeric).
        :type StorageType: int
        :param DiskType: Disk type (string).
        :type DiskType: str
        :param RootSize: System disk size.
        :type RootSize: int
        :param MemSize: Memory size.
        :type MemSize: int
        :param Cpu: Number of CPUs.
        :type Cpu: int
        :param DiskSize: Disk size.
        :type DiskSize: int
        :param InstanceType: Instance type.
        :type InstanceType: str
        """
        self.Spec = None
        self.SpecName = None
        self.StorageType = None
        self.DiskType = None
        self.RootSize = None
        self.MemSize = None
        self.Cpu = None
        self.DiskSize = None
        self.InstanceType = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.Spec = params.get("Spec")
        self.SpecName = params.get("SpecName")
        self.StorageType = params.get("StorageType")
        self.DiskType = params.get("DiskType")
        self.RootSize = params.get("RootSize")
        self.MemSize = params.get("MemSize")
        self.Cpu = params.get("Cpu")
        self.DiskSize = params.get("DiskSize")
        self.InstanceType = params.get("InstanceType")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class PersistentVolumeContext(AbstractModel):
    """Pod PVC storage description."""

    def __init__(self):
        """
        :param DiskSize: Disk size in GB. May be null.
        :type DiskSize: int
        :param DiskType: Disk type: CLOUD_PREMIUM or CLOUD_SSD. May be null.
        :type DiskType: str
        :param DiskNum: Number of disks. May be null.
        :type DiskNum: int
        """
        self.DiskSize = None
        self.DiskType = None
        self.DiskNum = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.DiskSize = params.get("DiskSize")
        self.DiskType = params.get("DiskType")
        self.DiskNum = params.get("DiskNum")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class Placement(AbstractModel):
    """Location information of a cluster instance."""

    def __init__(self):
        """
        :param ProjectId: Project ID the instance belongs to; obtainable from the
        projectId field of the DescribeProject response. 0 means the default project.
        :type ProjectId: int
        :param Zone: Availability zone of the instance, e.g. ap-guangzhou-1;
        obtainable from the Zone field of the DescribeZones response.
        :type Zone: str
        """
        self.ProjectId = None
        self.Zone = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.ProjectId = params.get("ProjectId")
        self.Zone = params.get("Zone")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class PodParameter(AbstractModel):
    """Pod custom permissions and custom parameters."""

    def __init__(self):
        """
        :param ClusterId: TKE or EKS cluster ID.
        :type ClusterId: str
        :param Config: Custom permissions.
        :type Config: str
        :param Parameter: Custom parameters.
        :type Parameter: str
        """
        self.ClusterId = None
        self.Config = None
        self.Parameter = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.ClusterId = params.get("ClusterId")
        self.Config = params.get("Config")
        self.Parameter = params.get("Parameter")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class PodSpec(AbstractModel):
    """Resource description used when scaling out container resources."""

    def __init__(self):
        """
        :param ResourceProviderIdentifier: Identifier of the external resource provider,
        e.g. "cls-a1cd23fa".
        :type ResourceProviderIdentifier: str
        :param ResourceProviderType: External resource provider type, e.g. "tke";
        currently only "tke" is supported.
        :type ResourceProviderType: str
        :param NodeType: Purpose of the resource (node type); currently only "TASK".
        :type NodeType: str
        :param Cpu: Number of CPU cores.
        :type Cpu: int
        :param Memory: Memory size in GB.
        :type Memory: int
        :param DataVolumes: Host mount points used as data directories in the Pod. Deprecated.
        :type DataVolumes: list of str
        :param CpuType: EKS cluster CPU type; "intel" and "amd" are supported.
        :type CpuType: str
        :param PodVolumes: Pod data-directory mount information.
        :type PodVolumes: list of PodVolume
        :param IsDynamicSpec: Whether the spec is floating. 1: yes; 0: no.
        :type IsDynamicSpec: int
        :param DynamicPodSpec: Floating spec. May be null.
        :type DynamicPodSpec: :class:`tencentcloud.emr.v20190103.models.DynamicPodSpec`
        """
        self.ResourceProviderIdentifier = None
        self.ResourceProviderType = None
        self.NodeType = None
        self.Cpu = None
        self.Memory = None
        self.DataVolumes = None
        self.CpuType = None
        self.PodVolumes = None
        self.IsDynamicSpec = None
        self.DynamicPodSpec = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.ResourceProviderIdentifier = params.get("ResourceProviderIdentifier")
        self.ResourceProviderType = params.get("ResourceProviderType")
        self.NodeType = params.get("NodeType")
        self.Cpu = params.get("Cpu")
        self.Memory = params.get("Memory")
        self.DataVolumes = params.get("DataVolumes")
        self.CpuType = params.get("CpuType")
        if params.get("PodVolumes") is not None:
            self.PodVolumes = []
            for item in params.get("PodVolumes"):
                obj = PodVolume()
                obj._deserialize(item)
                self.PodVolumes.append(obj)
        self.IsDynamicSpec = params.get("IsDynamicSpec")
        if params.get("DynamicPodSpec") is not None:
            self.DynamicPodSpec = DynamicPodSpec()
            self.DynamicPodSpec._deserialize(params.get("DynamicPodSpec"))
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class PodVolume(AbstractModel):
    """Storage device description of a Pod."""

    def __init__(self):
        """
        :param VolumeType: Storage type, "pvc" or "hostpath". May be null.
        :type VolumeType: str
        :param PVCVolume: Effective when VolumeType is "pvc". May be null.
        :type PVCVolume: :class:`tencentcloud.emr.v20190103.models.PersistentVolumeContext`
        :param HostVolume: Effective when VolumeType is "hostpath". May be null.
        :type HostVolume: :class:`tencentcloud.emr.v20190103.models.HostVolumeContext`
        """
        self.VolumeType = None
        self.PVCVolume = None
        self.HostVolume = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.VolumeType = params.get("VolumeType")
        if params.get("PVCVolume") is not None:
            self.PVCVolume = PersistentVolumeContext()
            self.PVCVolume._deserialize(params.get("PVCVolume"))
        if params.get("HostVolume") is not None:
            self.HostVolume = HostVolumeContext()
            self.HostVolume._deserialize(params.get("HostVolume"))
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class PreExecuteFileSettings(AbstractModel):
    """Pre-execution script configuration."""

    def __init__(self):
        """
        :param Path: COS path of the script. Deprecated.
        :type Path: str
        :param Args: Script arguments.
        :type Args: list of str
        :param Bucket: COS bucket name. Deprecated.
        :type Bucket: str
        :param Region: COS region name. Deprecated.
        :type Region: str
        :param Domain: COS domain data. Deprecated.
        :type Domain: str
        :param RunOrder: Execution order.
        :type RunOrder: int
        :param WhenRun: "resourceAfter" or "clusterAfter".
        :type WhenRun: str
        :param CosFileName: Script file name. Deprecated.
        :type CosFileName: str
        :param CosFileURI: COS URI of the script.
        :type CosFileURI: str
        :param CosSecretId: COS SecretId.
        :type CosSecretId: str
        :param CosSecretKey: COS SecretKey.
        :type CosSecretKey: str
        :param AppId: COS appid. Deprecated.
        :type AppId: str
        """
        self.Path = None
        self.Args = None
        self.Bucket = None
        self.Region = None
        self.Domain = None
        self.RunOrder = None
        self.WhenRun = None
        self.CosFileName = None
        self.CosFileURI = None
        self.CosSecretId = None
        self.CosSecretKey = None
        self.AppId = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.Path = params.get("Path")
        self.Args = params.get("Args")
        self.Bucket = params.get("Bucket")
        self.Region = params.get("Region")
        self.Domain = params.get("Domain")
        self.RunOrder = params.get("RunOrder")
        self.WhenRun = params.get("WhenRun")
        self.CosFileName = params.get("CosFileName")
        self.CosFileURI = params.get("CosFileURI")
        self.CosSecretId = params.get("CosSecretId")
        self.CosSecretKey = params.get("CosSecretKey")
        self.AppId = params.get("AppId")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class PriceResource(AbstractModel):
    """Resource used for price inquiry.

    All fields may be null when the backend cannot provide a value.
    """

    def __init__(self):
        """
        :param Spec: Required spec.
        :type Spec: str
        :param StorageType: Disk type (numeric).
        :type StorageType: int
        :param DiskType: Disk type (string).
        :type DiskType: str
        :param RootSize: System disk size.
        :type RootSize: int
        :param MemSize: Memory size.
        :type MemSize: int
        :param Cpu: Number of cores.
        :type Cpu: int
        :param DiskSize: Disk size.
        :type DiskSize: int
        :param MultiDisks: Cloud disk list.
        :type MultiDisks: list of MultiDisk
        :param DiskCnt: Number of disks.
        :type DiskCnt: int
        :param InstanceType: Instance type.
        :type InstanceType: str
        :param Tags: Tags.
        :type Tags: list of Tag
        :param DiskNum: Number of disks.
        :type DiskNum: int
        :param LocalDiskNum: Number of local disks.
        :type LocalDiskNum: int
        """
        self.Spec = None
        self.StorageType = None
        self.DiskType = None
        self.RootSize = None
        self.MemSize = None
        self.Cpu = None
        self.DiskSize = None
        self.MultiDisks = None
        self.DiskCnt = None
        self.InstanceType = None
        self.Tags = None
        self.DiskNum = None
        self.LocalDiskNum = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.Spec = params.get("Spec")
        self.StorageType = params.get("StorageType")
        self.DiskType = params.get("DiskType")
        self.RootSize = params.get("RootSize")
        self.MemSize = params.get("MemSize")
        self.Cpu = params.get("Cpu")
        self.DiskSize = params.get("DiskSize")
        if params.get("MultiDisks") is not None:
            self.MultiDisks = []
            for item in params.get("MultiDisks"):
                obj = MultiDisk()
                obj._deserialize(item)
                self.MultiDisks.append(obj)
        self.DiskCnt = params.get("DiskCnt")
        self.InstanceType = params.get("InstanceType")
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = Tag()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.DiskNum = params.get("DiskNum")
        self.LocalDiskNum = params.get("LocalDiskNum")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class RenewInstancesInfo(AbstractModel):
    """Information about cluster instances being renewed."""

    def __init__(self):
        """
        :param EmrResourceId: Node resource ID.
        :type EmrResourceId: str
        :param Flag: Node type. 0: common; 1: master; 2: core; 3: task.
        :type Flag: int
        :param Ip: Private IP.
        :type Ip: str
        :param MemDesc: Node memory description.
        :type MemDesc: str
        :param CpuNum: Number of CPU cores.
        :type CpuNum: int
        :param DiskSize: Disk size.
        :type DiskSize: str
        :param ExpireTime: Expiration time.
        :type ExpireTime: str
        :param Spec: Node spec.
        :type Spec: str
        :param StorageType: Disk type.
        :type StorageType: int
        """
        self.EmrResourceId = None
        self.Flag = None
        self.Ip = None
        self.MemDesc = None
        self.CpuNum = None
        self.DiskSize = None
        self.ExpireTime = None
        self.Spec = None
        self.StorageType = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.EmrResourceId = params.get("EmrResourceId")
        self.Flag = params.get("Flag")
        self.Ip = params.get("Ip")
        self.MemDesc = params.get("MemDesc")
        self.CpuNum = params.get("CpuNum")
        self.DiskSize = params.get("DiskSize")
        self.ExpireTime = params.get("ExpireTime")
        self.Spec = params.get("Spec")
        self.StorageType = params.get("StorageType")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class Resource(AbstractModel):
    """Resource details.

    All fields may be null when the backend cannot provide a value.
    """

    def __init__(self):
        """
        :param Spec: Node spec description.
        :type Spec: str
        :param StorageType: Storage type.
        :type StorageType: int
        :param DiskType: Disk type.
        :type DiskType: str
        :param MemSize: Memory size in MB.
        :type MemSize: int
        :param Cpu: Number of CPU cores.
        :type Cpu: int
        :param DiskSize: Data disk size.
        :type DiskSize: int
        :param RootSize: System disk size.
        :type RootSize: int
        :param MultiDisks: Cloud disk list; a single data disk uses DiskType/DiskSize
        directly, additional disks go into MultiDisks.
        :type MultiDisks: list of MultiDisk
        :param Tags: Tags to bind.
        :type Tags: list of Tag
        :param InstanceType: Spec type.
        :type InstanceType: str
        :param LocalDiskNum: Number of local disks.
        :type LocalDiskNum: int
        :param DiskNum: Number of disks.
        :type DiskNum: int
        """
        self.Spec = None
        self.StorageType = None
        self.DiskType = None
        self.MemSize = None
        self.Cpu = None
        self.DiskSize = None
        self.RootSize = None
        self.MultiDisks = None
        self.Tags = None
        self.InstanceType = None
        self.LocalDiskNum = None
        self.DiskNum = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.Spec = params.get("Spec")
        self.StorageType = params.get("StorageType")
        self.DiskType = params.get("DiskType")
        self.MemSize = params.get("MemSize")
        self.Cpu = params.get("Cpu")
        self.DiskSize = params.get("DiskSize")
        self.RootSize = params.get("RootSize")
        if params.get("MultiDisks") is not None:
            self.MultiDisks = []
            for item in params.get("MultiDisks"):
                obj = MultiDisk()
                obj._deserialize(item)
                self.MultiDisks.append(obj)
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = Tag()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.InstanceType = params.get("InstanceType")
        self.LocalDiskNum = params.get("LocalDiskNum")
        self.DiskNum = params.get("DiskNum")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class RunJobFlowRequest(AbstractModel):
    """RunJobFlow request structure."""

    def __init__(self):
        """
        :param Name: Job name.
        :type Name: str
        :param CreateCluster: Whether to create a new cluster.
        true: create a new cluster using the parameters in Instance.
        false: use an existing cluster, passed via InstanceId.
        :type CreateCluster: bool
        :param Steps: Steps of the job flow.
        :type Steps: list of Step
        :param InstancePolicy: How the cluster is handled when the job flow completes
        normally: "Terminate" destroys the cluster, "Reserve" keeps it.
        :type InstancePolicy: str
        :param ProductVersion: Effective only when CreateCluster is true; currently
        only EMR versions such as EMR-2.2.0 are supported (not ClickHouse or Druid).
        :type ProductVersion: str
        :param SecurityClusterFlag: Effective only when CreateCluster is true.
        true installs kerberos, false does not.
        :type SecurityClusterFlag: bool
        :param Software: Effective only when CreateCluster is true; software to
        install on the new cluster.
        :type Software: list of str
        :param BootstrapActions: Bootstrap scripts.
        :type BootstrapActions: list of BootstrapAction
        :param Configurations: Configuration used to create the cluster.
        :type Configurations: list of Configuration
        :param LogUri: Location where job logs are stored.
        :type LogUri: str
        :param InstanceId: Effective only when CreateCluster is false.
        :type InstanceId: str
        :param ApplicationRole: Custom application role used when big-data
        applications access external services; defaults to "EME_QCSRole".
        :type ApplicationRole: str
        :param ClientToken: Idempotency token to prevent creating identical job
        flows within a period of time.
        :type ClientToken: str
        :param Instance: Effective only when CreateCluster is true; cluster is
        created with this configuration.
        :type Instance: :class:`tencentcloud.emr.v20190103.models.ClusterSetting`
        """
        self.Name = None
        self.CreateCluster = None
        self.Steps = None
        self.InstancePolicy = None
        self.ProductVersion = None
        self.SecurityClusterFlag = None
        self.Software = None
        self.BootstrapActions = None
        self.Configurations = None
        self.LogUri = None
        self.InstanceId = None
        self.ApplicationRole = None
        self.ClientToken = None
        self.Instance = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.Name = params.get("Name")
        self.CreateCluster = params.get("CreateCluster")
        if params.get("Steps") is not None:
            self.Steps = []
            for item in params.get("Steps"):
                obj = Step()
                obj._deserialize(item)
                self.Steps.append(obj)
        self.InstancePolicy = params.get("InstancePolicy")
        self.ProductVersion = params.get("ProductVersion")
        self.SecurityClusterFlag = params.get("SecurityClusterFlag")
        self.Software = params.get("Software")
        if params.get("BootstrapActions") is not None:
            self.BootstrapActions = []
            for item in params.get("BootstrapActions"):
                obj = BootstrapAction()
                obj._deserialize(item)
                self.BootstrapActions.append(obj)
        if params.get("Configurations") is not None:
            self.Configurations = []
            for item in params.get("Configurations"):
                obj = Configuration()
                obj._deserialize(item)
                self.Configurations.append(obj)
        self.LogUri = params.get("LogUri")
        self.InstanceId = params.get("InstanceId")
        self.ApplicationRole = params.get("ApplicationRole")
        self.ClientToken = params.get("ClientToken")
        if params.get("Instance") is not None:
            self.Instance = ClusterSetting()
            self.Instance._deserialize(params.get("Instance"))
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class RunJobFlowResponse(AbstractModel):
    """RunJobFlow response structure."""

    def __init__(self):
        """
        :param JobFlowId: Job flow ID.
        :type JobFlowId: int
        :param RequestId: Unique request ID, returned with every request; needed
        when locating a problem.
        :type RequestId: str
        """
        self.JobFlowId = None
        self.RequestId = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.JobFlowId = params.get("JobFlowId")
        self.RequestId = params.get("RequestId")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class ScaleOutInstanceRequest(AbstractModel):
    """ScaleOutInstance request structure."""

    def __init__(self):
        """
        :param TimeUnit: Time unit of the scale-out:
        <li>s: seconds. When PayMode is 0, TimeUnit can only be s.</li>
        <li>m: months. When PayMode is 1, TimeUnit can only be m.</li>
        :type TimeUnit: str
        :param TimeSpan: Duration of the scale-out, used with TimeUnit.
        <li>When TimeUnit is s, this can only be 3600 (pay-as-you-go).</li>
        <li>When TimeUnit is m, this is the purchase duration in months for
        monthly-subscription instances, e.g. 1 means one month.</li>
        :type TimeSpan: int
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param PayMode: Billing mode: 0 pay-as-you-go, 1 monthly subscription.
        :type PayMode: int
        :param ClientToken: Client token.
        :type ClientToken: str
        :param PreExecutedFileSettings: Bootstrap script settings.
        :type PreExecutedFileSettings: list of PreExecuteFileSettings
        :param TaskCount: Number of Task nodes to add.
        :type TaskCount: int
        :param CoreCount: Number of Core nodes to add.
        :type CoreCount: int
        :param UnNecessaryNodeList: Processes not to install during scale-out.
        :type UnNecessaryNodeList: list of int non-negative
        :param RouterCount: Number of Router nodes to add.
        :type RouterCount: int
        :param SoftDeployInfo: Services to deploy.
        <li>SoftDeployInfo and ServiceNodeInfo form a pair and are mutually
        exclusive with UnNecessaryNodeList.</li>
        <li>Using SoftDeployInfo with ServiceNodeInfo is recommended.</li>
        :type SoftDeployInfo: list of int non-negative
        :param ServiceNodeInfo: Processes to start.
        :type ServiceNodeInfo: list of int non-negative
        :param DisasterRecoverGroupIds: Spread placement group IDs; currently only
        one may be specified.
        :type DisasterRecoverGroupIds: list of str
        :param Tags: Tags to bind to the added nodes.
        :type Tags: list of Tag
        :param HardwareResourceType: Resource type of the scale-out, "host" or
        "pod"; host is an ordinary CVM resource, pod is provided by a TKE cluster.
        :type HardwareResourceType: str
        :param PodSpec: Pod spec and provenance when scaling out with Pod resources.
        :type PodSpec: :class:`tencentcloud.emr.v20190103.models.PodSpec`
        :param ClickHouseClusterName: Machine group name chosen when scaling out a
        ClickHouse cluster.
        :type ClickHouseClusterName: str
        :param ClickHouseClusterType: Machine group type chosen when scaling out a
        ClickHouse cluster: "new" to add a group, "old" to reuse an existing one.
        :type ClickHouseClusterType: str
        :param YarnNodeLabel: Yarn node label specified for rule-based scale-out.
        :type YarnNodeLabel: str
        :param PodParameter: Pod custom permissions and parameters.
        :type PodParameter: :class:`tencentcloud.emr.v20190103.models.PodParameter`
        :param MasterCount: Number of Master nodes to add.
        :type MasterCount: int
        """
        self.TimeUnit = None
        self.TimeSpan = None
        self.InstanceId = None
        self.PayMode = None
        self.ClientToken = None
        self.PreExecutedFileSettings = None
        self.TaskCount = None
        self.CoreCount = None
        self.UnNecessaryNodeList = None
        self.RouterCount = None
        self.SoftDeployInfo = None
        self.ServiceNodeInfo = None
        self.DisasterRecoverGroupIds = None
        self.Tags = None
        self.HardwareResourceType = None
        self.PodSpec = None
        self.ClickHouseClusterName = None
        self.ClickHouseClusterType = None
        self.YarnNodeLabel = None
        self.PodParameter = None
        self.MasterCount = None

    def _deserialize(self, params):
        """Populate this model from an API response dict."""
        self.TimeUnit = params.get("TimeUnit")
        self.TimeSpan = params.get("TimeSpan")
        self.InstanceId = params.get("InstanceId")
        self.PayMode = params.get("PayMode")
        self.ClientToken = params.get("ClientToken")
        if params.get("PreExecutedFileSettings") is not None:
            self.PreExecutedFileSettings = []
            for item in params.get("PreExecutedFileSettings"):
                obj = PreExecuteFileSettings()
                obj._deserialize(item)
                self.PreExecutedFileSettings.append(obj)
        self.TaskCount = params.get("TaskCount")
        self.CoreCount = params.get("CoreCount")
        self.UnNecessaryNodeList = params.get("UnNecessaryNodeList")
        self.RouterCount = params.get("RouterCount")
        self.SoftDeployInfo = params.get("SoftDeployInfo")
        self.ServiceNodeInfo = params.get("ServiceNodeInfo")
        self.DisasterRecoverGroupIds = params.get("DisasterRecoverGroupIds")
        if params.get("Tags") is not None:
            self.Tags = []
            for item in params.get("Tags"):
                obj = Tag()
                obj._deserialize(item)
                self.Tags.append(obj)
        self.HardwareResourceType = params.get("HardwareResourceType")
        if params.get("PodSpec") is not None:
            self.PodSpec = PodSpec()
            self.PodSpec._deserialize(params.get("PodSpec"))
        self.ClickHouseClusterName = params.get("ClickHouseClusterName")
        self.ClickHouseClusterType = params.get("ClickHouseClusterType")
        self.YarnNodeLabel = params.get("YarnNodeLabel")
        if params.get("PodParameter") is not None:
            self.PodParameter = PodParameter()
            self.PodParameter._deserialize(params.get("PodParameter"))
        self.MasterCount = params.get("MasterCount")
        # Warn about response keys this SDK version does not model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if unknown:
            warnings.warn("%s fields are useless." % ",".join(unknown), Warning)
class ScaleOutInstanceResponse(AbstractModel):
    """Response body of the ScaleOutInstance API call."""

    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param DealNames: Order numbers. Note: may be null when unavailable.
        :type DealNames: list of str
        :param ClientToken: Client token. Note: may be null when unavailable.
        :type ClientToken: str
        :param FlowId: ID of the scale-out workflow. Note: may be null when unavailable.
        :type FlowId: int
        :param BillId: Parent order number. Note: may be null when unavailable.
        :type BillId: str
        :param RequestId: Unique request ID returned with every call; supply it
            when reporting a problem with this request.
        :type RequestId: str
        """
        self.InstanceId = None
        self.DealNames = None
        self.ClientToken = None
        self.FlowId = None
        self.BillId = None
        self.RequestId = None

    def _deserialize(self, params):
        # Copy each known field straight out of the response dict.
        for field in ("InstanceId", "DealNames", "ClientToken",
                      "FlowId", "BillId", "RequestId"):
            setattr(self, field, params.get(field))
        # Surface any keys the server sent that this model does not know.
        leftover = set(params.keys()) - set(vars(self).keys())
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class SearchItem(AbstractModel):
    """A single search criterion: the searchable field type plus its value."""

    def __init__(self):
        """
        :param SearchType: Type of field that can be searched.
        :type SearchType: str
        :param SearchValue: Value searched for.
        :type SearchValue: str
        """
        self.SearchType = None
        self.SearchValue = None

    def _deserialize(self, params):
        for field in ("SearchType", "SearchValue"):
            setattr(self, field, params.get(field))
        # Flag request keys that have no matching attribute on this model.
        unknown = set(params.keys()) - set(vars(self).keys())
        if len(unknown) > 0:
            warnings.warn("%s fileds are useless." % ",".join(unknown), Warning)
class Step(AbstractModel):
    """A single execution step of a job flow."""
    def __init__(self):
        """
        :param Name: Name of the execution step.
        :type Name: str
        :param ExecutionStep: Action to execute.
        :type ExecutionStep: :class:`tencentcloud.emr.v20190103.models.Execution`
        :param ActionOnFailure: Failure policy:
        1. TERMINATE_CLUSTER - exit and destroy the cluster on failure.
        2. CANCEL_AND_WAIT - block and wait on failure.
        3. CONTINUE - skip the failed step and run the remaining ones.
        :type ActionOnFailure: str
        :param User: User name to run the step as; optional, defaults to hadoop.
        :type User: str
        """
        self.Name = None
        self.ExecutionStep = None
        self.ActionOnFailure = None
        self.User = None
    def _deserialize(self, params):
        self.Name = params.get("Name")
        # Nested object: decode into an Execution model when present.
        if params.get("ExecutionStep") is not None:
            self.ExecutionStep = Execution()
            self.ExecutionStep._deserialize(params.get("ExecutionStep"))
        self.ActionOnFailure = params.get("ActionOnFailure")
        self.User = params.get("User")
        # Warn about keys that have no matching attribute on this model.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class Tag(AbstractModel):
    """A key/value tag attached to a cloud resource."""

    def __init__(self):
        """
        :param TagKey: Tag key.
        :type TagKey: str
        :param TagValue: Tag value.
        :type TagValue: str
        """
        self.TagKey = None
        self.TagValue = None

    def _deserialize(self, params):
        for field in ("TagKey", "TagValue"):
            setattr(self, field, params.get(field))
        # Surface any response keys this model does not recognize.
        leftover = set(params.keys()) - set(vars(self).keys())
        if len(leftover) > 0:
            warnings.warn("%s fileds are useless." % ",".join(leftover), Warning)
class TerminateInstanceRequest(AbstractModel):
    """Request body of the TerminateInstance API call."""
    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param ResourceIds: IDs of the nodes to destroy. Reserved parameter;
            users do not need to set it.
        :type ResourceIds: list of str
        """
        self.InstanceId = None
        self.ResourceIds = None
    def _deserialize(self, params):
        self.InstanceId = params.get("InstanceId")
        self.ResourceIds = params.get("ResourceIds")
        # Warn about request keys that have no matching model attribute.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class TerminateInstanceResponse(AbstractModel):
    """Response body of the TerminateInstance API call."""

    def __init__(self):
        """
        :param RequestId: Unique request ID returned with every call; supply it
            when reporting a problem with this request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
        # Surface any keys the model does not recognize.
        unknown = set(params.keys()) - set(vars(self).keys())
        if len(unknown) > 0:
            warnings.warn("%s fileds are useless." % ",".join(unknown), Warning)
class TerminateTasksRequest(AbstractModel):
    """Request body of the TerminateTasks API call."""
    def __init__(self):
        """
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param ResourceIds: Resource IDs of the nodes to destroy, of the form
            emr-vm-xxxxxxxx. Valid resource IDs can be looked up in the
            [console](https://console.cloud.tencent.com/emr/static/hardware).
        :type ResourceIds: list of str
        """
        self.InstanceId = None
        self.ResourceIds = None
    def _deserialize(self, params):
        self.InstanceId = params.get("InstanceId")
        self.ResourceIds = params.get("ResourceIds")
        # Warn about request keys that have no matching model attribute.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class TerminateTasksResponse(AbstractModel):
    """Response body of the TerminateTasks API call."""

    def __init__(self):
        """
        :param RequestId: Unique request ID returned with every call; supply it
            when reporting a problem with this request.
        :type RequestId: str
        """
        self.RequestId = None

    def _deserialize(self, params):
        self.RequestId = params.get("RequestId")
        extra_keys = set(params.keys()) - set(vars(self).keys())
        if len(extra_keys) > 0:
            warnings.warn("%s fileds are useless." % ",".join(extra_keys), Warning)
class UpdateInstanceSettings(AbstractModel):
    """Target resource specification for an instance re-configuration."""
    def __init__(self):
        """
        :param Memory: Memory size, in GB.
        :type Memory: int
        :param CPUCores: Number of CPU cores.
        :type CPUCores: int
        :param ResourceId: Machine resource ID (EMR-side resource identifier).
        :type ResourceId: str
        :param InstanceType: Machine specification to change to.
        :type InstanceType: str
        """
        self.Memory = None
        self.CPUCores = None
        self.ResourceId = None
        self.InstanceType = None
    def _deserialize(self, params):
        self.Memory = params.get("Memory")
        self.CPUCores = params.get("CPUCores")
        self.ResourceId = params.get("ResourceId")
        self.InstanceType = params.get("InstanceType")
        # Warn about keys that have no matching attribute on this model.
        memeber_set = set(params.keys())
        for name, value in vars(self).items():
            if name in memeber_set:
                memeber_set.remove(name)
        if len(memeber_set) > 0:
            warnings.warn("%s fileds are useless." % ",".join(memeber_set), Warning)
class VPCSettings(AbstractModel):
    """VPC parameters describing the network placement of a cluster."""

    def __init__(self):
        """
        :param VpcId: VPC ID.
        :type VpcId: str
        :param SubnetId: Subnet ID.
        :type SubnetId: str
        """
        self.VpcId = None
        self.SubnetId = None

    def _deserialize(self, params):
        for field in ("VpcId", "SubnetId"):
            setattr(self, field, params.get(field))
        # Surface any keys this model does not recognize.
        extra = set(params.keys()) - set(vars(self).keys())
        if len(extra) > 0:
            warnings.warn("%s fileds are useless." % ",".join(extra), Warning)
| [
"liugngg.gmail.com"
] | liugngg.gmail.com |
04e4e7fb2beff305b6436fb10b8bcb32563735f2 | d305e9667f18127e4a1d4d65e5370cf60df30102 | /tests/st/ops/ascend/test_aicpu_ops/test_fused_sparse_ftrl.py | f48235fbcabe2fb6abd5074475895f2115054473 | [
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | imyzx2017/mindspore_pcl | d8e5bd1f80458538d07ef0a8fc447b552bd87420 | f548c9dae106879d1a83377dd06b10d96427fd2d | refs/heads/master | 2023-01-13T22:28:42.064535 | 2020-11-18T11:15:41 | 2020-11-18T11:15:41 | 313,906,414 | 6 | 1 | Apache-2.0 | 2020-11-18T11:25:08 | 2020-11-18T10:57:26 | null | UTF-8 | Python | false | false | 1,924 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.nn as nn
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.common.parameter import Parameter
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
lr = 0.01
l1 = 0.0
l2 = 0.0
lr_power = -0.5
class Net(nn.Cell):
    """Wraps the FusedSparseFtrl op together with its var/accum/linear state.

    The three 3x3 parameters are initialized to ones; one forward pass applies
    a single sparse FTRL update to them in place.
    """
    def __init__(self):
        super(Net, self).__init__()
        # Fixed FTRL hyper-parameters for this test: lr=0.1, no L1/L2 regularization.
        # NOTE(review): the module-level lr/l1/l2/lr_power constants above are not
        # used here — the op is configured with its own literals.
        self.fused_sparse_ftrl = P.FusedSparseFtrl(lr=0.1, l1=0.0, l2=0.0, lr_power=-0.5)
        self.var = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="var")
        self.accum = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="accum")
        self.linear = Parameter(Tensor(np.ones([3, 3]).astype(np.float32)), name="linear")
    def construct(self, grad, indices):
        # Apply one sparse FTRL step for the rows selected by `indices`.
        return self.fused_sparse_ftrl(self.var, self.accum, self.linear, grad, indices)
def test_net():
    """Run one sparse FTRL update through Net and print op output and state."""
    grad_values = np.array([-3, 2, 3, 0, 0, 0, -4, -1, -2])
    gradient = Tensor(grad_values.reshape([3, 3]).astype(np.float32))
    indices = Tensor(np.ones([3]), mstype.int32)
    net = Net()
    output = net(gradient, indices)
    print(output)
    print(net.var.data)
    print(net.accum.data)
    print(net.linear.data)
| [
"513344092@qq.com"
] | 513344092@qq.com |
4976b2b73dfeae906d91da69abf0aeede0d747a4 | b1ba5707a5cbe918d33bc2082b3eb4ff1378c060 | /SDPython/tests/test_sd_Katana/AccessProperties.py | 3b442e9ec2a1a19b233491f26519bb652e48bb65 | [] | no_license | qq781217732/SubstanceDev | 2eb1d9ed48d477cf70c7bfdac2103bb884e9204c | b9ffab0a1b8f3c01783259074940b2712a8142b8 | refs/heads/master | 2023-03-26T00:43:35.047305 | 2021-03-01T04:12:28 | 2021-03-01T04:12:28 | 342,539,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,539 | py | import sd
# Import the required classes.
from sd.api.sdproperty import SDPropertyCategory
from sd.api.sdvalueserializer import SDValueSerializer
# Get and print information regarding the selected nodes.
def printSelectedNodesInfo(nodes):
    """Print label/id plus per-property details for each selected node.

    For every property category (annotation, input, output), connected
    properties are reported as such; all others have their value serialized
    and printed when non-empty.
    """
    categories = (
        SDPropertyCategory.Annotation,
        SDPropertyCategory.Input,
        SDPropertyCategory.Output,
    )
    for node in nodes:
        definition = node.getDefinition()
        print("node %s, id = %s" % (definition.getLabel(), node.getIdentifier()))
        for category in categories:
            for prop in definition.getProperties(category):
                label = prop.getLabel()
                # Connected properties carry no inline value of interest here.
                if prop.isConnectable() and node.getPropertyConnections(prop):
                    print("Propery %s is connected!!!" % label)
                    continue
                value = node.getPropertyValue(prop)
                if value:
                    print("Property %s, id = %s, value = %s"
                          % (label, prop.getId(), SDValueSerializer.sToString(value)))
"gaoyuyang@senseinn.com"
] | gaoyuyang@senseinn.com |
cca40c38589af36a682c1ef7ba42167804c19b98 | 7867e319f00994767fe748a107d927cf6f3181b8 | /src/pipx/interpreter.py | 6ec093c8237356a8ce3a267b9c1c35594b1509e9 | [
"MIT"
] | permissive | pypa/pipx | c34b687f7b88fe4e7f30971c05c466f6a0f45931 | 248fa37e7a0ea4a70a30a4352c0eb065137d3e15 | refs/heads/main | 2023-08-31T04:27:29.607704 | 2023-08-29T13:39:07 | 2023-08-29T13:39:07 | 151,871,286 | 3,264 | 190 | MIT | 2023-09-05T06:23:55 | 2018-10-06T18:47:46 | Python | UTF-8 | Python | false | false | 2,564 | py | import os
import shutil
import subprocess
import sys
from typing import Optional
from pipx.constants import WINDOWS
from pipx.util import PipxError
def has_venv() -> bool:
    """Report whether the standard-library ``venv`` module can be imported."""
    try:
        import venv  # noqa
    except ImportError:
        return False
    return True
# The following code was copied from https://github.com/uranusjr/pipx-standalone
# which uses the same technique to build a completely standalone pipx
# distribution.
#
# If we are running under the Windows embeddable distribution,
# venv isn't available (and we probably don't want to use the
# embeddable distribution as our applications' base Python anyway)
# so we try to locate the system Python and use that instead.
def find_py_launcher_python(python_version: Optional[str] = None) -> Optional[str]:
    """Locate the ``py`` launcher and, when a version is requested, resolve it
    to that version's concrete interpreter path.

    Returns None when the launcher is not on PATH; returns the launcher path
    itself when no version is given.
    """
    launcher = shutil.which("py")
    if not launcher or not python_version:
        return launcher
    # Ask the launcher which interpreter it would use for this version.
    proc = subprocess.run(
        [launcher, f"-{python_version}", "-c", "import sys; print(sys.executable)"],
        capture_output=True,
        text=True,
    )
    return proc.stdout.strip()
def _find_default_windows_python() -> str:
    """Locate a usable base Python on Windows.

    Prefers the running interpreter when it can create virtual environments
    (i.e. it is not the embeddable distribution); otherwise falls back to the
    ``py`` launcher or a ``python`` on PATH, rejecting the Windows Store stub.

    :raises PipxError: when no working interpreter can be found.
    """
    if has_venv():
        return sys.executable
    python = find_py_launcher_python() or shutil.which("python")
    if python is None:
        raise PipxError("No suitable Python found")
    # If the path contains "WindowsApps", it's the store python
    if "WindowsApps" not in python:
        return python
    # Special treatment to detect Windows Store stub.
    # https://twitter.com/zooba/status/1212454929379581952
    proc = subprocess.run(
        [python, "-V"], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL
    )
    if proc.returncode != 0:
        # Cover the 9009 return code pre-emptively.
        raise PipxError("No suitable Python found")
    if not proc.stdout.strip():
        # A real Python should print version, Windows Store stub won't.
        raise PipxError("No suitable Python found")
    return python  # This executable seems to work.
def _get_sys_executable() -> str:
    """Pick the default base interpreter for the current platform."""
    if not WINDOWS:
        return sys.executable
    # Windows needs extra care to dodge the embeddable/Store interpreters.
    return _find_default_windows_python()
def _get_absolute_python_interpreter(env_python: str) -> str:
which_python = shutil.which(env_python)
if not which_python:
raise PipxError(f"Default python interpreter '{env_python}' is invalid.")
return which_python
# Honor an explicit interpreter choice from the environment; otherwise fall
# back to a platform-appropriate default (which on Windows dodges the
# embeddable distribution and the Store stub).
env_default_python = os.environ.get("PIPX_DEFAULT_PYTHON")
if not env_default_python:
    DEFAULT_PYTHON = _get_sys_executable()
else:
    DEFAULT_PYTHON = _get_absolute_python_interpreter(env_default_python)
| [
"noreply@github.com"
] | pypa.noreply@github.com |
2a8e16d986f346e286ee2aae5a8909a6121c790e | d2c229f74a3ca61d6a22f64de51215d9e30c5c11 | /qiskit/circuit/library/data_preparation/__init__.py | fbd033996c9e0bdefd7806a92bb64a821df9335c | [
"Apache-2.0"
] | permissive | 1ucian0/qiskit-terra | 90e8be8a7b392fbb4b3aa9784c641a818a180e4c | 0b51250e219ca303654fc28a318c21366584ccd3 | refs/heads/main | 2023-08-31T07:50:33.568824 | 2023-08-22T01:52:53 | 2023-08-22T01:52:53 | 140,555,676 | 6 | 1 | Apache-2.0 | 2023-09-14T13:21:54 | 2018-07-11T09:52:28 | Python | UTF-8 | Python | false | false | 2,205 | py | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Data-encoding circuits
======================
In machine learning, pattern recognition and image processing, a **data-encoding circuit**
starts from an initial set of measured data and builds derived values (also known as
**features**) intended to be informative and non-redundant, facilitating the subsequent
learning and generalization steps, and in some cases leading to better human
interpretations.
A feature map is related to **dimensionality reduction**; it involves reducing the amount of
resources required to describe a large set of data. When performing analysis of complex data,
one of the major problems stems from the number of variables involved. Analysis with a large
number of variables generally requires a large amount of memory and computation power, and may
even cause a classification algorithm to overfit to training samples and generalize poorly to new
samples.
When the input data to an algorithm is too large to be processed and is suspected to be redundant
(for example, the same measurement is provided in both pounds and kilograms), then it can be
transformed into a reduced set of features, named a **feature vector**.
The process of determining a subset of the initial features is called **feature selection**.
The selected features are expected to contain the relevant information from the input data,
so that the desired task can be performed by using the reduced representation instead
of the complete initial data.
"""
from .pauli_feature_map import PauliFeatureMap
from .z_feature_map import ZFeatureMap
from .zz_feature_map import ZZFeatureMap
from .state_preparation import StatePreparation
__all__ = ["PauliFeatureMap", "ZFeatureMap", "ZZFeatureMap", "StatePreparation"]
| [
"noreply@github.com"
] | 1ucian0.noreply@github.com |
97690e0df7979136e3cb322a4672832e4770244b | ae646229187ab11607e4889e1cf0e380b26fae5c | /test_joyce_code/limestone/expPatientSize.py | 837f60f645e7ee64eb8ff96888828e8171e295f7 | [] | no_license | aschein/tensor_analysis | cb60caf56713cfb7191c46d3cc20c32ea591d382 | 155754be7fa8cfb97432997cb66aa37b1a7b582b | refs/heads/master | 2021-01-17T07:44:00.657311 | 2014-09-11T20:45:14 | 2014-09-11T20:45:14 | 34,183,143 | 1 | 2 | null | 2018-08-25T20:15:18 | 2015-04-18T21:19:08 | Python | UTF-8 | Python | false | false | 1,898 | py | """
Experiment to evaluate the effect of the size on computation time
"""
import time
import numpy as np
from sklearn.decomposition import RandomizedPCA
import nimfa
import argparse
import CP_APR
import sptensor
import sptenmat
# Load the original data
filename = 'data/hf-tensor-level1-data.dat'
X = sptensor.loadTensor(filename)
# Experiment constants: factorization rank, iteration budget, timing repeats.
R = 40
iters=70
samples=10
pcaModel = RandomizedPCA(n_components=R)
stats = np.zeros((1, 6))
# Patient count comes from the command line.
parser = argparse.ArgumentParser()
parser.add_argument("pat", type=int, help="number of patients")
args = parser.parse_args()
pn = args.pat
# Restrict the sparse tensor to the first `pn` patients (mode-0 subscripts).
patList = np.arange(pn)
ix = np.in1d(X.subs[:,0].ravel(), patList)
idx = np.where(ix)[0]
xprime = sptensor.sptensor(X.subs[idx, :], X.vals[idx], [pn, X.shape[1], X.shape[2]])
flatX = sptenmat.sptenmat(xprime, [0]).tocsrmat() # matricize along the first mode
# NOTE(review): this re-initialization makes the earlier `stats` assignment dead.
# The seed row of zeros is dropped again before writing results.
stats = np.zeros((1,6))
## NMF Timing
for k in range(samples):
    startTime = time.time()
    nmfModel = nimfa.mf(flatX, method="nmf", max_iter=iters, rank=R)
    nmfResult = nimfa.mf_run(nmfModel)
    elapsed = time.time() - startTime
    stats = np.vstack((stats, np.array([R, iters, pn, k, "NMF", elapsed])))
## PCA Timing
for k in range(samples):
    startTime = time.time()
    pcaModel.fit(flatX)
    elapsed = time.time() - startTime
    stats = np.vstack((stats, np.array([R, iters, pn, k, "PCA", elapsed])))
## Tensor factorization timing
for k in range(samples):
    startTime = time.time()
    CP_APR.cp_apr(xprime, R, maxiters=iters)
    elapsed = time.time() - startTime
    stats = np.vstack((stats, np.array([R, iters, pn, k, "CP_APR", elapsed])))
# Drop the all-zero seed row before writing the timing table out.
stats = np.delete(stats, (0), axis=0)
outFile = "results/patient-cpu-{0}.csv".format(pn)
np.savetxt(outFile, stats, fmt="%s", delimiter="|")
# Python 2 print statement: emits a ready-to-run MySQL LOAD DATA command.
print "load data local infile '/home/joyce/workspace/Health/analysis/tensor/{0}' into table comp_metrics fields terminated by '|' ;\n".format(outFile)
| [
"robchen401@gmail.com"
] | robchen401@gmail.com |
66095275cbbf3ba6d30fa9841a9b107ad96f080b | 625f2f86f2b2e07cb35204d9b3232427bf462a09 | /data/HIRun2015PbPb/HIMinimumBias2_Run263233_263293/runForestAOD_PbPb_DATA_75X.py | 231f40947da5ee5ae76e0442712abe74e3c3f970 | [] | no_license | ttrk/production | abb84c423a076fd9966276b7ed4350936c755e0b | f8a64c9c38de215802799365f0f7a99e1ee78276 | refs/heads/master | 2023-02-08T23:48:56.355141 | 2023-01-26T08:46:22 | 2023-01-26T08:46:22 | 52,877,406 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,541 | py | ### HiForest Configuration
# Collisions: pp
# Type: Data
# Input: AOD
import FWCore.ParameterSet.Config as cms
process = cms.Process('HiForest')
process.options = cms.untracked.PSet()
#####################################################################################
# HiForest labelling info
#####################################################################################
process.load("HeavyIonsAnalysis.JetAnalysis.HiForest_cff")
process.HiForest.inputLines = cms.vstring("HiForest V3",)
import subprocess
version = subprocess.Popen(["(cd $CMSSW_BASE/src && git describe --tags)"], stdout=subprocess.PIPE, shell=True).stdout.read()
if version == '':
version = 'no git info'
process.HiForest.HiForestVersion = cms.string(version)
#####################################################################################
# Input source
#####################################################################################
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
# run test job one of the files we want to process
# DAS query : file dataset=/HIMinimumBias2/HIRun2015-PromptReco-v1/AOD run=263233 lumi=427
"/store/hidata/HIRun2015/HIMinimumBias2/AOD/PromptReco-v1/000/263/233/00000/B603F936-37A7-E511-B092-02163E011928.root"
)
)
# Number of events we want to process, -1 = all events
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(10))
#####################################################################################
# Load Global Tag, Geometry, etc.
#####################################################################################
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.Geometry.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
from Configuration.AlCa.GlobalTag_condDBv2 import GlobalTag
#process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
process.GlobalTag = GlobalTag(process.GlobalTag, '75X_dataRun2_v12', '') #for now track GT manually, since centrality tables updated ex post facto
process.HiForest.GlobalTagLabel = process.GlobalTag.globaltag
from HeavyIonsAnalysis.Configuration.CommonFunctions_cff import overrideJEC_PbPb5020
process = overrideJEC_PbPb5020(process)
process.load("RecoHI.HiCentralityAlgos.CentralityBin_cfi")
process.centralityBin.Centrality = cms.InputTag("hiCentrality")
process.centralityBin.centralityVariable = cms.string("HFtowers")
#####################################################################################
# Define tree output
#####################################################################################
process.TFileService = cms.Service("TFileService",
fileName=cms.string("HiForestAOD.root"))
#####################################################################################
# Additional Reconstruction and Analysis: Main Body
#####################################################################################
####################################################################################
#############################
# Jets
#############################
from Configuration.StandardSequences.ReconstructionHeavyIons_cff import voronoiBackgroundPF, voronoiBackgroundCalo
from RecoJets.JetProducers.kt4PFJets_cfi import kt4PFJets
from RecoHI.HiJetAlgos.hiFJRhoProducer import hiFJRhoProducer
process.kt4PFJets = kt4PFJets
process.hiFJRhoProducer = hiFJRhoProducer
process.kt4PFJets.src = cms.InputTag('particleFlowTmp')
process.kt4PFJets.doAreaFastjet = True
process.kt4PFJets.jetPtMin = cms.double(0.0)
process.kt4PFJets.GhostArea = cms.double(0.005)
process.load('HeavyIonsAnalysis.JetAnalysis.hiFJRhoAnalyzer_cff')
process.voronoiBackgroundPF = voronoiBackgroundPF
process.voronoiBackgroundCalo = voronoiBackgroundCalo
process.load('HeavyIonsAnalysis.JetAnalysis.jets.HiReRecoJets_HI_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu2CaloJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs2CaloJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs2PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu2PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akCs2PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu3CaloJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs3CaloJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs3PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu3PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akCs3PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu4CaloJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs4CaloJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs4PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu4PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akCs4PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu5CaloJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs5CaloJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akVs5PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akPu5PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akCs5PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akCsFilter4PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akCsFilter5PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akCsSoftDrop4PFJetSequence_PbPb_data_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.jets.akCsSoftDrop5PFJetSequence_PbPb_data_cff')
process.highPurityTracks = cms.EDFilter("TrackSelector",
src = cms.InputTag("hiGeneralTracks"),
cut = cms.string('quality("highPurity")'))
process.load("RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi")
process.offlinePrimaryVertices.TrackLabel = 'highPurityTracks'
process.jetSequences = cms.Sequence(
voronoiBackgroundPF+
voronoiBackgroundCalo+
process.kt4PFJets +
process.hiFJRhoProducer +
process.hiFJRhoAnalyzer +
process.akPu2CaloJets +
process.akPu2PFJets +
process.akVs2CaloJets +
process.akVs2PFJets +
process.akCs2PFJets +
#process.akPu3CaloJets +
#process.akPu3PFJets +
process.akVs3CaloJets +
process.akVs3PFJets +
process.akCs3PFJets +
#process.akPu4CaloJets +
#process.akPu4PFJets +
process.akVs4CaloJets +
process.akVs4PFJets +
process.akCs4PFJets +
process.akPu5CaloJets +
process.akPu5PFJets +
process.akVs5CaloJets +
process.akVs5PFJets +
process.akCs5PFJets +
process.akCsFilter4PFJets +
process.akCsFilter5PFJets +
process.akCsSoftDrop4PFJets +
process.akCsSoftDrop5PFJets +
process.highPurityTracks +
process.offlinePrimaryVertices +
process.akPu2CaloJetSequence +
process.akVs2CaloJetSequence +
process.akVs2PFJetSequence +
process.akPu2PFJetSequence +
process.akCs2PFJetSequence +
process.akPu3CaloJetSequence +
process.akVs3CaloJetSequence +
process.akVs3PFJetSequence +
process.akPu3PFJetSequence +
process.akCs3PFJetSequence +
process.akPu4CaloJetSequence +
process.akVs4CaloJetSequence +
process.akVs4PFJetSequence +
process.akPu4PFJetSequence +
process.akCs4PFJetSequence +
process.akPu5CaloJetSequence +
process.akVs5CaloJetSequence +
process.akVs5PFJetSequence +
process.akPu5PFJetSequence +
process.akCs5PFJetSequence +
process.akCsFilter4PFJetSequence +
process.akCsFilter5PFJetSequence +
process.akCsSoftDrop4PFJetSequence +
process.akCsSoftDrop5PFJetSequence
)
#####################################################################################
############################
# Event Analysis
############################
process.load('HeavyIonsAnalysis.EventAnalysis.hievtanalyzer_data_cfi')
process.load('HeavyIonsAnalysis.EventAnalysis.hltobject_PbPb_cfi')
process.load('HeavyIonsAnalysis.EventAnalysis.hltanalysis_cff')
from HeavyIonsAnalysis.EventAnalysis.dummybranches_cff import addHLTdummybranches
addHLTdummybranches(process)
process.load("HeavyIonsAnalysis.JetAnalysis.pfcandAnalyzer_cfi")
process.pfcandAnalyzer.skipCharged = False
process.pfcandAnalyzer.pfPtMin = 0
process.load("HeavyIonsAnalysis.JetAnalysis.pfcandAnalyzerCS_cfi")
process.pfcandAnalyzerCS.skipCharged = False
process.pfcandAnalyzerCS.pfPtMin = 0
process.load("HeavyIonsAnalysis.JetAnalysis.hcalNoise_cff")
#####################################################################################
#########################
# Track Analyzer
#########################
process.load('HeavyIonsAnalysis.JetAnalysis.ExtraTrackReco_cff')
process.load('HeavyIonsAnalysis.JetAnalysis.TrkAnalyzers_cff')
# process.load("HeavyIonsAnalysis.TrackAnalysis.METAnalyzer_cff")
####################################################################################
#####################
# Photons
#####################
process.load('HeavyIonsAnalysis.PhotonAnalysis.ggHiNtuplizer_cfi')
process.ggHiNtuplizer.doGenParticles = False
process.ggHiNtuplizerGED = process.ggHiNtuplizer.clone(recoPhotonSrc = cms.InputTag('gedPhotonsTmp'),
recoPhotonHiIsolationMap = cms.InputTag('photonIsolationHIProducerGED')
)
####################################################################################
#####################
# tupel and necessary PAT sequences
#####################
process.load("HeavyIonsAnalysis.VectorBosonAnalysis.tupelSequence_PbPb_cff")
#####################################################################################
#########################
# Main analysis list
#########################
# Single path chaining every per-event analyzer; process.tupelPatSequence is
# deliberately left disabled (commented out) at the end of the chain.
process.ana_step = cms.Path(process.hltanalysis *
                            process.hltobject *
                            process.centralityBin *
                            process.hiEvtAnalyzer*
                            process.jetSequences +
                            process.ggHiNtuplizer +
                            process.ggHiNtuplizerGED +
                            process.pfcandAnalyzer +
                            process.pfcandAnalyzerCS +
                            process.HiForest +
                            process.trackSequencesPbPb +
                            process.hcalNoise #+
                            #process.tupelPatSequence
                            )
#####################################################################################
#########################
# Event Selection
#########################
process.load('HeavyIonsAnalysis.JetAnalysis.EventSelection_cff')
process.pcollisionEventSelection = cms.Path(process.collisionEventSelectionAOD)
# Each HBHE noise-filter flavour is exposed on its own path so the skim
# records every decision separately.
process.pHBHENoiseFilterResultProducer = cms.Path( process.HBHENoiseFilterResultProducer )
process.HBHENoiseFilterResult = cms.Path(process.fHBHENoiseFilterResult)
process.HBHENoiseFilterResultRun1 = cms.Path(process.fHBHENoiseFilterResultRun1)
process.HBHENoiseFilterResultRun2Loose = cms.Path(process.fHBHENoiseFilterResultRun2Loose)
process.HBHENoiseFilterResultRun2Tight = cms.Path(process.fHBHENoiseFilterResultRun2Tight)
process.HBHEIsoNoiseFilterResult = cms.Path(process.fHBHEIsoNoiseFilterResult)
process.pprimaryVertexFilter = cms.Path(process.primaryVertexFilter )
process.load('HeavyIonsAnalysis.Configuration.hfCoincFilter_cff')
# HF coincidence filter paths 1-5 (presumably increasingly strict variants
# defined in hfCoincFilter_cff -- confirm there).
process.phfCoincFilter1 = cms.Path(process.hfCoincFilter)
process.phfCoincFilter2 = cms.Path(process.hfCoincFilter2)
process.phfCoincFilter3 = cms.Path(process.hfCoincFilter3)
process.phfCoincFilter4 = cms.Path(process.hfCoincFilter4)
process.phfCoincFilter5 = cms.Path(process.hfCoincFilter5)
process.pclusterCompatibilityFilter = cms.Path(process.clusterCompatibilityFilter)
process.pAna = cms.EndPath(process.skimanalysis)
# Customization
##########################################UE##########################################
from CondCore.DBCommon.CondDBSetup_cfi import *
# Underlying-event (UE) compatibility tables for PF and Calo jets, fetched
# from the production conditions database via Frontier.
process.uetable = cms.ESSource("PoolDBESSource",
                               DBParameters = cms.PSet(
                                   messageLevel = cms.untracked.int32(0)
                               ),
                               timetype = cms.string('runnumber'),
                               toGet = cms.VPSet(
                                   cms.PSet(record = cms.string("JetCorrectionsRecord"),
                                            tag = cms.string("UETableCompatibilityFormat_PF_v02_offline"),
                                            label = cms.untracked.string("UETable_PF")
                                            ),
                                   cms.PSet(record = cms.string("JetCorrectionsRecord"),
                                            tag = cms.string("UETableCompatibilityFormat_Calo_v02_offline"),
                                            label = cms.untracked.string("UETable_Calo")
                                            )
                                   ),
                               connect = cms.string("frontier://FrontierProd/CMS_CONDITIONS")
                               )
# Force this ESSource to win over any other JetCorrectionsRecord provider.
process.es_prefer_uetable = cms.ESPrefer('PoolDBESSource','uetable')
##########################################UE##########################################
| [
"tatark@mit.edu"
] | tatark@mit.edu |
201d58374ddb1f5d1ca8815b7e02ca9867fdb3a1 | 95d7291ce528ab40506d111f46c4f243b4b88514 | /backend/home/migrations/0002_load_initial_data.py | 2b9ff4b97b79ec4a4c6c2e7d002e81fe6d862ff6 | [] | no_license | crowdbotics-apps/dared-3-22630 | 8d2974d89edccddffdb39b1ad120e55fcc3092e5 | b6f8edff1e762bddf9f996ff53b72225eab1375c | refs/heads/master | 2023-01-09T18:51:28.888786 | 2020-11-14T20:53:35 | 2020-11-14T20:53:35 | 312,900,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,282 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
    """Seed the home.CustomText table with the application title."""
    custom_text_model = apps.get_model("home", "CustomText")
    custom_text_model.objects.create(title="Dared 3")
def create_homepage(apps, schema_editor):
    """Seed the home.HomePage table with the default landing-page markup."""
    home_page_model = apps.get_model("home", "HomePage")
    body_markup = """
    <h1 class="display-4 text-center">Dared 3</h1>
    <p class="lead">
    This is the sample application created and deployed from the Crowdbotics app.
    You can view list of packages selected for this application below.
    </p>"""
    home_page_model.objects.create(body=body_markup)
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record (pk=1) at the app domain."""
    site_model = apps.get_model("sites", "Site")
    domain = "dared-3-22630.botics.co"
    defaults = {"name": "Dared 3"}
    if domain:
        defaults["domain"] = domain
    site_model.objects.update_or_create(defaults=defaults, id=1)
class Migration(migrations.Migration):
    """Data migration: seed CustomText, HomePage, and the default Site."""

    # Requires the home tables and the sites domain-uniqueness migration.
    dependencies = [
        ("home", "0001_initial"),
        ("sites", "0002_alter_domain_unique"),
    ]

    # Forward-only data seeding (no reverse functions supplied).
    operations = [
        migrations.RunPython(create_customtext),
        migrations.RunPython(create_homepage),
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
9928c7ec02949ebb1ffaf41dc2a1309e05a1b420 | 07fe650b914e6577b42f3214bfdec3834e6e5eee | /gdslib/coupler_ring.py | 83c169ba15dbd3d12ef28cec15e0a6c0b91d02fa | [
"MIT"
] | permissive | sequoiap/gdslib | e2379baac55bb5715183c6e68b69dc6213d009db | 3e6d081a2196e13e89fef45cae5c7de41b96a7fc | refs/heads/master | 2022-08-01T07:54:53.880323 | 2020-05-18T18:26:42 | 2020-05-18T18:26:42 | 265,635,055 | 0 | 0 | MIT | 2020-05-20T17:12:59 | 2020-05-20T17:12:59 | null | UTF-8 | Python | false | false | 629 | py | import pp
from gdslib.load import load
def coupler_ring(c=pp.c.coupler_ring, **kwargs):
    """Return the circuit model for half a ring coupler.

    .. code::

       N0            N1
       |             |
        \           /
         \         /
       ---=========---
        W0  length_x  E0

    """
    return load(c, **kwargs)
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    import numpy as np

    # Sweep 1520-1570 nm (C-band) and plot the coupled-port response.
    wav = np.linspace(1520, 1570, 1024) * 1e-9
    f = 3e8 / wav  # convert wavelength [m] to frequency [Hz]
    c = coupler_ring()
    s = c.s_parameters(freq=f)
    # |S|^2 of column 1 of the S-matrix vs. wavelength
    plt.plot(wav, np.abs(s[:, 1] ** 2))
    print(c.pins)
    plt.show()
| [
"j"
] | j |
6cc4227ceafde33b34a254323ad23ffc0142a679 | 7f02a1297660601d40d5781cb7adbc2f4520029f | /macode/vae/train/atari_all.py | b0cedeff19ad6f234acaadddeec618235324a222 | [] | no_license | llach/ma-code | 2c4fb20ae1df9d457ec6736d3725104f37203824 | b8e6c279f966e6b9fadfa67731d8adb970106413 | refs/heads/master | 2020-04-16T18:34:36.242778 | 2019-07-21T13:43:03 | 2019-07-21T13:43:03 | 165,826,198 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | import tensorflow as tf
from forkan.models import VAE
from forkan.datasets import load_atari_normalized
# Hyper-parameters shared by every per-game run.
learning_rate = 1e-4
beta = 5.5     # beta passed to the VAE (presumably the KL weight -- confirm in forkan)
latents = 20   # latent dimensionality

# Train one VAE per Atari game on its normalised frame dataset.
for name in ['pong', 'breakout', 'boxing', 'gopher', 'upndown']:
    data = load_atari_normalized(name)
    v = VAE(data.shape[1:], name=name, lr=learning_rate, beta=beta, latent_dim=latents)
    v.train(data, num_episodes=50, print_freq=-1)

    # Tear down the TF graph and drop references so the next game starts clean.
    tf.reset_default_graph()
    del data
    del v
| [
"llach@techfak.uni-bielefeld.de"
] | llach@techfak.uni-bielefeld.de |
6955829c9dcbcfbf6e414fa698bc84b446ebf450 | 8a82a83655f118208692e55d7804d9fa480ad4b6 | /book/packt/Mastering.Python.Scientific.Computing/Chapter 8/B02092_08_14.py | 16ebc5170e994c476513f720fd77f6f03af65761 | [] | no_license | xenron/sandbox-da-python | 0814159da9a91923e4b66c5e40057e381f765e96 | ab8f1c0d57fdc6006355f613012b84165068c315 | refs/heads/master | 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | #!/usr/bin/env python
import sys

# Hadoop-streaming mapper: for every "WARC-Target-URI: <uri>" header line on
# stdin, emit "<scheme>//<host>\t1" so the reducer can count pages per host.
for line in sys.stdin:
    try:
        # split the line into words
        words = line.strip().split()
        # skip blank lines; only WARC target-URI headers are counted
        if words and words[0] == "WARC-Target-URI:":
            # words[1] is the full URI; keep only "<scheme>//<host>"
            uri = words[1].split("/")
            # write() instead of the py2-only print statement (same output)
            sys.stdout.write('%s\t%s\n' % (uri[0] + "//" + uri[2], 1))
    except Exception:
        # Best-effort by design: malformed records are skipped so a single
        # bad line never aborts the whole mapper task.
        continue
#hadoop jar /usr/local/apache/hadoop2/share/hadoop/tools/lib/hadoop-streaming-2.6.0.jar -file /mapper.py -mapper /mapper.py -file /reducer.py -reducer /reducer.py -input /text.txt -output /output
| [
"xenron@outlook.com"
] | xenron@outlook.com |
3767bcf0f0bfe6b74fb52776c215a52ebbceecfe | 7506c49859870af9e62c3e919857ffcdf2e9a19e | /book2/Seq2SeqLearning/statistic_word5.py | 66fde97061d026ee8ff261cf27241b565937ef27 | [] | no_license | Git2191866109/BookStudy | d363717285a5e9767e582f6efd1258680fa26f80 | f172244218871372ca94286c3db64cf334627ef3 | refs/heads/master | 2022-11-08T00:15:00.963332 | 2020-06-28T10:28:33 | 2020-06-28T10:28:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,744 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# coding=utf-8
"""
@author: Li Tian
@contact: 694317828@qq.com
@software: pycharm
@file: statistic_word5.py
@time: 2019/4/29 10:39
@desc: 用tf.while_loop来实现解码过程
"""
import tensorflow as tf
import codecs
# Path of the checkpoint to restore (saved by the training program).
CHECKPOINT_PATH = "./new_seq2seq_ckpt"
# Model parameters -- these MUST match the values used during training.
# Hidden-layer size of each LSTM.
HIDDEN_SIZE = 1024
# Number of stacked LSTM layers in the deep RNN.
NUM_LAYERS = 2
# Source-language vocabulary size.
SRC_VOCAB_SIZE = 10000
# Target-language vocabulary size.
TRG_VOCAB_SIZE = 4000
# Share parameters between the softmax layer and the embedding layer.
SHARE_EMB_AND_SOFTMAX = True
# Vocabulary ids of <sos> and <eos>.  Decoding starts from <sos> and stops
# when <eos> is produced, so both ids must be known here.
SOS_ID = 1
EOS_ID = 2
# Vocabulary files.
SRC_VOCAB = "en.vocab"
TRG_VOCAB = "zh.vocab"
# NMTModel describes the trained seq2seq model used for decoding.
class NMTModel(object):
    """Encoder-decoder NMT model; greedy decoding via tf.while_loop."""

    # Define every variable the model uses in the constructor.
    def __init__(self):
        # Identical to the training-time __init__.  The same class (and
        # __init__) is shared by the training and decoding programs so both
        # define exactly the same variables.
        # LSTM stacks used by the encoder and the decoder.
        self.enc_cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)])
        self.dec_cell = tf.nn.rnn_cell.MultiRNNCell([tf.nn.rnn_cell.BasicLSTMCell(HIDDEN_SIZE) for _ in range(NUM_LAYERS)])
        # Separate word embeddings for the source and target languages.
        self.src_embedding = tf.get_variable("src_emb", [SRC_VOCAB_SIZE, HIDDEN_SIZE])
        self.trg_embedding = tf.get_variable("trg_emb", [TRG_VOCAB_SIZE, HIDDEN_SIZE])
        # Softmax-layer variables, optionally tied to the target embedding.
        if SHARE_EMB_AND_SOFTMAX:
            self.softmax_weight = tf.transpose(self.trg_embedding)
        else:
            self.softmax_weight = tf.get_variable("weight", [HIDDEN_SIZE, TRG_VOCAB_SIZE])
        self.softmax_bias = tf.get_variable("softmax_bias", [TRG_VOCAB_SIZE])

    def inference(self, src_input):
        """Build the greedy-decoding graph for one source sentence (word ids)."""
        # dynamic_rnn expects batched input, so wrap the single sentence in a
        # batch of size 1.
        src_size = tf.convert_to_tensor([len(src_input)], dtype=tf.int32)
        src_input = tf.convert_to_tensor([src_input], dtype=tf.int32)
        src_emb = tf.nn.embedding_lookup(self.src_embedding, src_input)
        # Build the encoder with dynamic_rnn, exactly as during training.
        with tf.variable_scope("encoder"):
            enc_outputs, enc_state = tf.nn.dynamic_rnn(self.enc_cell, src_emb, src_size, dtype=tf.float32)
        # Cap the number of decoding steps to avoid an infinite loop in
        # pathological cases.
        MAX_DEC_LEN = 100
        with tf.variable_scope("decoder/rnn/multi_rnn_cell"):
            # A growable TensorArray stores the generated sentence.
            init_array = tf.TensorArray(dtype=tf.int32, size=0, dynamic_size=True, clear_after_read=False)
            # Seed decoding with the <sos> token as the first input.
            init_array = init_array.write(0, SOS_ID)
            # Loop state: RNN hidden state, the output TensorArray, and the
            # integer step counter.
            init_loop_var = (enc_state, init_array, 0)
            # tf.while_loop condition:
            # continue until the decoder emits <eos> or MAX_DEC_LEN is hit.
            def continue_loop_condition(state, trg_ids, step):
                return tf.reduce_all(tf.logical_and(tf.not_equal(trg_ids.read(step), EOS_ID), tf.less(step, MAX_DEC_LEN-1)))
            def loop_body(state, trg_ids, step):
                # Embed the word produced at the previous step.
                trg_input = [trg_ids.read(step)]
                trg_emb = tf.nn.embedding_lookup(self.trg_embedding, trg_input)
                # Advance the decoder cell one step directly (no dynamic_rnn).
                dec_outputs, next_state = self.dec_cell.call(state=state, inputs=trg_emb)
                # Compute the logit of every candidate word and take the
                # arg-max as this step's output (greedy decoding).
                output = tf.reshape(dec_outputs, [-1, HIDDEN_SIZE])
                logits = (tf.matmul(output, self.softmax_weight) + self.softmax_bias)
                next_id = tf.argmax(logits, axis=1, output_type=tf.int32)
                # Append this step's word to the loop state's trg_ids.
                trg_ids = trg_ids.write(step+1, next_id[0])
                return next_state, trg_ids, step+1
            # Run the loop and return the generated id sequence.
            state, trg_ids, step = tf.while_loop(continue_loop_condition, loop_body, init_loop_var)
            return trg_ids.stack()
def main():
    """Restore the trained model and translate one hard-coded sentence."""
    # Build the model under the same variable scope used for training.
    with tf.variable_scope("nmt_model", reuse=None):
        model = NMTModel()

    # The example sentence to translate.
    test_sentence = "This is a test ."
    print(test_sentence)
    # Map the English words to vocabulary ids, appending <eos> at the end.
    test_sentence = test_sentence + " <eos>"
    with codecs.open(SRC_VOCAB, 'r', 'utf-8') as vocab:
        src_vocab = [w.strip() for w in vocab.readlines()]
        # Build a word -> id dictionary for the lookup below.
        src_id_dict = dict((src_vocab[x], x) for x in range(SRC_VOCAB_SIZE))
    # Unknown words fall back to the <unk> id.
    test_en_ids = [(src_id_dict[en_text] if en_text in src_id_dict else src_id_dict['<unk>'])
                   for en_text in test_sentence.split()]
    print(test_en_ids)

    # Build the decoding graph and restore the trained weights.
    output_op = model.inference(test_en_ids)
    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, CHECKPOINT_PATH)

    # Run the graph to obtain the translated word ids.
    output_ids = sess.run(output_op)
    print(output_ids)

    # Map the ids back to target-language text, dropping the leading <sos>
    # and trailing <eos> via the [1:-1] slice.
    with codecs.open(TRG_VOCAB, "r", "utf-8") as f_vocab:
        trg_vocab = [w.strip() for w in f_vocab.readlines()]
    output_text = ''.join([trg_vocab[x] for x in output_ids[1:-1]])

    # Print the translation.
    print(output_text)
    sess.close()
if __name__ == "__main__":
main() | [
"694317828@qq.com"
] | 694317828@qq.com |
8fed339d0d009e1232013c23f8458a9a76188cf0 | 031b24455b953907a0f98778931ee8a03c3c4b6c | /pacman103/core/spinnman/spinnman_utilities.py | 7a49d1ec7d9bba135a19329c236b5ed8deeb1056 | [] | no_license | BRML/HBP-spinnaker-cerebellum | 7e5f69c05d0e51f79442635df58815768f20e6bc | 7fc3eb5c486df66720d227e0e422cbab65c08885 | refs/heads/master | 2020-12-25T23:47:09.416213 | 2015-06-26T09:45:31 | 2015-06-26T09:45:31 | 38,686,607 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,649 | py | __author__ = 'stokesa6'
import os
class SpinnmanUtilities(object):
    """Records transceiver commands to a report file and replays them back.

    Constructed with *dao*: opens a ``transceiver_commands`` report file and
    buffers command lines in ``output_data`` until :meth:`close`.
    Constructed with *input_file*: parses a previously written report into
    ``app_loads`` / ``mem_writes`` / ``mem_writes_from_file``.
    """

    def __init__(self, dao=None, input_file=None):
        self.runtime = None
        self.total_processors = None
        self.app_loads = list()
        self.mem_writes_from_file = list()
        self.mem_writes = list()
        if dao is not None:
            # Writing mode: open the report file for output.
            directory = dao.get_reports_directory("transceiver_commands")
            self.output_file = os.path.join(directory, "transceiver_commands")
            self.output = open(self.output_file, "wb")
            self.output_data = list()
        if input_file is not None:
            # Reading mode: parse the existing report straight away.
            self.read_in_file(input_file)

    def write_extra_data(self, runtime, total_processors):
        # Inserted in reverse order so the file begins with the RUNTIME line
        # followed by the TOTAL_PROCESSORS line.
        self.output_data.insert(0, "TOTAL_PROCESSORS:{}:".format(total_processors))
        self.output_data.insert(0, "RUNTIME:{}:".format(runtime))

    # different types of writes

    def write_app_load_command(self, key, region, core_part_of_region, app_id):
        # Record an application-load command line.
        self.output_data.append("APPLOAD:{}:{}:{}:{}:".format(key, region,
                                                              core_part_of_region,
                                                              app_id))

    def write_selects(self, x, y, p):
        # Record the (x, y, p) core selection targeted by subsequent writes.
        self.output_data.append("SELECT:{}:{}:{}:".format(x, y, p))

    def write_mem_from_file(self, address, type_word, filename):
        # Record a memory write whose payload lives in an external file.
        self.output_data.append("WRITE_MEM_FROM_FILE:{}:{}:{}:".
                                format(address, int(type_word), filename))

    def write_mem(self, address, type_word, structure):
        # Record a memory write with an inline structure payload.
        self.output_data.append("WRITE_MEM:{}:{}:{}:".
                                format(address, int(type_word), structure))

    def close(self):
        # Flush all buffered command lines to disk and close the report.
        for line in self.output_data:
            self.output.write(line + "\n")
        self.output.flush()
        self.output.close()

    def get_run_time(self):
        return self.runtime

    def get_total_processors(self):
        return self.total_processors

    def get_app_loads(self):
        return self.app_loads

    def get_mem_writes(self):
        return self.mem_writes

    def get_mem_writes_from_file(self):
        return self.mem_writes_from_file

    def read_in_file(self, input_file):
        """Parse a transceiver_commands report back into the lists above."""
        # NOTE(review): inputfile is never closed here -- consider a `with`.
        inputfile = open(input_file, "r")
        content = inputfile.readlines()
        # Line 0 is "RUNTIME:<v>:", line 1 is "TOTAL_PROCESSORS:<v>:".
        self.runtime = content[0].split(":")[1]
        self.total_processors = content[1].split(":")[1]
        self.app_loads = list()
        data = None
        line = 0
        for line in range(2, len(content)):
            bits = content[line].split(":")
            if bits[0] == "APPLOAD":
                data = dict()
                data['key'] = bits[1]
                data['region'] = bits[2]
                data['core_part_of_region'] = bits[3]
                data['app_id'] = bits[4]
                self.app_loads.append(data)
            elif bits[0] == "SELECT":
                # The SELECT dict is held in `data` and appended together
                # with the next WRITE_MEM* entry (see below).
                data = dict()
                data['x'] = bits[1]
                data['y'] = bits[2]
                data['p'] = bits[3]
            elif bits[0] == "WRITE_MEM":
                # NOTE(review): this first append stores the *previous* dict
                # (usually the preceding SELECT) ahead of the new entry, so
                # the list interleaves select context with writes -- confirm
                # this pairing is intended.
                self.mem_writes.append(data)
                data = dict()
                data['address'] = bits[1]
                data['type_word'] = bits[2]
                data['structure'] = bits[3]
                self.mem_writes.append(data)
            elif bits[0] == "WRITE_MEM_FROM_FILE":
                # Same interleaving pattern as WRITE_MEM above.
                self.mem_writes_from_file.append(data)
                data = dict()
                data['address'] = bits[1]
                data['type_word'] = bits[2]
                data['filename'] = bits[3]
                self.mem_writes_from_file.append(data)
| [
"dr.christoph.richter@gmail.com"
] | dr.christoph.richter@gmail.com |
ebc299fc609c9f70d3b6bb73526b7c530b6aad85 | f65be296b831982b187cb3c3a1c82740fec15b5a | /ineco_tax_include/account.py | bd62769cf187b9dbca6b460bc4eff50915aaa2f6 | [] | no_license | nitikarnh/bpe_module | ab05af81f7dae10129ec584233423d4e5c3c7f3d | 6b1057495b277dc69023554d5d4e7bf172ba07c1 | refs/heads/master | 2020-05-21T16:40:05.291099 | 2017-10-24T09:11:01 | 2017-10-24T09:11:01 | 64,814,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,151 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
from lxml import etree
from openerp import models, fields, api, _
from openerp.exceptions import except_orm, Warning, RedirectWarning
from openerp.tools import float_compare
import openerp.addons.decimal_precision as dp
class AccountInvoice(models.Model):
    _inherit = 'account.invoice'

    @api.one
    @api.depends('invoice_line.price_subtotal', 'tax_line.amount')
    def _compute_amount(self):
        """Recompute untaxed/tax/total amounts with tax-included support.

        Replaces the stock computation (kept commented out below): the
        subtotal of the *taxed* lines is treated as tax-included or
        tax-excluded depending on the ``price_include`` flag of the line
        taxes, and the tax share is derived from the tax percentage.
        """
        #self.amount_untaxed = sum(line.price_subtotal for line in self.invoice_line)
        #self.amount_tax = sum(line.amount for line in self.tax_line)
        #self.amount_total = self.amount_untaxed + self.amount_tax
        tax_included = False
        subtotal = alltotal = 0.0
        percent = 0.0
        # NOTE(review): only the *last* tax seen sets tax_included/percent,
        # so invoices mixing different tax rates are not handled -- confirm
        # this single-tax assumption is intended.
        for line in self.invoice_line:
            # NOTE(review): alltotal is accumulated but never used below.
            alltotal += line.quantity * line.price_unit
            if line.invoice_line_tax_id:
                for tax in line.invoice_line_tax_id:
                    tax_included = tax.price_include == True
                    percent = tax.amount
                subtotal += line.quantity * line.price_unit
        amount_total = amount_tax = amount_untaxed = 0.0
        if tax_included:
            # Prices include tax: extract the tax portion from the subtotal.
            amount_total = subtotal
            amount_tax = subtotal * (percent / (1 + percent))
            amount_untaxed = amount_total - amount_tax
        else:
            # Prices exclude tax: add the tax on top of the subtotal.
            amount_untaxed = subtotal
            amount_tax = subtotal * percent
            amount_total = amount_untaxed + amount_tax
        self.amount_untaxed = amount_untaxed
        self.amount_tax = amount_tax
        self.amount_total = amount_total
class AccountInvoiceTax(models.Model):
    _inherit = 'account.invoice.tax'

    @api.v8
    def compute(self, invoice):
        """Group the invoice taxes, then override base/amount for tax-included prices.

        The first part mirrors the stock grouping by (tax code, base code,
        account); the final loop overwrites every group's base/amount with
        the invoice-level figures recomputed under the tax-included logic
        (mirroring AccountInvoice._compute_amount above).
        """
        tax_grouped = {}
        currency = invoice.currency_id.with_context(date=invoice.date_invoice or fields.Date.context_today(invoice))
        company_currency = invoice.company_id.currency_id
        tax_included = False
        subtotal = alltotal = 0.0
        percent = 0.0
        # NOTE(review): as in _compute_amount, only the last tax seen sets
        # tax_included/percent, and alltotal is never used.
        for line in invoice.invoice_line:
            alltotal += line.quantity * line.price_unit
            if line.invoice_line_tax_id:
                for tax in line.invoice_line_tax_id:
                    tax_included = tax.price_include == True
                    percent = tax.amount
                subtotal += line.quantity * line.price_unit
            # Standard per-line tax computation (discount applied to the
            # unit price before taxes).
            taxes = line.invoice_line_tax_id.compute_all(
                (line.price_unit * (1 - (line.discount or 0.0) / 100.0)),
                line.quantity, line.product_id, invoice.partner_id)['taxes']
            for tax in taxes:
                val = {
                    'invoice_id': invoice.id,
                    'name': tax['name'],
                    'amount': tax['amount'],
                    'manual': False,
                    'sequence': tax['sequence'],
                    'base': currency.round(tax['price_unit'] * line['quantity']),
                }
                # Customer invoices use the collected codes/accounts,
                # refunds use the "ref"/paid variants.
                if invoice.type in ('out_invoice','in_invoice'):
                    val['base_code_id'] = tax['base_code_id']
                    val['tax_code_id'] = tax['tax_code_id']
                    val['base_amount'] = currency.compute(val['base'] * tax['base_sign'], company_currency, round=False)
                    val['tax_amount'] = currency.compute(val['amount'] * tax['tax_sign'], company_currency, round=False)
                    val['account_id'] = tax['account_collected_id'] or line.account_id.id
                    val['account_analytic_id'] = tax['account_analytic_collected_id']
                else:
                    val['base_code_id'] = tax['ref_base_code_id']
                    val['tax_code_id'] = tax['ref_tax_code_id']
                    val['base_amount'] = currency.compute(val['base'] * tax['ref_base_sign'], company_currency, round=False)
                    val['tax_amount'] = currency.compute(val['amount'] * tax['ref_tax_sign'], company_currency, round=False)
                    val['account_id'] = tax['account_paid_id'] or line.account_id.id
                    val['account_analytic_id'] = tax['account_analytic_paid_id']

                # If the taxes generate moves on the same financial account as the invoice line
                # and no default analytic account is defined at the tax level, propagate the
                # analytic account from the invoice line to the tax line. This is necessary
                # in situations were (part of) the taxes cannot be reclaimed,
                # to ensure the tax move is allocated to the proper analytic account.
                if not val.get('account_analytic_id') and line.account_analytic_id and val['account_id'] == line.account_id.id:
                    val['account_analytic_id'] = line.account_analytic_id.id

                key = (val['tax_code_id'], val['base_code_id'], val['account_id'])
                if not key in tax_grouped:
                    tax_grouped[key] = val
                else:
                    tax_grouped[key]['base'] += val['base']
                    tax_grouped[key]['amount'] += val['amount']
                    tax_grouped[key]['base_amount'] += val['base_amount']
                    tax_grouped[key]['tax_amount'] += val['tax_amount']

        # Recompute the invoice-level amounts under the tax-included rule.
        amount_total = amount_tax = amount_untaxed = 0.0
        if tax_included:
            amount_total = currency.round(subtotal)
            amount_tax = currency.round(subtotal * (percent / (1 + percent)))
            amount_untaxed = currency.round(amount_total - amount_tax)
        else:
            amount_untaxed = currency.round(subtotal)
            amount_tax = currency.round(subtotal * percent)
            amount_total = currency.round(amount_untaxed + amount_tax)

        # NOTE(review): every group gets the *same* invoice-level base/amount
        # (the per-group rounding below was replaced) -- only correct when
        # all taxes fall into a single group; confirm intended.
        for t in tax_grouped.values():
            #t['base'] = currency.round(t['base'])
            #t['amount'] = currency.round(t['amount'])
            #t['base_amount'] = currency.round(t['base_amount'])
            #t['tax_amount'] = currency.round(t['tax_amount'])
            t['base'] = amount_untaxed
            t['amount'] = amount_tax
            t['base_amount'] = amount_untaxed
            t['tax_amount'] = amount_tax
        return tax_grouped
| [
"thitithup@gmail.com"
] | thitithup@gmail.com |
676010352aa2bc30d4196954eda4a4fd5a1caaa7 | 7fbb5025602c99d1438e94d3db4f1ad886be18fc | /mpf/modes/bonus/__init__.py | 00adf485510832881f55c6ee7f01aa065dc67d69 | [
"MIT",
"CC-BY-4.0"
] | permissive | abencz/mpf | 519bd54c4ea7fb230eaf48316845091893a06489 | c1f40d1c94bdb77e60cef15f531bc98f443ceec9 | refs/heads/0.30 | 2020-04-05T18:30:37.819828 | 2016-08-29T22:39:08 | 2016-08-29T22:39:08 | 65,430,713 | 0 | 0 | null | 2016-08-11T02:12:01 | 2016-08-11T02:12:01 | null | UTF-8 | Python | false | false | 29 | py | """Contains a bonus mode."""
| [
"jan-mpf@kantert.net"
] | jan-mpf@kantert.net |
a534743075eff3bc484d4ecb49af2297db59755c | 8ded62b315fc3e325833d91caa885776e46ebead | /geatpy/demo/single_objective_demo5/main.py | 3fea0027833b9470ad3502501703584c2e785879 | [] | no_license | CL-White/geatpy | 46079c501d72763b629b9654e07d38adefa2f848 | 3921306b310c91f0bb7aab22f718ab0ba44d600b | refs/heads/master | 2020-04-13T23:59:03.770713 | 2018-12-21T06:55:07 | 2018-12-21T06:55:07 | 163,521,396 | 1 | 0 | null | 2018-12-29T15:30:55 | 2018-12-29T15:30:55 | null | UTF-8 | Python | false | false | 1,629 | py | # -*- coding: utf-8 -*-
"""
执行脚本main.py
描述:
该demo是展示如何计算带约束的任务指派问题
其中目标函数和约束条件写在aimfuc.py文件中
问题如下:
设有5个人,5个任务。
已知这5个人每小时工作要求的工资分别是1,2,3,4,5元,
而这5个任务分别耗时1,2,3,4,5小时。
此外,已知工人1无法完成第2和第4个任务;工人3无法完成第1和第4个任务。
现要求给每个人分配去完成不同的任务,要求老板一共支付工人的工钱数最少。
因为问题需要用排列编码的染色体来解决,因此本案例调用了“sga_new_permut_templet”这个算法模板,其详细用法可利用help命令查看,或是在github下载并查看源码
调用算法模板时可以设置drawing=2,此时算法模板将在种群进化过程中绘制动画,但注意执行前要在Python控制台执行命令matplotlib qt5。
"""
import geatpy as ga
# Import the objective-function module (aimfuc.py) by name.
AIM_M = __import__('aimfuc')
# Problem parameters.
NVAR = 5 # chromosome length of the permutation encoding
VarLen = 5 # size of the permutation set; 5 means the set is {1,2,3,4,5}
# Run the permutation-encoded SGA template.  recombinStyle must be 'xovpm'
# (partially matched crossover), the recombination operator required for
# permutation-encoded chromosomes.
[pop_trace, var_trace, times] = ga.sga_new_permut_templet(AIM_M, 'aimfuc', None, None, NVAR, VarLen, maxormin = 1, MAXGEN = 100, NIND = 10, SUBPOP = 1, GGAP = 0.9, selectStyle = 'etour', recombinStyle = 'xovpm', recopt = 0.9, pm = 0.1, distribute = True, drawing = 1)
| [
"jazzbin@geatpy.com"
] | jazzbin@geatpy.com |
ac0e03f089553008be7076ba6ad23656ff3032d2 | 00540621f1ce1038a6fd5550b868931d9d02829a | /IPProxyPool/util/compatibility.py | 9e21c93b7aaeba3210dd4c79fda013c91eea4603 | [
"Apache-2.0"
] | permissive | MisterZhouZhou/pythonLearn | 392a3c0f31d0d3a61a43ae27b4a24c6d15316722 | 8933c7a6d444d3d86a173984e6cf4c08dbf84039 | refs/heads/master | 2020-05-19T18:53:08.451360 | 2019-07-18T07:01:51 | 2019-07-18T07:01:51 | 185,164,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 442 | py | # coding:utf-8
import sys

# True on Python 3, False on Python 2.
PY3 = sys.version_info[0] == 3

if PY3:
    text_type = str
    binary_type = bytes
else:  # Python 2 fallback
    text_type = unicode  # noqa: F821 -- `unicode` only exists on Python 2
    binary_type = str


def text_(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to the text type, decoding byte strings as needed."""
    return s.decode(encoding, errors) if isinstance(s, binary_type) else s


def bytes_(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to the byte type, encoding text strings as needed."""
    return s.encode(encoding, errors) if isinstance(s, text_type) else s
"m15038960016@163.com"
] | m15038960016@163.com |
179e8aa96fbd0a11d883b2da40dc69431c00f40e | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/v1_0/devicescorpmgt_v1_0/azext_devicescorpmgt_v1_0/vendored_sdks/devicescorpmgt/aio/operations_async/_device_app_management_managed_app_registration_applied_policy_operations_async.py | 8fb43691c6645917a280dbc32ead62781233dd0b | [
"MIT"
] | permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,249 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DeviceAppManagementManagedAppRegistrationAppliedPolicyOperations:
    """DeviceAppManagementManagedAppRegistrationAppliedPolicyOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~devices_corporate_management.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: generated by AutoRest -- regenerate rather than hand-edit.
    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def target_app(
        self,
        managed_app_registration_id: str,
        managed_app_policy_id: str,
        apps: Optional[List["models.MicrosoftGraphManagedMobileApp"]] = None,
        **kwargs
    ) -> None:
        """Invoke action targetApps.

        Invoke action targetApps.

        :param managed_app_registration_id: key: id of managedAppRegistration.
        :type managed_app_registration_id: str
        :param managed_app_policy_id: key: id of managedAppPolicy.
        :type managed_app_policy_id: str
        :param apps:
        :type apps: list[~devices_corporate_management.models.MicrosoftGraphManagedMobileApp]
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        # Map 404/409 to the richer azure-core exception types.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        error_map.update(kwargs.pop('error_map', {}))

        # Wrap the app list in the generated request-body schema type.
        _body = models.PathsVf2Dh9DeviceappmanagementManagedappregistrationsManagedappregistrationIdAppliedpoliciesManagedapppolicyIdMicrosoftGraphTargetappsPostRequestbodyContentApplicationJsonSchema(apps=apps)
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.target_app.metadata['url']  # type: ignore
        path_format_arguments = {
            'managedAppRegistration-id': self._serialize.url("managed_app_registration_id", managed_app_registration_id, 'str'),
            'managedAppPolicy-id': self._serialize.url("managed_app_policy_id", managed_app_policy_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the body and POST through the client pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(_body, 'PathsVf2Dh9DeviceappmanagementManagedappregistrationsManagedappregistrationIdAppliedpoliciesManagedapppolicyIdMicrosoftGraphTargetappsPostRequestbodyContentApplicationJsonSchema')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 204 No Content is the only success status for this action.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.OdataError, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})
    target_app.metadata = {'url': '/deviceAppManagement/managedAppRegistrations/{managedAppRegistration-id}/appliedPolicies/{managedAppPolicy-id}/microsoft.graph.targetApps'}  # type: ignore
| [
"japhethobalak@gmail.com"
] | japhethobalak@gmail.com |
b2545483337881d35bc202af30352ec1f39361c6 | a44b918826fcbeccb6b70a295d98ca0a31007c13 | /Koudai/Server/release/Script/PyScript/Action/action12057.py | 568de94028d8a9ba6a5488d3dd22f347debf6ba1 | [] | no_license | Pattentively/Scut-samples | a5eff14506f137e409aa7f6139825fbff09e40a2 | 824037857450ca6e580d2bf6710679331132e6fe | refs/heads/master | 2021-01-22T01:05:22.578877 | 2015-04-27T01:53:53 | 2015-04-27T01:53:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,223 | py | import clr, sys
import random
import time
import datetime
import ReferenceLib
from lang import Lang
from action import *
from System import *
from System.Collections.Generic import *
from ZyGames.Framework.Common.Log import *
from ZyGames.Tianjiexing.Model import *
from ZyGames.Tianjiexing.BLL import *
from ZyGames.Tianjiexing.BLL.Base import *
from ZyGames.Tianjiexing.Lang import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Tianjiexing.Model.Config import *
from ZyGames.Tianjiexing.BLL.Combat import *
from ZyGames.Tianjiexing.Model.Enum import *
# 12057 -- map-list action interface
class UrlParam(HttpParam):
    """Request parameters for action 12057 (map list)."""
    def __init__(self):
        HttpParam.__init__(self);
        # Requested plot (map) id; defaults to 0.
        self.plotID = 0;
class ActionResult(DataResult):
    """Response payload: the configured map list plus the player's plot package."""
    def __init__(self):
        DataResult.__init__(self);
        # List of PlotInfo configs sent to the client.
        self.mapList = [];
        # The player's plot package (set in takeAction).
        self.userPlotPackage = None;
def getUrlElement(httpGet, parent):
    """Build the request-parameter object for this action."""
    return UrlParam()
def takeAction(urlParam, parent):
    """Action 12057: return the archaeology map list and the player's plots."""
    actionResult = ActionResult();
    userId = parent.Current.User.PersonalId;
    contextUser = parent.Current.User;
    # Data-load failure helper: flags the error on `parent` and fails the result.
    def loadError():
        parent.ErrorCode = Lang.getLang("ErrorCode");
        parent.ErrorInfo = Lang.getLang("LoadError");
        actionResult.Result = False;
        return actionResult;
    # Require the player to be at least level 20.
    if contextUser.UserLv < 20:
        parent.ErrorCode = Lang.getLang("ErrorCode");
        parent.ErrorInfo = Lang.getLang("St12057_UserLvNotEnough");
        actionResult.Result = False;
        return actionResult;
    userPlotPackage = GameDataCacheSet[UserPlotPackage]().FindKey(userId);
    # Load the map list to send down to the client.
    plotList = ConfigCacheSet[PlotInfo]().FindAll(match=lambda x:x.PlotType == PlotType.KaoGuPlot); # dungeon (KaoGu) map configs
    if not plotList or not userPlotPackage:
        return loadError();
    # When the player has reached level 20 but has no KaoGu plots yet,
    # initialise the map data and reload the package.
    plotMapList = userPlotPackage.PlotPackage.FindAll(match=lambda x:x.PlotType == PlotType.KaoGuPlot);
    if not plotMapList and contextUser.UserLv >= 20:
        UserArchaeologyHelper.InitializeMapInfo(userId);
        userPlotPackage = GameDataCacheSet[UserPlotPackage]().FindKey(userId);
    actionResult.mapList = plotList;
    actionResult.userPlotPackage = userPlotPackage.PlotPackage;
    return actionResult;
def buildPacket(writer, urlParam, actionResult):
    """Serialise the map list into the response packet."""
    mapList = actionResult.mapList
    userPlotPackage = actionResult.userPlotPackage;
    # Map list: count followed by one record per map.
    writer.PushIntoStack(len(mapList));
    for info in mapList:
        dsItem = DataStruct();
        dsItem.PushIntoStack(info.PlotID);
        dsItem.PushIntoStack(info.PlotName);
        dsItem.PushIntoStack(info.BossHeadID);
        dsItem.PushIntoStack(info.KgScene);
        # Flag whether the player already owns this plot (1) or not (0).
        mapInfo = userPlotPackage.Find(match=lambda x:x.PlotID == info.PlotID);
        dsItem.PushShortIntoStack(1 if mapInfo else 0);
        writer.PushIntoStack(dsItem);
return True; | [
"wzf_88@qq.com"
] | wzf_88@qq.com |
d9d2239e1b1af794739b1c6def6e3cfe648785d9 | ddda55fcfc84ac5cd78cfc5c336a3df0b9096157 | /projects/gd32f303-demo/board/SConscript | 060557bb0a4e56f4bc0b79aa63de2f2cbaa77954 | [
"Apache-2.0"
] | permissive | liu-delong/lu_xing_xiang_one_os | 701b74fceb82dbb2806518bfb07eb85415fab43a | 0c659cb811792f2e190d5a004a531bab4a9427ad | refs/heads/master | 2023-06-17T03:02:13.426431 | 2021-06-28T08:12:41 | 2021-06-28T08:12:41 | 379,661,507 | 2 | 2 | Apache-2.0 | 2021-06-28T10:08:10 | 2021-06-23T16:11:54 | C | UTF-8 | Python | false | false | 1,037 | import os
import osconfig
from build_tools import *
sys.path.append(Env['OS_ROOT'] + '/drivers/hal/gd/scripts/')
pwd = PresentDir()
LIBS = []
LIBPATH = []
# add general drivers
src = Split('''
board.c
CubeMX_Config/Src/gd32f30x_it.c
CubeMX_Config/Src/system_gd32f30x.c
''')
# path include path in project
path = [pwd]
path += [pwd + '/ports']
path += [pwd + '/../../../drivers/hal/gd/GD32F30x_HAL/GD32F30x_standard_peripheral/Include']
path += [pwd + '/../../../drivers/hal/gd/GD32F30x_HAL/CMSIS/GD/GD32F30x/Include']
path += [pwd + '/CubeMX_Config/Inc']
path += [pwd + '/../../../drivers/hal/gd/GD32F30x_HAL/CMSIS']
if osconfig.CROSS_TOOL == 'gcc':
src += [pwd + '/startup/startup_gd32f30x_hd_gcc.s']
elif osconfig.CROSS_TOOL == 'keil':
src += [pwd + '/startup/startup_gd32f30x_hd_arm.s']
elif osconfig.CROSS_TOOL == 'iar':
src += [pwd + '/startup/startup_gd32f30x_hd_iar.s']
CPPDEFINES = ['GD32F30X_HD']
group = AddCodeGroup('bsp', src, depend = [''], CPPPATH = path, CPPDEFINES = CPPDEFINES)
Return('group')
| [
"cmcc_oneos@cmiot.chinamobile.com"
] | cmcc_oneos@cmiot.chinamobile.com | |
d9ae372f842f837ad2873725740adc882eeccff2 | 684a9016bf00e132eab3c9cf4534639ae096cfc5 | /Main/dlnaupnpserver/ServerClass.py | ab0249376c9d385dcead51ad84cea188e86f85d2 | [
"BSD-3-Clause",
"MIT"
] | permissive | pszafer/dlna_upnp_invention | d97a0c641d1d7b170378f0fad5b978d8e5576966 | 497d173a9e3883412dbbb17cafa826a0394ff849 | refs/heads/master | 2021-01-02T09:35:04.628203 | 2012-07-10T15:02:53 | 2012-07-10T15:02:53 | 1,980,638 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,922 | py | '''
Created on 05-07-2011
@copyright: 2011,
@author: Pawel Szafer
@license: Licensed under the BSD license
http://www.opensource.org/licenses/bsd-license.php
@contact: pszafer@gmail.com
@version: 0.8
@note:this is only test module
'''
from twisted.internet import reactor
from modCoherence.base import Coherence
import gnome.ui
import gnomevfs
import gettext
import os
import struct
import sys
class ServerClass(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
print "initek"
def check_device(self, device):
print "check device"
print "found device %s of type %s - %r" %(device.get_friendly_name(),
device.get_device_type(),
device.client)
def start(self):
print "I'm started"
config = {'logmode':'warning'}
c = Coherence(config)
print "to connect"
c.connect(self.check_device, 'Coherence.UPnP.Device.detection_completed')
print "start"
#myClass = ServerClass()
#reactor.callWhenRunning(ServerClass().start)
#reactor.run()
#header = {}
#header['user-agent'] = 'Microsoft-Windows/6.1 UPnP/1.0 Windows-Media-Player/12.0.7601.17514 DLNADOC/1.50 (MS-DeviceCaps/1024)'
#test = header['user-agent'].find('blee')
#print test
#filename = "file:///home/xps/Wideo/test/test2/Friends_S06_E20.avi"
#filename = "/home/xps/.thumbnails/normal/f1d2e7cf33db9de55a6fe49b91a63b1b.png"
#hash_from_path = str(id(filename))
#print hash_from_path
import subprocess
def getFileMetadata(filename):
result = subprocess.Popen(["ffprobe", filename],
stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
duration_line = [x for x in result.stdout.readlines() if "Duration" in x]
duration = duration_line[(duration_line.index('Duration: ',) + len('Duration: ')):duration_line.index(', start')]
return duration
def create_thumbnail_via_gnome(uri):
mimetype = gnomevfs.get_mime_type(uri)
thumbFactory = gnome.ui.ThumbnailFactory(gnome.ui.THUMBNAIL_SIZE_NORMAL)
if thumbFactory.can_thumbnail(uri, mimetype,0):
thumbnail = thumbFactory.generate_thumbnail(uri, mimetype)
print "here"
if thumbnail != None:
thumbFactory.save_thumbnail(thumbnail, uri, 0)
print "passed"
#uri = "file:///home/xps/Wideo/test/test2/Friends_S06_E20.avi"
#create_thumbnail_via_gnome(uri)
#Duration: 00:21:55.64, start: 0.000000, bitrate: 1485 kb/s
# print "test"
#import Image
#im = Image.open("/home/xps/Obrazy/toyota_public/toyota_1.jpg")
#print im.size
#int = "aaa"
#b= None
#for i in im.size:
# b += str(i)+"x"
#b = b[:len(b)-1]
#print b
#a = [66.25, 333, 335, 1, 1234.5]
#
#
#print a
#print a[:2]
#
#for i in range(0, 2):
# print a[i]
#itemmimetype = "x-mkv"
#itemmimetype = "avi"
#print itemmimetype.replace("x-", "")
#
#zara = {}
#
#zara['test'] = "aaa"
#zara['test2'] = "bbb"
#
#for i in zara:
# print i[0]
# print i[1]
from StringIO import StringIO
APP="dlnaupnpserver"
DIR=os.path.dirname (__file__) + '/locale'
#locale.setlocale(locale.LC_ALL, '')
#gettext.bindtextdomain(APP, DIR)
#gettext.textdomain(APP)
#_ = gettext.gettext
#gettext.install(APP, './locale', unicode=1)
#translations = gettext.translation(APP, "./locale", languages=['pl'])
#translations.install()
#print _("Image")
y = "hehehehe"
#b =
PROJECT_DIR = os.path.normpath(os.path.dirname(__file__))
new_dir, _ = os.path.split(PROJECT_DIR)
print sys.path
sys.path.insert(0, new_dir)
print sys.path
#liczba = round(286/72,4)
#
#liczba = (286 + 72 // 2) // 72
#print liczba
#print round(liczba,0)
#dur = str(getFileMetadata("/home/xps/Wideo/test/test2/Friends_S06_E20.avi"))
#ind = dur.index(', start')
#print ind
#print max(dur)
#dur1 = dur[(dur.index('Duration: ',) + len('Duration: ')):dur.index(', start')]
#print dur1
#print "stop" | [
"pszafer@gmail.com"
] | pszafer@gmail.com |
15ba5830a4ff5c10b5f9b04726ad9ef517dec34e | 0e647273cffc1fb6cbd589fa3c7c277b221ba247 | /configs/hpt-pretrain/chexpert_rm_color/base-chexpert_rm_color-config.py | bf0ba57feabab69c163a32da34d296ce91df454b | [
"Apache-2.0"
] | permissive | Berkeley-Data/OpenSelfSup | e9976bf011b69ebf918506ba184f464b1073ec13 | 221191b88d891de57725b149caf237ffef72e529 | refs/heads/master | 2023-05-12T07:34:52.268476 | 2021-04-08T00:58:37 | 2021-04-08T00:58:37 | 343,654,823 | 0 | 1 | Apache-2.0 | 2021-04-08T00:58:37 | 2021-03-02T05:20:27 | Python | UTF-8 | Python | false | false | 2,019 | py | _base_ = '../../base.py'
# model settings
model = dict(
type='MOCO',
pretrained=None,
queue_len=65536,
feat_dim=128,
momentum=0.999,
backbone=dict(
type='ResNet',
depth=50,
in_channels=3,
out_indices=[4], # 0: conv-1, x: stage-x
norm_cfg=dict(type='BN')),
neck=dict(
type='NonLinearNeckV1',
in_channels=2048,
hid_channels=2048,
out_channels=128,
with_avg_pool=True),
head=dict(type='ContrastiveHead', temperature=0.2))
# dataset settings
data_source_cfg = dict(
type='ImageNet',
memcached=False,
mclient_path='/not/used')
data_train_list = "data/chexpert/meta/train-val.txt"
data_train_root = "data/chexpert"
dataset_type = 'ContrastiveDataset'
img_norm_cfg = dict(mean=[0.5028, 0.5028, 0.5028], std=[0.2919, 0.2919, 0.2919])
train_pipeline = [
dict(type='RandomResizedCrop', size=224, scale=(0.2, 1.)),
dict(type='RandomGrayscale', p=0.2),
dict(
type='RandomAppliedTrans',
transforms=[
dict(
type='GaussianBlur',
sigma_min=0.1,
sigma_max=2.0)
],
p=0.5),
dict(type='RandomHorizontalFlip'),
dict(type='ToTensor'),
dict(type='Normalize', **img_norm_cfg),
]
data = dict(
imgs_per_gpu=128, # total 64*4=256
workers_per_gpu=4,
drop_last=True,
train=dict(
type=dataset_type,
data_source=dict(
list_file=data_train_list, root=data_train_root,
**data_source_cfg),
pipeline=train_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.03, weight_decay=0.0001, momentum=0.9)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0.)
# cjrd added this flag, since OSS didn't support training by iters(?)
by_iter=True
log_config = dict(
interval=25,
by_epoch=False,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
dict(type='TensorboardLoggerHook', by_epoch=False)
])
| [
"taeil.goh@gmail.com"
] | taeil.goh@gmail.com |
ce2901dd98d7ea783a9d14e70295121f5c69db4c | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dli/huaweicloudsdkdli/v1/model/export_sql_job_result_request.py | ce6445aee0752af2e044cada575e4d159696ae5d | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,877 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ExportSqlJobResultRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'job_id': 'str',
'body': 'ExportSqlResultRequestBody'
}
attribute_map = {
'job_id': 'job_id',
'body': 'body'
}
def __init__(self, job_id=None, body=None):
"""ExportSqlJobResultRequest
The model defined in huaweicloud sdk
:param job_id: 作业ID
:type job_id: str
:param body: Body of the ExportSqlJobResultRequest
:type body: :class:`huaweicloudsdkdli.v1.ExportSqlResultRequestBody`
"""
self._job_id = None
self._body = None
self.discriminator = None
self.job_id = job_id
if body is not None:
self.body = body
@property
def job_id(self):
"""Gets the job_id of this ExportSqlJobResultRequest.
作业ID
:return: The job_id of this ExportSqlJobResultRequest.
:rtype: str
"""
return self._job_id
@job_id.setter
def job_id(self, job_id):
"""Sets the job_id of this ExportSqlJobResultRequest.
作业ID
:param job_id: The job_id of this ExportSqlJobResultRequest.
:type job_id: str
"""
self._job_id = job_id
@property
def body(self):
"""Gets the body of this ExportSqlJobResultRequest.
:return: The body of this ExportSqlJobResultRequest.
:rtype: :class:`huaweicloudsdkdli.v1.ExportSqlResultRequestBody`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this ExportSqlJobResultRequest.
:param body: The body of this ExportSqlJobResultRequest.
:type body: :class:`huaweicloudsdkdli.v1.ExportSqlResultRequestBody`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExportSqlJobResultRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
19788b4aba6724c127c6db8862d91a21a8586c99 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/applicationinsights/azure-mgmt-applicationinsights/azure/mgmt/applicationinsights/v2021_08_01/models/__init__.py | e592b28040ea7a510acc836e19f07934db25ff4d | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 2,077 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import ManagedServiceIdentity
from ._models_py3 import Resource
from ._models_py3 import SystemData
from ._models_py3 import TrackedResource
from ._models_py3 import UserAssignedIdentity
from ._models_py3 import Workbook
from ._models_py3 import WorkbookError
from ._models_py3 import WorkbookErrorDefinition
from ._models_py3 import WorkbookInnerErrorTrace
from ._models_py3 import WorkbookResource
from ._models_py3 import WorkbookResourceIdentity
from ._models_py3 import WorkbookUpdateParameters
from ._models_py3 import WorkbooksListResult
from ._application_insights_management_client_enums import CategoryType
from ._application_insights_management_client_enums import CreatedByType
from ._application_insights_management_client_enums import Kind
from ._application_insights_management_client_enums import ManagedServiceIdentityType
from ._application_insights_management_client_enums import SharedTypeKind
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"ManagedServiceIdentity",
"Resource",
"SystemData",
"TrackedResource",
"UserAssignedIdentity",
"Workbook",
"WorkbookError",
"WorkbookErrorDefinition",
"WorkbookInnerErrorTrace",
"WorkbookResource",
"WorkbookResourceIdentity",
"WorkbookUpdateParameters",
"WorkbooksListResult",
"CategoryType",
"CreatedByType",
"Kind",
"ManagedServiceIdentityType",
"SharedTypeKind",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
96855b7975e0f1672164b652aaf3d0254e458dfe | 271c7959a39f3d7ff63dddf285004fd5badee4d9 | /venv/Lib/site-packages/alembic/testing/mock.py | 08a756cbc27e1fab3cda7021d4cbb7b54f3f0187 | [
"MIT"
] | permissive | natemellendorf/configpy | b6b01ea4db1f2b9109fd4ddb860e9977316ed964 | 750da5eaef33cede9f3ef532453d63e507f34a2c | refs/heads/master | 2022-12-11T05:22:54.289720 | 2019-07-22T05:26:09 | 2019-07-22T05:26:09 | 176,197,442 | 4 | 1 | MIT | 2022-12-08T02:48:51 | 2019-03-18T03:24:12 | Python | UTF-8 | Python | false | false | 791 | py | # testing/mock.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Import stub for mock library.
NOTE: copied/adapted from SQLAlchemy master for backwards compatibility;
this should be removable when Alembic targets SQLAlchemy 1.0.0
"""
from __future__ import absolute_import
from ..util.compat import py3k
if py3k:
from unittest.mock import MagicMock, Mock, call, patch, ANY
else:
try:
from mock import MagicMock, Mock, call, patch, ANY # noqa
except ImportError:
raise ImportError(
"SQLAlchemy's test suite requires the "
"'mock' library as of 0.8.2.")
| [
"nate.mellendorf@gmail.com"
] | nate.mellendorf@gmail.com |
ea7556a795f4b376364386c416a52e7d6026666c | ace30d0a4b1452171123c46eb0f917e106a70225 | /filesystems/vnx_rootfs_lxc_ubuntu64-16.04-v025-openstack-compute/rootfs/usr/lib/python2.7/dist-packages/nova/conf/notifications.py | 0fbb4593cda237954c01736736ed53ac75382305 | [
"Python-2.0"
] | permissive | juancarlosdiaztorres/Ansible-OpenStack | e98aa8c1c59b0c0040c05df292964520dd796f71 | c01951b33e278de9e769c2d0609c0be61d2cb26b | refs/heads/master | 2022-11-21T18:08:21.948330 | 2018-10-15T11:39:20 | 2018-10-15T11:39:20 | 152,568,204 | 0 | 3 | null | 2022-11-19T17:38:49 | 2018-10-11T09:45:48 | Python | UTF-8 | Python | false | false | 3,947 | py | # Copyright (c) 2016 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
notifications_group = cfg.OptGroup(
name='notifications',
title='Notifications options',
help="""
Most of the actions in Nova which manipulate the system state generate
notifications which are posted to the messaging component (e.g. RabbitMQ) and
can be consumed by any service outside the Openstack. More technical details
at http://docs.openstack.org/developer/nova/notifications.html
""")
ALL_OPTS = [
cfg.StrOpt(
'notify_on_state_change',
choices=(None, 'vm_state', 'vm_and_task_state'),
deprecated_group='DEFAULT',
help="""
If set, send compute.instance.update notifications on
instance state changes.
Please refer to
https://docs.openstack.org/nova/latest/reference/notifications.html for
additional information on notifications.
Possible values:
* None - no notifications
* "vm_state" - notifications are sent with VM state transition information in
the ``old_state`` and ``state`` fields. The ``old_task_state`` and
``new_task_state`` fields will be set to the current task_state of the
instance.
* "vm_and_task_state" - notifications are sent with VM and task state
transition information.
"""),
cfg.BoolOpt(
'notify_on_api_faults',
default=False,
deprecated_group='DEFAULT',
deprecated_name='notify_api_faults',
help="""
If enabled, send api.fault notifications on caught exceptions in the
API service.
"""),
cfg.StrOpt(
'default_level',
default='INFO',
choices=('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
deprecated_group='DEFAULT',
deprecated_name='default_notification_level',
help="Default notification level for outgoing notifications."),
cfg.StrOpt(
'default_publisher_id',
default='$my_ip',
deprecated_group='DEFAULT',
help="""
Default publisher_id for outgoing notifications. If you consider routing
notifications using different publisher, change this value accordingly.
Possible values:
* Defaults to the IPv4 address of this host, but it can be any valid
oslo.messaging publisher_id
Related options:
* my_ip - IP address of this host
"""),
cfg.StrOpt(
'notification_format',
choices=['unversioned', 'versioned', 'both'],
default='both',
deprecated_group='DEFAULT',
help="""
Specifies which notification format shall be used by nova.
The default value is fine for most deployments and rarely needs to be changed.
This value can be set to 'versioned' once the infrastructure moves closer to
consuming the newer format of notifications. After this occurs, this option
will be removed (possibly in the "P" release).
Possible values:
* unversioned: Only the legacy unversioned notifications are emitted.
* versioned: Only the new versioned notifications are emitted.
* both: Both the legacy unversioned and the new versioned notifications are
emitted. (Default)
The list of versioned notifications is visible in
http://docs.openstack.org/developer/nova/notifications.html
"""),
]
def register_opts(conf):
conf.register_group(notifications_group)
conf.register_opts(ALL_OPTS, group=notifications_group)
def list_opts():
return {notifications_group: ALL_OPTS}
| [
"jcdiaztorres96@gmail.com"
] | jcdiaztorres96@gmail.com |
a446f60b5e3cc2db0a5399fdccafb587ed11a532 | c8c855a6ebb3b3101e5c3a80b94514c36b103495 | /semana_2/serie_tv.py | b9a2da939fc0ae024cc6661b2e2a9305923e2f70 | [] | no_license | K-A-R-L-A-Robles/poo-1719110219 | 835965c0e3100c9d6770678eb67920945942fa80 | 7d1fc57cd4157e5b52a153210311821d8290144d | refs/heads/master | 2022-11-03T04:54:42.675869 | 2020-06-15T03:46:42 | 2020-06-15T03:46:42 | 265,970,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | class serie_tv:
"Atríbuto"
genero= "drama"
actores= "30"
horario= "7-8pm"
canal= "308"
difinicion = "hd"
edad= "mayores_13"
capitulos = "10"
idioma= "inglés-español"
duracion= "1hora"
trama="tristeza"
"Métodos"
def entretener(self):
print("entretener")
def emociones(self):
print("emociones")
def aprendizaje(self):
print("aprendizaje")
def dinero(self):
print("dinero")
def audencia(self):
print("audencia")
def _init_(self):
print("atributos serie_tv")
print("genero="+str(self.genero))
print("actores"+str(self.actores))
print("horario="+str (self.horario))
print("canal="+str(self.canal))
print("definicion"+str(self.difinicion))
print("edad="+str(self.edad))
print("capítulos="+str(self.capitulos))
print("idioma="+str(self.idioma))
print("duracion="+str(self.duracion))
print("trama="+str(self.trama))
objeto = serie_tv()
objeto.entretener()
objeto.emociones()
objeto.aprendizaje()
objeto.dinero()
objeto.audencia()
objeto._init_() | [
"replituser@example.com"
] | replituser@example.com |
fa88201c4c9b2211e6fc5b0e29819c8d8cb30a1e | 4ff94bdde94640d65c5a429be78cff5c8169689f | /spacescoops/compile.py | 371b1473878434d170213dfa66c1a19ebe30e789 | [] | no_license | unawe/spacescoop | db91058af55fcc51bb6535e89bb3b5f29fb75493 | 35caab11c556c124d04ea8fcb3ad012af7e5e39f | refs/heads/main | 2021-07-24T18:39:09.931385 | 2021-06-14T16:09:38 | 2021-06-14T16:09:38 | 315,893,040 | 2 | 1 | null | 2021-06-14T16:01:23 | 2020-11-25T09:40:44 | JavaScript | UTF-8 | Python | false | false | 717 | py | import os
from django.conf import settings
from django_ext.compiler import PdfCompiler
from .models import Article
OUT_PATH = os.path.join(settings.MEDIA_ROOT, 'articles', 'download')
OUT_URL = os.path.join(settings.MEDIA_URL, 'articles', 'download')
PRINT_PREVIEW_URLPATH = 'scoops:print-preview'
def pdf_filename(obj):
return 'spacescoop-%s%s-%s.pdf' % (obj.code, obj.language_code, obj.slug)
compiler = PdfCompiler(Article, OUT_PATH, OUT_URL, pdf_filename, PRINT_PREVIEW_URLPATH)
def make_pdf(code, lang, site_url=None):
if not site_url:
site_url = settings.SITE_URL
compiler.make_pdf(code, lang, site_url)
def get_pdf(code, lang):
return compiler.get_pdf(code, lang, 'scoops')
| [
"edward@gomez.me.uk"
] | edward@gomez.me.uk |
a29a1f36d0199651c392bdc7122776de7e49a978 | d1f1f05e4713c4011634f0e4247798f31d58c3a6 | /scanning/base_raster_slow_scan.py | 3fc9545e482a9605f3f36e04c9fb64000623039b | [
"BSD-3-Clause"
] | permissive | patrickurban/ScopeFoundry | 8463695277bfb9c0b267b3b6f02fe5a1d20b4fd9 | 4c5a9430fad0b63d39014bed81a6ffc0af07c4df | refs/heads/master | 2021-01-01T16:48:24.432671 | 2017-06-08T23:26:26 | 2017-06-08T23:26:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,841 | py | from .base_raster_scan import BaseRaster2DScan
from ScopeFoundry import h5_io
import numpy as np
import time
import os
class BaseRaster2DSlowScan(BaseRaster2DScan):
name = "base_raster_2Dslowscan"
def run(self):
S = self.settings
#Hardware
# self.apd_counter_hc = self.app.hardware_components['apd_counter']
# self.apd_count_rate = self.apd_counter_hc.apd_count_rate
# self.stage = self.app.hardware_components['dummy_xy_stage']
# Data File
# H5
# Compute data arrays
self.compute_scan_arrays()
self.initial_scan_setup_plotting = True
self.display_image_map = np.zeros(self.scan_shape, dtype=float)
while not self.interrupt_measurement_called:
try:
# h5 data file setup
self.t0 = time.time()
if self.settings['save_h5']:
self.h5_file = h5_io.h5_base_file(self.app, measurement=self)
self.h5_file.attrs['time_id'] = self.t0
H = self.h5_meas_group = h5_io.h5_create_measurement_group(self, self.h5_file)
#create h5 data arrays
H['h_array'] = self.h_array
H['v_array'] = self.v_array
H['range_extent'] = self.range_extent
H['corners'] = self.corners
H['imshow_extent'] = self.imshow_extent
H['scan_h_positions'] = self.scan_h_positions
H['scan_v_positions'] = self.scan_v_positions
H['scan_slow_move'] = self.scan_slow_move
H['scan_index_array'] = self.scan_index_array
# start scan
self.pixel_i = 0
self.current_scan_index = self.scan_index_array[0]
self.pixel_time = np.zeros(self.scan_shape, dtype=float)
if self.settings['save_h5']:
self.pixel_time_h5 = H.create_dataset(name='pixel_time', shape=self.scan_shape, dtype=float)
self.pre_scan_setup()
self.move_position_start(self.scan_h_positions[0], self.scan_v_positions[0])
for self.pixel_i in range(self.Npixels):
if self.interrupt_measurement_called: break
i = self.pixel_i
self.current_scan_index = self.scan_index_array[i]
kk, jj, ii = self.current_scan_index
h,v = self.scan_h_positions[i], self.scan_v_positions[i]
if self.pixel_i == 0:
dh = 0
dv = 0
else:
dh = self.scan_h_positions[i] - self.scan_h_positions[i-1]
dv = self.scan_v_positions[i] - self.scan_v_positions[i-1]
if self.scan_slow_move[i]:
self.move_position_slow(h,v, dh, dv)
if self.settings['save_h5']:
self.h5_file.flush() # flush data to file every slow move
#self.app.qtapp.ProcessEvents()
time.sleep(0.01)
else:
self.move_position_fast(h,v, dh, dv)
self.pos = (h,v)
# each pixel:
# acquire signal and save to data array
pixel_t0 = time.time()
self.pixel_time[kk, jj, ii] = pixel_t0
if self.settings['save_h5']:
self.pixel_time_h5[kk, jj, ii] = pixel_t0
self.collect_pixel(self.pixel_i, kk, jj, ii)
S['progress'] = 100.0*self.pixel_i / (self.Npixels)
finally:
self.post_scan_cleanup()
if hasattr(self, 'h5_file'):
print('h5_file', self.h5_file)
try:
self.h5_file.close()
except ValueError as err:
self.log.warning('failed to close h5_file: {}'.format(err))
if not self.settings['continuous_scan']:
break
print(self.name, 'done')
def move_position_start(self, h,v):
self.stage.settings.x_position.update_value(h)
self.stage.settings.y_position.update_value(v)
def move_position_slow(self, h,v, dh, dv):
self.stage.settings.x_position.update_value(h)
self.stage.settings.y_position.update_value(v)
def move_position_fast(self, h,v, dh, dv):
self.stage.settings.x_position.update_value(h)
self.stage.settings.y_position.update_value(v)
#x = self.stage.settings['x_position']
#y = self.stage.settings['y_position']
#x = self.stage.settings.x_position.read_from_hardware()
#y = self.stage.settings.y_position.read_from_hardware()
#print(x,y)
def pre_scan_setup(self):
print(self.name, "pre_scan_setup not implemented")
# hardware
# create data arrays
# update figure
def collect_pixel(self, pixel_num, k, j, i):
# collect data
# store in arrays
print(self.name, "collect_pixel", pixel_num, k,j,i, "not implemented")
def post_scan_cleanup(self):
print(self.name, "post_scan_setup not implemented")
def new_pt_pos(self, x,y):
self.move_position_start(x, y)
| [
"esbarnard@lbl.gov"
] | esbarnard@lbl.gov |
4666470fddd067baae63e9674af988c217531a52 | ff7d3116024c9df01b94191ddfa334e4a6782ae6 | /arbeid/wsgi.py | 7e7bcbe2174f9dbca3af07aa337ee738a4dfd3fc | [
"MIT"
] | permissive | jhnnsrs/arbeider | f5f708ee1026a9e9573a6f8a87c3b9e2fd6b5e33 | 4c5637913331c998a262ae0deca516b236845200 | refs/heads/master | 2021-05-26T10:31:16.279628 | 2020-04-08T13:40:26 | 2020-04-08T13:40:26 | 254,095,863 | 0 | 0 | MIT | 2020-04-08T13:40:28 | 2020-04-08T13:29:31 | null | UTF-8 | Python | false | false | 389 | py | """
WSGI config for arbeid project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'arbeid.settings')
application = get_wsgi_application()
| [
"jhnnsrs@gmail..com"
] | jhnnsrs@gmail..com |
cea6a9d224a588d0e6741186eb9b225d866a0cf1 | 06292f96cba132ca57777672a447cfff7c5abee6 | /week5/tut/submit/2.py | 965de3af9b37ce5f09db1489af166faccca72298 | [] | no_license | kietteik/ppl | 1746440b12affe71e67d6f958922b32b1fdaab5c | 2ee60582e81595b8d8b5d0f8212d20151cfe9264 | refs/heads/master | 2023-03-01T00:24:36.969189 | 2021-01-31T05:15:13 | 2021-01-31T05:15:13 | 305,802,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from functools import reduce
def flatten(lst):
'''2. a'''
return [item for sub_lst in lst for item in sub_lst]
def flatten(lst):
'''2. b'''
if not lst:
return []
return lst[0] + flatten(lst[1:])
def flatten(lst):
'''2. c'''
return list(reduce(lambda x, y: x + y, lst, []))
| [
"kietteikdoi@gmail.com"
] | kietteikdoi@gmail.com |
c5e0d6a51c84cd160f45fb4691d64910fd51cf86 | 2579f37a13cfbb47944c5b81c6e83ca710b29f88 | /Server/core/Server_commication_User.py | df8157c2a18a3c9ad89863727fd336831b399315 | [] | no_license | YangQian1992/FTP | 932f32d5ed934bae295bd674757f7af23d0ad1ba | 87d3a78522e7eb8058def1d74d7f32f0f61f1b86 | refs/heads/master | 2020-03-31T16:28:35.146329 | 2018-10-10T06:53:12 | 2018-10-10T06:53:12 | 152,376,641 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,942 | py | import sys
import os
import hashlib
from Server.config.Server_config_info import server_config_info
#建立基础常量
IP_PORT = (server_config_info['SERVER_IP'],server_config_info['SERVER_PORT'])
class user_info:
def __init__(self):
#sys.path[0]='G:\\PycharmProjects\\项目\\ftp_proj_base\\Server\\core'
server_path_li = sys.path[0].split(os.sep)[:-1]
#server_path_li =['G:','PycharmProjects','项目','ftp_proj_base','Server']
server_path_li.extend(['usr&pwd','username&password'])
# server_path_li =['G:','PycharmProjects','项目','ftp_proj_base','Server',
# 'usr&pwd','username&password']
self.server_path = (os.sep).join(server_path_li)
#server_path='G:\\PycharmProjects\\项目\\ftp_proj_base\\Server\\usr&pwd\\username&password'
self.ftp_root = server_config_info['FTP_ROOT'] #家目录
self.auth_key = server_config_info['AUTH_KEY']
self.coding = server_config_info['CODING']
def get_pwd(self,pwd):
md5_obj = hashlib.md5(pwd.encode(self.coding))
md5_obj.update(self.auth_key.encode(self.coding))
return md5_obj.hexdigest()
#将用户密码本中的信息首先加载到一个字典中
#{'杨倩':{'password':password(md5),'times':0,'root_path',root_path},
# '张三':{'password':password(md5)........}}
def load_user_info(self):
self.user_info_dic = {}
with open(self.server_path,encoding=self.coding,mode='r') as f:
for info in f:
username, password = info.split()
root_path = '%s%s%s' % (self.ftp_root,os.sep,username)
self.user_info_dic[username] ={'password':password,
'times': 0,
'root_path':root_path}
if not os.path.exists(root_path):
os.mkdir(root_path)
#服务器判定客户端登录是否成功的方法
def login(self,usr,pwd):
pwd = self.get_pwd(pwd)
#如果用户名不在文本中
if usr not in self.user_info_dic.keys():
return [False,'登录失败','用户名不存在,请注册!']
if (self.user_info_dic[usr] !='') and (self.user_info_dic[usr]['times'] < 3) \
and (self.user_info_dic[usr]['password'] == pwd):
self.user_info_dic[usr]['times'] +=0
return [True,'登录成功!',self.user_info_dic[usr]['root_path']]
elif self.user_info_dic[usr] != '' and self.user_info_dic[usr]['times'] < 3\
and self.user_info_dic[usr]['password'] != pwd:
self.user_info_dic[usr]['times'] += 1
return [False,'登录失败,密码错误,还剩%d次机会!' % (3-self.user_info_dic[usr]['times'])]
elif self.user_info_dic[usr] != '' and self.user_info_dic[usr]['times'] == 3\
and self.user_info_dic[usr]['password'] != pwd:
return [False,'登录失败,账户被锁,隔一段时间再登录吧!']
else:
return [False,'登录失败,当前用户不存在,请先注册!']
#服务端判定是否注册成功的方法
def register(self,usr,pwd):
if usr in self.user_info_dic.keys():
return [False, '你注册的用户名以存在,请更换']
else:
pwd = self.get_pwd(pwd)
self.user_info_dic[usr] = {'password':pwd,'times':0}
#将注册成功后的信息写入密码本
with open(self.server_path, encoding=self.coding,mode='a') as f:
f.write('\n%s %s' % (usr,pwd))
root_path = '%s%s%s' % (self.ftp_root,os.sep,usr)
os.mkdir(root_path) #根据注册成功后的用户的信息,建立专属家目录
self.user_info_dic[usr]['root_path'] = root_path #更新家目录
return [True,'注册成功!', root_path]
| [
"1289089651@qq.com"
] | 1289089651@qq.com |
863e8c47634a573e339090752dad971e80cb3be0 | 795b68819d51af14dfabb8dbe40c9e8153029188 | /make_spiral.py | e65ca25352a9beeefce09d476853f079e5c97963 | [] | no_license | MotazBellah/Code-Challenge | 507f1fd3d5b3265e54905979c80d609afd81c54d | c38c95239193e26c1a88f6736d2ab9ee37185964 | refs/heads/master | 2022-02-25T02:54:10.216892 | 2022-02-19T19:28:05 | 2022-02-19T19:28:05 | 193,115,018 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | # https://www.codewars.com/kata/534e01fbbb17187c7e0000c6/train/python
# Your task, is to create a NxN spiral with a given size.
# For example, spiral with size 5 should look like this:
# 00000
# ....0
# 000.0
# 0...0
# 00000
# Return value should contain array of arrays, of 0 and 1, for example for given size 5 result should be:
# [[1,1,1,1,1],[0,0,0,0,1],[1,1,1,0,1],[1,0,0,0,1],[1,1,1,1,1]]
# Because of the edge-cases for tiny spirals, the size will be at least 5.
# General rule-of-a-thumb is, that the snake made with '1' cannot touch to itself.
def spiralize(size):
if size <= 0:
return []
core = [ [[1,1,1,1], [0,0,0,1], [1,0,0,1], [1,1,1,1]], [[1]], [[1,1],[0,1]], [[1,1,1],[0,0,1],[1,1,1]] ][size%4]
while len(core) < size:
for x in [0,1]:
core.insert(0, [ x for i in core[0] ] )
core.append([ x for i in core[0] ])
for line in core:
line.insert(0, x)
line.append(x)
core[1][0] = int(not x)
return core
| [
"engineer.android@yahoo.com"
] | engineer.android@yahoo.com |
6036c62127661ec04d262385fa810421752a0bde | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02647/s587144244.py | 32c73c2e731aa2116ac14b30639794e570dcf3d6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 465 | py | def action(A, N):
B = [0] * N
for i, bright in enumerate(A):
s = max(0, i - bright)
B[s] += 1
e = min(N, i + bright)
if e < N - 1:
B[e + 1] -= 1
for i in range(N - 1):
B[i + 1] += B[i]
return B
def main():
N, K = map(int, input().split())
A = list(map(int, input().split()))
for i in range(K):
A = action(A, N)
if (A[0] == N) & all(A):
break
print(*A, sep=" ")
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
99fd7c389db08acda41a846ad6ffa8cc4039453c | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /D6XfxhRobdQvbKX4v_13.py | 5561fb6cf05642ceb60cb7daef49927c53d87e6a | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,088 | py | """
You are given three inputs: a string, one letter, and a second letter.
Write a function that returns `True` if every instance of the first letter
occurs **before** every instance of the second letter.
### Examples
first_before_second("a rabbit jumps joyfully", "a", "j") ➞ True
# Every instance of "a" occurs before every instance of "j".
first_before_second("knaves knew about waterfalls", "k", "w") ➞ True
first_before_second("happy birthday", "a", "y") ➞ False
# The "a" in "birthday" occurs after the "y" in "happy".
first_before_second("precarious kangaroos", "k", "a") ➞ False
### Notes
* All strings will be in lower case.
* All strings will contain the first and second letters at least **once**.
"""
def first_before_second(s, first, second):
ind1 = []
for m in enumerate(s):
if m[1] == first:
ind1.append(m[0])
ind2 = []
for m in enumerate(s):
if m[1] == second:
ind2.append(m[0])
if max(ind1) < min(ind2):
return True
else:
return False
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
89a3190c1a4237cb81fa42e43e1b2a8bf7ff2925 | 27d92b640d3814fa5dc8040b79a99d077cba3aae | /cpython/Tools/scripts/combinerefs.py | e10e49ad7c72b37991ca927e805f40223ee75636 | [
"GPL-1.0-or-later",
"Python-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-python-cwi"
] | permissive | ms-iot/python | 99a0f4d3dd3926703d49b75910c78c69cdb7aed7 | a8f8fba1214289572713520f83409762a4446fea | refs/heads/develop | 2022-12-07T23:26:31.339811 | 2017-11-17T02:24:32 | 2017-11-17T02:24:32 | 31,045,533 | 73 | 39 | BSD-3-Clause | 2022-11-16T20:24:24 | 2015-02-20T01:01:09 | Python | UTF-8 | Python | false | false | 4,414 | py | #! /usr/bin/env python3
"""
combinerefs path
A helper for analyzing PYTHONDUMPREFS output.
When the PYTHONDUMPREFS envar is set in a debug build, at Python shutdown
time Py_Finalize() prints the list of all live objects twice: first it
prints the repr() of each object while the interpreter is still fully intact.
After cleaning up everything it can, it prints all remaining live objects
again, but the second time just prints their addresses, refcounts, and type
names (because the interpreter has been torn down, calling repr methods at
this point can get into infinite loops or blow up).
Save all this output into a file, then run this script passing the path to
that file. The script finds both output chunks, combines them, then prints
a line of output for each object still alive at the end:
address refcnt typename repr
address is the address of the object, in whatever format the platform C
produces for a %p format code.
refcnt is of the form
"[" ref "]"
when the object's refcount is the same in both PYTHONDUMPREFS output blocks,
or
"[" ref_before "->" ref_after "]"
if the refcount changed.
typename is object->ob_type->tp_name, extracted from the second PYTHONDUMPREFS
output block.
repr is repr(object), extracted from the first PYTHONDUMPREFS output block.
CAUTION: If object is a container type, it may not actually contain all the
objects shown in the repr: the repr was captured from the first output block,
and some of the containees may have been released since then. For example,
it's common for the line showing the dict of interned strings to display
strings that no longer exist at the end of Py_Finalize; this can be recognized
(albeit painfully) because such containees don't have a line of their own.
The objects are listed in allocation order, with most-recently allocated
printed first, and the first object allocated printed last.
Simple examples:
00857060 [14] str '__len__'
The str object '__len__' is alive at shutdown time, and both PYTHONDUMPREFS
output blocks said there were 14 references to it. This is probably due to
C modules that intern the string "__len__" and keep a reference to it in a
file static.
00857038 [46->5] tuple ()
46-5 = 41 references to the empty tuple were removed by the cleanup actions
between the times PYTHONDUMPREFS produced output.
00858028 [1025->1456] str '<dummy key>'
The string '<dummy key>', which is used in dictobject.c to overwrite a real
key that gets deleted, grew several hundred references during cleanup. It
suggests that stuff did get removed from dicts by cleanup, but that the dicts
themselves are staying alive for some reason. """
import re
import sys
# Generate lines from fileiter. If whilematch is true, continue reading
# while the regexp object pat matches line. If whilematch is false, lines
# are read so long as pat doesn't match them. In any case, the first line
# that doesn't match pat (when whilematch is true), or that does match pat
# (when whilematch is false), is lost, and fileiter will resume at the line
# following it.
def read(fileiter, pat, whilematch):
for line in fileiter:
if bool(pat.match(line)) == whilematch:
yield line
else:
break
def combine(fname):
f = open(fname)
fi = iter(f)
for line in read(fi, re.compile(r'^Remaining objects:$'), False):
pass
crack = re.compile(r'([a-zA-Z\d]+) \[(\d+)\] (.*)')
addr2rc = {}
addr2guts = {}
before = 0
for line in read(fi, re.compile(r'^Remaining object addresses:$'), False):
m = crack.match(line)
if m:
addr, addr2rc[addr], addr2guts[addr] = m.groups()
before += 1
else:
print('??? skipped:', line)
after = 0
for line in read(fi, crack, True):
after += 1
m = crack.match(line)
assert m
addr, rc, guts = m.groups() # guts is type name here
if addr not in addr2rc:
print('??? new object created while tearing down:', line.rstrip())
continue
print(addr, end=' ')
if rc == addr2rc[addr]:
print('[%s]' % rc, end=' ')
else:
print('[%s->%s]' % (addr2rc[addr], rc), end=' ')
print(guts, addr2guts[addr])
f.close()
print("%d objects before, %d after" % (before, after))
if __name__ == '__main__':
combine(sys.argv[1])
| [
"juanyaw@exchange.microsoft.com"
] | juanyaw@exchange.microsoft.com |
d521134ee74c8fcdabfbf01d11310000cd770fd8 | 5e55858ef75c62921f8be2f2f6d19ebfe98a2d88 | /5kyu/two_strings.py | 3d70a10b12c4e90f6d10caddd13f76cfbe11546d | [] | no_license | Uthaeus/codewars_python | 63abd96b66cb81f86e05b244a24c2c4de2f321e4 | 4b00c74ce0173bcf8527da7e4ef381d6802dde16 | refs/heads/master | 2021-06-01T10:34:30.688941 | 2020-11-01T19:17:32 | 2020-11-01T19:17:32 | 134,450,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | # Input Strings a and b: For every character in string a swap the casing of every occurrence of the same character in string b. Then do the same casing swap with the inputs reversed. Return a single string consisting of the changed version of a followed by the changed version of b. A char of a is in b regardless if it's in upper or lower case - see the testcases too.
def arrayify(s):
result = []
for c in s:
result.append(c)
return result
def work_on_strings(a,b):
x = 0
a = arrayify(a)
b = arrayify(b)
while x < len(a):
y = 0
while y < len(b):
if a[x].lower() == b[y].lower():
b[y] = b[y].swapcase()
y += 1
x += 1
x = 0
while x < len(b):
y = 0
while y < len(a):
if b[x].lower() == a[y].lower():
a[y] = a[y].swapcase()
y += 1
x += 1
return "".join(a) + "".join(b)
print(work_on_strings("abc","cde")) #, "abCCde" | [
"romanlavery@gmail.com"
] | romanlavery@gmail.com |
69bbcb51206c9a2dcbff2e6cc1eaf45346263a7a | 191a7f83d964f74a2b3c7faeb4fc47d9c63d521f | /.history/main_20210529113936.py | 21e3cf9871b43012cc06f98807e2c0817b4cbdec | [] | no_license | AndreLiu1225/Kinder-Values-Survey | 2a317feee8d5b17c27da2b2116742656e35d8ab9 | 090c27da0c822abb7dfc0ec6e13ae1b3dcb7bbf3 | refs/heads/master | 2023-05-03T00:26:00.481423 | 2021-06-04T03:24:19 | 2021-06-04T03:24:19 | 371,989,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,921 | py | from flask import Flask, render_template, redirect, url_for, flash, request
from flask_sqlalchemy import SQLAlchemy
from flask_wtf import FlaskForm
from wtforms import StringField, TextField, SubmitField, IntegerField, SelectField, RadioField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError
import datetime
import matplotlib.pyplot as plt
app = Flask(__name__)
app.config['SECRET_KEY'] = "0c8973c8a5e001bb0c816a7b56c84f3a"
app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///site.db"
db = SQLAlchemy(app)
class Survey(db.Model):
age = db.Column(db.Integer, nullable=False, primary_key=True)
email = db.Column(db.String(50), unique=True, nullable=False)
profession = db.Column(db.String(50), nullable=False)
power = db.Column(db.Integer, nullable=False)
tradition = db.Column(db.Integer, nullable=False)
achievement = db.Column(db.Integer, nullable=False)
stimulation = db.Column(db.Integer, nullable=False)
hedonism = db.Column(db.Integer, nullable=False)
conformity = db.Column(db.Integer, nullable=False)
security = db.Column(db.Integer, nullable=False)
self_direction = db.Column(db.Integer, nullable=False)
benevolence = db.Column(db.Integer, nullable=False)
universalism = db.Column(db.Integer, nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.datetime.utcnow)
def __repr__(self):
return f"Survey('{self.age}', '{self.name}', '{self.date_posted}')"
class MCQ(FlaskForm):
email = StringField("What is your email?", validators=[DataRequired(), Email(message=('Not a valid email address')), Length(max=50)])
age = IntegerField("Please enter your age", validators=[DataRequired()])
profession = StringField("What is your profession?", validators=[DataRequired(), Length(max=30)])
# Self-Enhancement
power = IntegerField("Do you desire a higher social status and dominance over others? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
hedonism = IntegerField("Is personal gratification the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
achievement = IntegerField("Is achievement according to social standards important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
# Conservation
tradition = IntegerField("Do you care about preserving traditions? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
conformity = IntegerField("Do you think restraint of actions against social norms is important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
security = IntegerField("Do you value safety, harmony and stability of society, of relationships, and of self? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
# Openness to change
stimulation = IntegerField("Do you prefer novel and exciting challenges in life? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
self_direction = IntegerField("Do you think independent thought and action are important (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
# Self-transcendence
benevolence = IntegerField("Are preserving and enhancing the welfare of your friends and family the most important? (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
universalism = IntegerField("I find it important to understand, tolerate, appreciate and protect all ethnicities and people. (4- It is my utmost priority, 3-It is important, 2-Doesn't bother me, 1-Not even a thought)", validators=[DataRequired()])
submit = SubmitField("Submit")
@app.route('/', methods=['POST','GET'])
def values_quiz():
form = MCQ()
if form.validate_on_submit():
post = Survey(age=form.age.data, email=form.email.data, profession=form.profession.data, power=form.power.data,
tradition=form.tradition.data, achievement=form.achievement.data, stimulation=form.stimulation.data,
hedonism=form.hedonism.data, conformity=form.conformity.data, self_direction=form.self_direction.data,
benevolence=form.benevolence.data, universalism=form.universalism.data)
# if Survey.is_email_in_database(form.email.data):
# flash(f"The user with {form.email.data} has already filled the survey", "danger")
db.session.add(post)
db.session.commit()
flash(f'Survey is completed by {form.email.data}', 'success')
return redirect(url_for('data_dashboard'))
else:
flash('Ensure all questions are answered correctly', 'warning')
return render_template('MCQ.html', form=form)
@app.route('/results', methods=['GET'])
def data_dashboard():
power = request.form.get('power')
tradition = request.form.get('tradition')
achievement = request.form.get('achievement')
stimulation = request.form.get('stimulation')
hedonism = request.form.get('hedonism')
conformity = request.form.get('conformity')
security = request.form.get('security')
self_direction = request.form.get('self_direction')
benevolence = request.form.get('benevolence')
universalism = request.form.get('universalism')
values = [power, tradition, achievement, stimulation, hedonism, conformity, security, self_direction, benevolence, universalism]
values_labels = ['Openness to Change', 'Self-Transcendence',
'Conservation', 'Self-Enchancement']
openness = [hedonism, stimulation, self_direction]
self_enhancement = [hedonism, achievement, power]
conservation = [tradition, conformity, security]
self_trans = [universalism, benevolence]
total_sum = sum(values)
open_sum = round(sum(openness)/total_sum*100)
enhance_sum = round(sum(self_enhancement)/total_sum*100)
trans_sum = round(sum(self_trans)/total_sum*100)
cons_sum = round(sum(conservation)/total_sum*100)
sum_v = [open_sum, enhance_sum, trans_sum, cons_sum]
# initiating the range of y ticks
ran = [20,40,60,80,100]
plt.xticks(ran, values_labels)
# Calling bar plot function
plt.bar(ran, sum_v)
plt.title('Percentage obtained on each dynamic values')
plt.ylabel('Percentage')
plt.xlabel('Dynamic value types')
return render_template('data_dashboard.html', image=plt.show())
if __name__ == "__main__":
app.run(debug=True)
| [
"andreliu2004@gmail.com"
] | andreliu2004@gmail.com |
98dea60f0782c91c98ee5568018f96a7c2856a04 | 532e4cdd9c0b72c444b13ef669b788f3c629074d | /expo/scheduler/data_transfer.py | c893cfa4ba5327b4d2a2c5fedbf8f0ed2460ac61 | [] | no_license | bethlakshmi/GBE2 | 3b2b5a677637759fd22220f3272336d9dfc5750e | e8e030a12946901ccb9f56a9a3c6a022a6a8c5c8 | refs/heads/master | 2021-01-23T09:02:40.253304 | 2019-05-01T01:39:51 | 2019-05-01T01:39:51 | 17,325,575 | 7 | 3 | null | 2019-04-29T03:52:18 | 2014-03-01T22:14:43 | Python | UTF-8 | Python | false | false | 4,791 | py |
class Person(object):
def __init__(self,
booking_id=None,
user=None,
public_id=None,
public_class="Performer",
role=None,
label=None,
worker=None):
if worker:
self.role = worker.role
self.user = worker._item.as_subtype.user_object
self.public_class = worker._item.as_subtype.__class__.__name__
self.public_id = worker._item.pk
else:
self.user = user
self.public_id = public_id
self.role = role
self.public_class = public_class
self.booking_id = booking_id
self.label = label
class Casting(object):
def __init__(self,
booking):
self.booking_id = booking.pk
self.role = booking.resource.actresource.role
self.act = booking.resource.actresource._item
class ScheduleItem(object):
def __init__(self,
user=None,
group_id=None,
event=None,
role=None,
label=None,
booking_id=None):
self.user = user
self.group_id = group_id
self.role = role
self.label = label
self.event = event
self.booking_id = booking_id
class Answer(object):
def __init__(self,
question=None,
value=None):
self.question = question
self.value = value
class Warning(object):
def __init__(self,
code=None,
user=None,
occurrence=None,
details=None):
self.code = code
self.user = user
self.occurrence = occurrence
self.details = details
class Error(object):
def __init__(self,
code=None,
details=None):
self.code = code
self.details = details
class GeneralResponse(object):
def __init__(self,
warnings=[],
errors=[]):
self.warnings = warnings
self.errors = errors
class OccurrenceResponse(GeneralResponse):
def __init__(self,
occurrence=None,
warnings=[],
errors=[]):
self.occurrence = occurrence
super(OccurrenceResponse, self).__init__(warnings, errors)
class OccurrencesResponse(GeneralResponse):
def __init__(self,
occurrences=[],
warnings=[],
errors=[]):
self.occurrences = occurrences
super(OccurrencesResponse, self).__init__(warnings, errors)
class PersonResponse(GeneralResponse):
def __init__(self,
booking_id=None,
warnings=[],
errors=[]):
self.booking_id = booking_id
super(PersonResponse, self).__init__(warnings, errors)
class PeopleResponse(GeneralResponse):
def __init__(self,
people=[],
warnings=[],
errors=[]):
self.people = people
super(PeopleResponse, self).__init__(warnings, errors)
class CastingResponse(GeneralResponse):
def __init__(self,
castings=[],
warnings=[],
errors=[]):
self.castings = castings
super(CastingResponse, self).__init__(warnings, errors)
class ScheduleResponse(GeneralResponse):
def __init__(self,
schedule_items=[],
warnings=[],
errors=[]):
self.schedule_items = schedule_items
super(ScheduleResponse, self).__init__(warnings, errors)
class RolesResponse(GeneralResponse):
def __init__(self,
roles=[],
warnings=[],
errors=[]):
self.roles = roles
super(RolesResponse, self).__init__(warnings, errors)
class EvalInfoResponse(GeneralResponse):
def __init__(self,
occurrences=[],
questions=[],
answers=[],
warnings=[],
errors=[]):
self.occurrences = occurrences
self.questions = questions
self.answers = answers
super(EvalInfoResponse, self).__init__(warnings, errors)
class EvalSummaryResponse(GeneralResponse):
def __init__(self,
occurrences=[],
questions=[],
summaries={},
count=None,
warnings=[],
errors=[]):
self.occurrences = occurrences
self.questions = questions
self.summaries = summaries
self.count = count
super(EvalSummaryResponse, self).__init__(warnings, errors)
| [
"bethlakshmi@gmail.com"
] | bethlakshmi@gmail.com |
965cf3b5c4d6b07442c90621692a3c7c1c91d249 | 729243a020efed22445849c5cd95e78506f9845d | /Semana02/prog07.py | e9f57ada5268f62c217a8b7dbd7690afb0d86dd5 | [] | no_license | yuri-almeid/SEII-YuriLimaAlmeida | 6f031667943f469827bcb89db968d5b7a0188c2f | 81fbf275fcc74a99d8b3630c953aece416546416 | refs/heads/main | 2023-08-10T19:49:18.162469 | 2021-10-07T11:02:25 | 2021-10-07T11:02:25 | 347,435,913 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py |
def pass_func():
pass
def hello_func():
return 'Hello Function'
def hello_func_greeting(greeting, name='You'):
return '{}, {}'.format(greeting, name)
print(pass_func)
print(hello_func())
print(hello_func().upper())
print(hello_func_greeting('Bye'))
def student_info(*args, **kwargs):
print(args)
print(kwargs)
student_info('Math', 'Art', name='John', age=22)
courses = ['Math', 'Art']
info = {'name': 'John', 'age': 22}
student_info(courses, info)
student_info(*courses, **info)
month_days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def is_leap(year):
"""Return True for leap years, False for non-leap years."""
return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
def days_in_month(year, month):
"""Return number of days in that month in that year."""
if not 1 <= month <= 12:
return 'Invalid Month'
if month == 2 and is_leap(year):
return 29
return month_days[month]
print(days_in_month(2017, 2))
| [
"yurilima95@gmail.com"
] | yurilima95@gmail.com |
906f027f40c8a6910f73ee7bc7f3b9e2a224bb64 | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/tlm/training/debugging_model/run_ck_lr_opt_copied.py | f06da55e5bada644bbbda70ee07688dfe9b8d142 | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 1,400 | py |
from my_tf import tf
from taskman_client.wrapper import report_run
from tf_util.tf_logging import tf_logging, MuteEnqueueFilter
from tlm.model.base import BertConfig
from tlm.training.debugging_model.classification_opt_copied import model_fn_classification
from tlm.training.flags_wrapper import get_input_files_from_flags, show_input_files
from tlm.training.input_fn import input_fn_builder_use_second_input
from tlm.training.train_config import TrainConfigEx
from tlm.training.train_flags import *
from trainer.tpu_estimator import run_estimator
def run_classification_w_second_input():
input_files = get_input_files_from_flags(FLAGS)
bert_config = BertConfig.from_json_file(FLAGS.bert_config_file)
train_config = TrainConfigEx.from_flags(FLAGS)
show_input_files(input_files)
model_fn = model_fn_classification(
bert_config,
train_config,
)
input_fn = input_fn_builder_use_second_input(FLAGS)
if FLAGS.do_predict:
tf_logging.addFilter(MuteEnqueueFilter())
result = run_estimator(model_fn, input_fn)
return result
@report_run
def main(_):
return run_classification_w_second_input()
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
flags.mark_flag_as_required("run_name")
tf.compat.v1.app.run()
| [
"lesterny@gmail.com"
] | lesterny@gmail.com |
c40b20a7acd6aaddf25f265954ebe8ee03ced8d1 | 396d5838873d84145e5b125a8c1bc3db2313ac8f | /tests/conftest.py | cbca1a49f99b4a6ca40353b42289414df64a8a97 | [
"MIT"
] | permissive | magnologan/sqlalchemy_aio | 96480d2be46bae804b6bdc0e59568fb240d12b0e | 915b00bd024b29afa712749695c0ad4ced0e9c37 | refs/heads/master | 2023-05-26T18:44:48.870565 | 2016-12-19T01:36:00 | 2016-12-19T01:36:00 | 77,687,272 | 0 | 0 | NOASSERTION | 2023-05-17T01:59:44 | 2016-12-30T13:17:13 | Python | UTF-8 | Python | false | false | 1,110 | py | import pytest
from sqlalchemy import Column, Integer, MetaData, Table, create_engine, event
from sqlalchemy_aio import ASYNCIO_STRATEGY
def fix_pysqlite_transactions(engine):
"""See http://docs.sqlalchemy.org/en/latest/dialects/
sqlite.html#serializable-isolation-savepoints-transactional-ddl
"""
@event.listens_for(engine, 'connect')
def connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, 'begin')
def begin(conn):
# emit our own BEGIN
conn.execute('BEGIN')
@pytest.fixture
def engine(event_loop):
engine = create_engine('sqlite://', strategy=ASYNCIO_STRATEGY,
loop=event_loop)
fix_pysqlite_transactions(engine._engine)
return engine
@pytest.fixture
def mytable():
metadata = MetaData()
mytable = Table(
'mytable', metadata,
Column('id', Integer, primary_key=True),
)
return mytable
| [
"frazer@frazermclean.co.uk"
] | frazer@frazermclean.co.uk |
cbc9e2fa617c023d59f637da868347726bef60c7 | 7ba5ec9aa9ddca3f9b3384fc4457b0a865c2a0a1 | /src/397.py | 536dc63ec74a520ba95396ed731a8cdb9ac5918e | [] | no_license | ecurtin2/Project-Euler | 71f79ee90a9abd0943421677d78a6c087419e500 | 79479da7a45b3ae67c0c7ea24da5f7d43c6f25d3 | refs/heads/master | 2021-03-19T14:52:57.045443 | 2018-04-12T22:05:37 | 2018-04-12T22:05:37 | 100,059,180 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | """
On the parabola y = x2/k, three points A(a, a2/k), B(b, b2/k) and C(c, c2/k) are chosen.
Let F(K, X) be the number of the integer quadruplets (k, a, b, c) such that at least one angle of the triangle ABC is 45-degree, with 1 ≤ k ≤ K and -X ≤ a < b < c ≤ X.
For example, F(1, 10) = 41 and F(10, 100) = 12492.
Find F(106, 109).
""" | [
"ecurtin2@illinois.edu"
] | ecurtin2@illinois.edu |
fb1fc36e9c6ef029516926c94dcb77ea38e889f8 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /StructuredFund/creationRedemption/YW_GPMM_SZXJ_068.py | b95472014ee745c0484b0c9227976c740b7b3614 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,515 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/StructuredFund/serviceCreationRedemption")
from mainService import *
from QueryStructuredFundInfo import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
class YW_GPMM_SZXJ_068(xtp_test_case):
# YW_GPMM_SZXJ_068
def test_YW_GPMM_SZXJ_068(self):
title='限价委托卖-部成撤单 '
#定义当前测试用例的期待值
#期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
#xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '部撤',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStructuredFundInfo('999999','2','0','2','0','S',case_goal['期望状态'],Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'用例测试结果':stkparm['返回结果'],
'测试错误原因':'获取下单参数失败,'+stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type':Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'quantity': 200
}
ParmIni(Api,case_goal['期望状态'],wt_reqs['price_type'])
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ',' + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
fcd715b6160b971e53992a3d28a0a9732a3fc8b2 | a893537a71aa285071a68035c968ba6f5c0ca57d | /ch08/79/79_one_layer_linear.py | 920abd33cdaa3671c3f03e7b8b86fca8e0ed3ea2 | [] | no_license | sinchir0/2020_NLP100 | 0a1810b0c299c29fa1a811f68fa87be74f9b3cf9 | 772123da5b5ac4094c26fdce2e192637dc55190a | refs/heads/main | 2023-07-18T04:03:04.123302 | 2021-09-08T22:54:44 | 2021-09-08T22:54:44 | 257,416,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,181 | py | # 79. 多層ニューラルネットワーク
# 問題78のコードを改変し,バイアス項の導入や多層化など,ニューラルネットワークの形状を変更しながら,高性能なカテゴリ分類器を構築せよ.
from typing import Tuple, Union

import matplotlib.pyplot as plt
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
class TextDataset(Dataset):
    """Map-style dataset pairing feature vectors with their labels.

    Stores references to the given containers (no copies), so the
    underlying tensors remain shared with the caller.
    """

    def __init__(self, X, y):
        self.X = X
        self.y = y

    def __len__(self):
        """Number of samples (rows of X)."""
        return len(self.X)

    def __getitem__(self, idx):
        """Return the ``(features, label)`` pair at position ``idx``."""
        return self.X[idx], self.y[idx]
class Net(nn.Module):
    """Single linear layer followed by a softmax over the class axis.

    Bug fix: the original ignored ``in_shape``/``out_shape`` and
    hard-coded ``nn.Linear(300, 4)``; the constructor arguments are now
    wired through, which is backward-compatible since the caller passes
    300-dimensional features and 4 classes.
    """

    def __init__(self, in_shape: int, out_shape: int):
        super().__init__()
        # Use the constructor arguments instead of hard-coded 300/4.
        self.fc = nn.Linear(in_shape, out_shape, bias=True)
        # NOTE(review): the caller feeds this output to nn.CrossEntropyLoss,
        # which applies log-softmax internally; applying Softmax here as well
        # is unusual (it dampens gradients) but is kept to preserve behavior.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Return per-class probabilities of shape (batch, out_shape)."""
        x = self.fc(x)
        x = self.softmax(x)
        return x
def train_fn(model, loader, optimizer, loss) -> Tuple[float, float]:
    """Run one epoch of training and return (train_loss, valid_loss).

    Performs one optimizer step per batch of *loader* and accumulates the
    mean per-batch loss for the training split as well as for the
    module-level validation set (``valid_x`` / ``valid_y``).

    Bug fix: the return annotation was ``Union[float, float]``, which
    collapses to plain ``float``; the function actually returns a
    two-element tuple, so ``Tuple[float, float]`` is correct.
    """
    train_running_loss = 0.0
    valid_running_loss = 0.0
    for dataloader_x, dataloader_y in loader:
        optimizer.zero_grad()
        dataloader_y_pred_prob = model(dataloader_x)
        # Loss on the current training batch.
        dataloader_loss = loss(dataloader_y_pred_prob, dataloader_y)
        dataloader_loss.backward()
        # Average the per-batch losses over the epoch for both splits.
        train_running_loss += dataloader_loss.item() / len(loader)
        # NOTE(review): evaluating the full validation set once per batch is
        # costly; once per epoch would suffice — kept to preserve behavior.
        valid_running_loss += loss(model(valid_x), valid_y).item() / len(loader)
        optimizer.step()
    return train_running_loss, valid_running_loss
def calc_acc(model, train_x, y_true) -> float:
    """Return the accuracy (in percent) of *model* on *train_x* vs *y_true*."""
    # The predicted label for each row is the class with the highest score.
    _, predicted = torch.max(model(train_x), 1)
    matches = (predicted == y_true).sum().item()
    total = y_true.size(0)
    return (matches / total) * 100
def make_graph(value_dict: dict, value_name: str, method: str) -> None:
"""value_dictに関するgraphを生成し、保存する。"""
for phase in ["train", "valid"]:
plt.plot(value_dict[phase], label=phase)
plt.xlabel("epoch")
plt.ylabel(value_name)
plt.title(f"{value_name} per epoch")
plt.legend()
plt.savefig(f"{method}_{value_name}.png")
plt.close()
if __name__ == "__main__":
METHOD = "one_layer_linear"
if not torch.cuda.is_available():
print("No cuda")
PATH = ".."
device = (
torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
)
# 学習データの読み込み
train_x = torch.tensor(
np.load(f"{PATH}/70/train_vector.npy"), requires_grad=True
).to(device)
train_y = torch.tensor(np.load(f"{PATH}/70/train_label.npy")).to(device)
# 評価データの読み込み
valid_x = torch.tensor(
np.load(f"{PATH}/70/valid_vector.npy"), requires_grad=True
).to(device)
valid_y = torch.tensor(np.load(f"{PATH}/70/valid_label.npy")).to(device)
# modelの設定
model = Net(in_shape=train_x.shape[1], out_shape=4).to(device)
# loss, optimizerの設定
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
# DataLoaderの構築
dataset = TextDataset(train_x, train_y)
# parameterの更新
BATCHSIZE = 32
loader = DataLoader(dataset, batch_size=BATCHSIZE, shuffle=True)
train_losses = []
train_accs = []
valid_losses = []
valid_accs = []
EPOCH = 10
for epoch in tqdm(range(EPOCH)):
# 学習
train_running_loss, valid_running_loss = train_fn(
model, loader, optimizer, loss
)
# 訓練データでの損失の保存
train_losses.append(train_running_loss)
# 訓練データでの正解率の計算
train_acc = calc_acc(model, train_x, train_y)
# 訓練データでの正解率の保存
train_accs.append(train_acc)
# 検証データでの損失の保存
valid_losses.append(valid_running_loss)
# 検証データでの正解率の計算
valid_acc = calc_acc(model, valid_x, valid_y)
# 検証データでの正解率の保存
valid_accs.append(valid_acc)
# 20epoch毎にチェックポイントを生成
if epoch % 20 == 0:
torch.save(model.state_dict(), f"79_model_bs_epoch{epoch}.pth")
torch.save(
optimizer.state_dict(),
f"79_optimizer_epoch{epoch}.pth",
)
# グラフへのプロット
losses = {"train": train_losses, "valid": valid_losses}
accs = {"train": train_accs, "valid": valid_accs}
make_graph(losses, "losses", METHOD)
make_graph(accs, "accs", METHOD)
print(f"train_acc: {train_acc}")
print(f"valid_acc: {valid_acc}")
# train_acc: 76.90217391304348
# valid_acc: 78.71064467766116
| [
"s-saito@chic.ocn.ne.jp"
] | s-saito@chic.ocn.ne.jp |
fd004e23d8775a14f91d226e6f75843c058b5f43 | 369b7f114f9bd9b45dd5fef77a070cb73abb68d1 | /handle/itl/temp/handleForWh.py | adbc5ffe74a6e805871bce67b303dfa25a1570e6 | [] | no_license | lyjloveabc/thor_handle | d790ee25317f724825c94a6b346a034ec0ae6e3d | 8b9eda97ec873f3bf1732a428898a04d6a55c0af | refs/heads/master | 2021-12-27T10:15:16.668264 | 2021-08-16T13:45:34 | 2021-08-16T13:45:34 | 84,824,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | base_sql = 'INSERT INTO itl_job_title VALUES ("{id}", now(), now(), "{name}", "", "{remark}", "公司总部");'
with open('hehe.txt', 'r') as f:
index = 19
for line in f.readlines():
hehe = line[:-1].split(' ')
print(base_sql.format(id=index, name=hehe[0], remark=hehe[1]))
index += 1
| [
"546223592@qq.com"
] | 546223592@qq.com |
ae227652a174fe7076a5a971d1021bb31d494c08 | e2c84bbefe728e20042a6befdf9effd0480b6cf0 | /Text_processing/2. Character Multiplier.py | e2632389cf8db53afacd592a70c798b43c0c9eee | [] | no_license | vmakksimov/PythonFundamentals | ffe5f606f592a9f0650f45f225936f13f4659992 | 4a5b74e40cfaa19f777187404428e0dff9d66a16 | refs/heads/main | 2023-08-22T03:33:15.002041 | 2021-10-15T00:07:58 | 2021-10-15T00:07:58 | 410,258,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | strings = input().split()
string1 = strings[0]
string2 = strings[1]
total_sum = 0
first_sting = []
second_digit = []
for chr in string1:
first_sting.append(ord(chr))
for chr in string2:
second_digit.append(ord(chr))
cycle_iterations = min(len(first_sting), len(second_digit))
max_cycle = max(len(first_sting), len(second_digit)) - cycle_iterations
if len(first_sting) != len(second_digit):
if len(first_sting) > len(second_digit):
for i in range(0, cycle_iterations):
product = first_sting[i] * second_digit[i]
total_sum += product
for m in range(cycle_iterations, len(first_sting)):
total_sum += first_sting[m]
elif len(first_sting) < len(second_digit):
for i in range(0, cycle_iterations):
product = first_sting[i] * second_digit[i]
total_sum += product
for m in range(cycle_iterations, len(second_digit)):
total_sum += second_digit[m]
else:
for i in range(0, cycle_iterations):
product = first_sting[i] * second_digit[i]
total_sum += product
print(total_sum)
| [
"vmakksimov@gmail.com"
] | vmakksimov@gmail.com |
8100d9893fc619b66301d1b5f001351c8053147b | 3fcfdcca48457b6166c220e6cea31d610b87b7bf | /viseducat_exam/wizard/__init__.py | 76cb73290e35b6c8dbc6033c6988bced4fac0c0c | [] | no_license | celaltas/ViseducatCommunity | 8e0990667e847730f493ee2b223620c8476bc5c8 | 16ca6ce9ec31aa92bb9e5df934d97ee9b603f553 | refs/heads/master | 2023-05-05T10:15:39.215267 | 2021-05-24T20:47:24 | 2021-05-24T20:47:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | from . import held_exam
from . import room_distribution
from . import student_hall_tickets_wizard | [
"celal.tas123@gmail.com"
] | celal.tas123@gmail.com |
911acf65537c8dd5a41f4d656e1c3818bae26905 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2307/60581/234662.py | d8aff6f4b42dcf471b1cc7a0261a810c5e9f8fef | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | import sys
lst = []
for line in sys.stdin:
if line.strip() == '':
break
lst.append(line)
number = lst[0]
wholeCount = 0
beginNumber = 1
while wholeCount < int(number):
answer = []
numberOfList = int(lst[beginNumber])
numberList = lst[beginNumber+1].split()
for i in range(0,len(numberList)) :
if numberList.count(numberList[i]) >= numberOfList/2 :
if answer.count(numberList[i])==0 :
answer.append(numberList[i])
if len(answer) == 0:
answer.append(-1)
print(int(answer[0]))
beginNumber += 2
wholeCount += 1
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
a3a2782843a392afae3025bc78ecd02479d01184 | 19d9d25bf1de4007f42abc43acaa23d66c3428ab | /apps/about/admin.py | 8eeb24983365004463d91b92632619725eadc495 | [] | no_license | od-5/enjoy-sa | aa567268941d818212cf1e0b3358df470f4c29e9 | 5e3600aaab3e4f405680f9f32bffb4be0f2bf601 | refs/heads/master | 2021-01-10T05:48:06.442696 | 2016-03-23T11:48:49 | 2016-03-23T11:48:49 | 45,059,910 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | # coding=utf-8
from django.contrib import admin
from .models import About
__author__ = 'alexy'
class AboutAdmin(admin.ModelAdmin):
list_display = ('title',)
admin.site.register(About, AboutAdmin)
| [
"od-5@yandex.ru"
] | od-5@yandex.ru |
ba7472cea52dbd9a622962746d153fa5d21696f7 | 4723fed48970c7bcc50eaa3acbe1c66577f8f3cb | /ss/download/ss_coroutine.py | b939bdb56235bd2575c73012a01e8d8d6d4be429 | [] | no_license | myetc4/spider | c03157f2203ea3c0ae0af30dc66e810f0ca20d06 | 4ffb36bc2551c1494ebdf0357da69ebb1c6c524d | refs/heads/master | 2020-04-02T01:44:23.358947 | 2018-10-20T03:43:35 | 2018-10-20T03:43:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/10/18 22:07
# @Author : SmallStrong
# @Des :
# @File : ss_coroutine.py
# @Software: PyCharm
import sys
import os
# 被逼无奈
sys.path.append(os.getcwd().replace('/ss', ''))
from ss.spider_core import go
from ss.common.func import exe_time
from gevent import monkey, pool
import ss.config
monkey.patch_all()
@exe_time
def main():
p = pool.Pool(ss.config.COROUTINE_LIMIT_NUM)
while ss.config.FLAG:
p.spawn(go)
if __name__ == '__main__':
main()
| [
"393019766@qq.com"
] | 393019766@qq.com |
4374be84930a4f11eb60ad2e0cdd4aaf8ed777ac | bbf744bfbfd9a935bd98c7cf54152a5d41194161 | /chapter_05/e5-7_favorite_fruit.py | 784ea00cac661066d715db00f0f223d403b8582c | [] | no_license | terranigmark/python-crash-course-projects | 65a7863be2d26fe8b91ac452b12203386eb0259a | 79ed9ed8e6a1bf015990a9556689379274231d13 | refs/heads/master | 2022-12-05T21:59:00.352140 | 2020-08-21T04:59:50 | 2020-08-21T04:59:50 | 266,263,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py |
def main():
favorite_fruits = ['melon', 'watermelon', 'apple', 'pineapple', 'banana', 'cranberry']
for times in range(5):
fruit = str(input("Pick a fruit to know if it's on my list: "))
if fruit in favorite_fruits:
print(f"You really like {fruit}")
if __name__ == "__main__":
main() | [
"linnk99@gmail.com"
] | linnk99@gmail.com |
ae46917b4e5e1ee29516b367b756951f1ce8df78 | f60b964dc39ba54bb84f1c4949be3b91a92b8346 | /issue_order/tasks.py | c521a2ede5ab11c2f598a398a3bee9e017fc01e6 | [
"Apache-2.0"
] | permissive | jiejiang/courier | 4b0b4fc56c5510228ffcc4de51b074c7aff9502f | 6fdeaf041c77dba0f97e206adb7b0cded9674d3d | refs/heads/master | 2022-11-30T14:24:53.950502 | 2019-12-06T16:42:00 | 2019-12-06T16:42:00 | 195,387,643 | 0 | 0 | Apache-2.0 | 2022-11-22T01:22:33 | 2019-07-05T10:08:19 | Python | UTF-8 | Python | false | false | 2,650 | py | # *- coding: utf-8 -*
from __future__ import absolute_import
from django.utils.translation import ugettext as _
import datetime, sys
from celery import shared_task
from django.db import transaction
from mezzanine.conf import settings
from .models import CourierBatch, Profile
from .courier_systems import query_courier_batch
@shared_task
def sample_task():
print settings.COURIER_SYSTEMS
print CourierBatch.objects.filter(state=CourierBatch.STATUS[0][0]).all()
@shared_task
def sync_waiting_courier_batches():
for courier_batch in CourierBatch.objects.filter(state=CourierBatch.STATUS[0][0]):
try:
if not courier_batch.system in settings.COURIER_SYSTEMS:
raise Exception, "System not configured: %s" % courier_batch.system
system_config = settings.COURIER_SYSTEMS[courier_batch.system]
if not 'url_base' in system_config or not 'user_name' in system_config or not 'password' in system_config:
raise Exception, "Invalid system_config: %s" % str(system_config)
batch_obj = query_courier_batch(system_config['url_base'], system_config['user_name'],
system_config['password'], courier_batch.uuid)
if batch_obj['status'] == "Waiting":
courier_batch.status = _(u"等待中")
elif batch_obj['status'] == "Processing":
courier_batch.status = _(u"处理中")
elif batch_obj['status'] == "Completed":
courier_batch.state, courier_batch.status = CourierBatch.STATUS[1]
elif batch_obj['status'] == "Failed":
courier_batch.state, courier_batch.status = CourierBatch.STATUS[2]
if courier_batch.credit is not None:
with transaction.atomic():
profile = Profile.objects.select_for_update().get(user=courier_batch.user)
profile.credit += courier_batch.credit
profile.save()
courier_batch.credit = 0
elif batch_obj['status'] == "Deleted":
courier_batch.state, courier_batch.status = CourierBatch.STATUS[3]
else:
raise Exception, "Batch obj status invalid: %s" % str(batch_obj)
courier_batch.percentage = batch_obj['percentage']
courier_batch.message = batch_obj['message']
courier_batch.save()
except Exception, inst:
import traceback
traceback.print_exc(sys.stderr)
print >> sys.stderr, "Failed to sync batch: %s" % courier_batch.uuid | [
"mail.jie.jiang@gmail.com"
] | mail.jie.jiang@gmail.com |
f75e0e32e45291a28fc904599882abe66c9d2750 | e8ae11e5017507da59e2e92d423b6a1994490de4 | /env/lib/python2.7/site-packages/azure/mgmt/compute/models/virtual_machine_scale_set_ip_configuration.py | 191b9e2ebb24995c979775a4515a352dcd782cd6 | [] | no_license | teopeurt/ansible-ubuntu-server | 613d00cea28bc6531acf4a39aeeb9cd0baa2a391 | b5b6127d2ee9723c5088443efe2ffb8ae30cfea7 | refs/heads/master | 2021-06-28T12:49:50.935753 | 2017-07-31T17:34:33 | 2017-07-31T17:34:33 | 98,912,808 | 0 | 1 | null | 2020-07-24T00:05:31 | 2017-07-31T17:32:56 | Makefile | UTF-8 | Python | false | false | 3,399 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class VirtualMachineScaleSetIPConfiguration(SubResource):
"""
Describes a virtual machine scale set network profile's IP configuration.
:param id: Resource Id
:type id: str
:param name: Gets or sets the IP configuration name.
:type name: str
:param subnet: Gets or sets the subnet.
:type subnet: :class:`ApiEntityReference
<azure.mgmt.compute.models.ApiEntityReference>`
:param application_gateway_backend_address_pools: Gets or sets the
application gateway backend address pools.
:type application_gateway_backend_address_pools: list of
:class:`SubResource <azure.mgmt.compute.models.SubResource>`
:param load_balancer_backend_address_pools: Gets or sets the load
balancer backend address pools.
:type load_balancer_backend_address_pools: list of :class:`SubResource
<azure.mgmt.compute.models.SubResource>`
:param load_balancer_inbound_nat_pools: Gets or sets the load balancer
inbound nat pools.
:type load_balancer_inbound_nat_pools: list of :class:`SubResource
<azure.mgmt.compute.models.SubResource>`
"""
_validation = {
'name': {'required': True},
'subnet': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'ApiEntityReference'},
'application_gateway_backend_address_pools': {'key': 'properties.applicationGatewayBackendAddressPools', 'type': '[SubResource]'},
'load_balancer_backend_address_pools': {'key': 'properties.loadBalancerBackendAddressPools', 'type': '[SubResource]'},
'load_balancer_inbound_nat_pools': {'key': 'properties.loadBalancerInboundNatPools', 'type': '[SubResource]'},
}
def __init__(self, name, subnet, id=None, application_gateway_backend_address_pools=None, load_balancer_backend_address_pools=None, load_balancer_inbound_nat_pools=None):
super(VirtualMachineScaleSetIPConfiguration, self).__init__(id=id)
self.name = name
self.subnet = subnet
self.application_gateway_backend_address_pools = application_gateway_backend_address_pools
self.load_balancer_backend_address_pools = load_balancer_backend_address_pools
self.load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
| [
"me@teopeurt.com"
] | me@teopeurt.com |
a1de48a0569407301769af6688ace732cfce8112 | f9248ec00b661ee4790a780b7adaec79c0d68ec8 | /sumDigit.py | ed3440baa63aed50363a7e23b2a7dcc418c05215 | [] | no_license | michaelzh17/6001_Python | 0ec463f02840bf3162cd3247d76494d1592e82e3 | 53833604db4d769f71e63044813e3500f3e0fb6f | refs/heads/master | 2021-01-11T22:31:17.832117 | 2018-12-21T10:34:18 | 2018-12-21T10:34:18 | 78,979,863 | 0 | 0 | null | 2017-04-06T02:54:57 | 2017-01-15T00:07:38 | Python | UTF-8 | Python | false | false | 366 | py | #!/usr/bin/env python3
def sumDigits(s):
"""Assumes s is a string
Returns the sum of the decimal digits in s
For example, if s is 'a2b3c' it returns 5"""
sum_digit = 0
for e in s:
try:
sum_digit += int(e)
except ValueError:
continue
return sum_digit
s = 'x5a9n2'
a = sumDigits(s)
print(a)
| [
"macalzhang@gmail.com"
] | macalzhang@gmail.com |
be37b4349655bde35cdd96eb797c2a72b4d8bc78 | 704976ea552111c6a5af9cd7cb62b9d9abaf3996 | /pypy/module/test_lib_pypy/ctypes_tests/test_cfuncs.py | 5d3d816ee52701354d65e960dff269715097ef56 | [
"BSD-3-Clause"
] | permissive | mesalock-linux/mesapy | 4f02c5819ce7f2f6e249d34840f1aa097577645d | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | refs/heads/mesapy2.7 | 2023-08-16T21:33:02.239581 | 2019-08-13T10:29:43 | 2019-08-13T18:06:45 | 136,080,721 | 396 | 33 | NOASSERTION | 2020-04-01T03:05:18 | 2018-06-04T20:45:17 | Python | UTF-8 | Python | false | false | 6,882 | py | # A lot of failures in these tests on Mac OS X.
# Byte order related?
from ctypes import *
import py
from .support import BaseCTypesTestChecker
def setup_module(mod):
import conftest
mod._ctypes_test = str(conftest.sofile)
# this means you cannot run tests directly without invoking this
mod.TestCFunctions._dll = CDLL(_ctypes_test)
class TestCFunctions(BaseCTypesTestChecker):
def S(self):
return c_longlong.in_dll(self._dll, "last_tf_arg_s").value
def U(self):
return c_ulonglong.in_dll(self._dll, "last_tf_arg_u").value
def test_byte(self):
self._dll.tf_b.restype = c_byte
self._dll.tf_b.argtypes = (c_byte,)
assert self._dll.tf_b(-126) == -42
assert self.S() == -126
def test_byte_plus(self):
self._dll.tf_bb.restype = c_byte
self._dll.tf_bb.argtypes = (c_byte, c_byte)
assert self._dll.tf_bb(0, -126) == -42
assert self.S() == -126
def test_ubyte(self):
self._dll.tf_B.restype = c_ubyte
self._dll.tf_B.argtypes = (c_ubyte,)
assert self._dll.tf_B(255) == 85
assert self.U() == 255
def test_ubyte_plus(self):
self._dll.tf_bB.restype = c_ubyte
self._dll.tf_bB.argtypes = (c_byte, c_ubyte)
assert self._dll.tf_bB(0, 255) == 85
assert self.U() == 255
def test_short(self):
self._dll.tf_h.restype = c_short
self._dll.tf_h.argtypes = (c_short,)
assert self._dll.tf_h(-32766) == -10922
assert self.S() == -32766
def test_short_plus(self):
self._dll.tf_bh.restype = c_short
self._dll.tf_bh.argtypes = (c_byte, c_short)
assert self._dll.tf_bh(0, -32766) == -10922
assert self.S() == -32766
def test_ushort(self):
self._dll.tf_H.restype = c_ushort
self._dll.tf_H.argtypes = (c_ushort,)
assert self._dll.tf_H(65535) == 21845
assert self.U() == 65535
def test_ushort_plus(self):
self._dll.tf_bH.restype = c_ushort
self._dll.tf_bH.argtypes = (c_byte, c_ushort)
assert self._dll.tf_bH(0, 65535) == 21845
assert self.U() == 65535
def test_int(self):
self._dll.tf_i.restype = c_int
self._dll.tf_i.argtypes = (c_int,)
assert self._dll.tf_i(-2147483646) == -715827882
assert self.S() == -2147483646
def test_int_plus(self):
self._dll.tf_bi.restype = c_int
self._dll.tf_bi.argtypes = (c_byte, c_int)
assert self._dll.tf_bi(0, -2147483646) == -715827882
assert self.S() == -2147483646
def test_uint(self):
self._dll.tf_I.restype = c_uint
self._dll.tf_I.argtypes = (c_uint,)
assert self._dll.tf_I(4294967295) == 1431655765
assert self.U() == 4294967295
def test_uint_plus(self):
self._dll.tf_bI.restype = c_uint
self._dll.tf_bI.argtypes = (c_byte, c_uint)
assert self._dll.tf_bI(0, 4294967295) == 1431655765
assert self.U() == 4294967295
def test_long(self):
self._dll.tf_l.restype = c_long
self._dll.tf_l.argtypes = (c_long,)
assert self._dll.tf_l(-2147483646) == -715827882
assert self.S() == -2147483646
def test_long_plus(self):
self._dll.tf_bl.restype = c_long
self._dll.tf_bl.argtypes = (c_byte, c_long)
assert self._dll.tf_bl(0, -2147483646) == -715827882
assert self.S() == -2147483646
def test_ulong(self):
self._dll.tf_L.restype = c_ulong
self._dll.tf_L.argtypes = (c_ulong,)
assert self._dll.tf_L(4294967295) == 1431655765
assert self.U() == 4294967295
def test_ulong_plus(self):
self._dll.tf_bL.restype = c_ulong
self._dll.tf_bL.argtypes = (c_char, c_ulong)
assert self._dll.tf_bL(' ', 4294967295) == 1431655765
assert self.U() == 4294967295
def test_longlong(self):
self._dll.tf_q.restype = c_longlong
self._dll.tf_q.argtypes = (c_longlong, )
assert self._dll.tf_q(-9223372036854775806) == -3074457345618258602
assert self.S() == -9223372036854775806
def test_longlong_plus(self):
self._dll.tf_bq.restype = c_longlong
self._dll.tf_bq.argtypes = (c_byte, c_longlong)
assert self._dll.tf_bq(0, -9223372036854775806) == -3074457345618258602
assert self.S() == -9223372036854775806
def test_ulonglong(self):
self._dll.tf_Q.restype = c_ulonglong
self._dll.tf_Q.argtypes = (c_ulonglong, )
assert self._dll.tf_Q(18446744073709551615) == 6148914691236517205
assert self.U() == 18446744073709551615
def test_ulonglong_plus(self):
self._dll.tf_bQ.restype = c_ulonglong
self._dll.tf_bQ.argtypes = (c_byte, c_ulonglong)
assert self._dll.tf_bQ(0, 18446744073709551615) == 6148914691236517205
assert self.U() == 18446744073709551615
def test_float(self):
self._dll.tf_f.restype = c_float
self._dll.tf_f.argtypes = (c_float,)
assert self._dll.tf_f(-42.) == -14.
assert self.S() == -42
def test_float_plus(self):
self._dll.tf_bf.restype = c_float
self._dll.tf_bf.argtypes = (c_byte, c_float)
assert self._dll.tf_bf(0, -42.) == -14.
assert self.S() == -42
def test_double(self):
self._dll.tf_d.restype = c_double
self._dll.tf_d.argtypes = (c_double,)
assert self._dll.tf_d(42.) == 14.
assert self.S() == 42
def test_double_plus(self):
self._dll.tf_bd.restype = c_double
self._dll.tf_bd.argtypes = (c_byte, c_double)
assert self._dll.tf_bd(0, 42.) == 14.
assert self.S() == 42
def test_callwithresult(self):
def process_result(result):
return result * 2
self._dll.tf_i.restype = process_result
self._dll.tf_i.argtypes = (c_int,)
assert self._dll.tf_i(42) == 28
assert self.S() == 42
assert self._dll.tf_i(-42) == -28
assert self.S() == -42
def test_void(self):
self._dll.tv_i.restype = None
self._dll.tv_i.argtypes = (c_int,)
assert self._dll.tv_i(42) == None
assert self.S() == 42
assert self._dll.tv_i(-42) == None
assert self.S() == -42
# The following repeates the above tests with stdcall functions (where
# they are available)
try:
WinDLL
except NameError:
pass
else:
class stdcall_dll(WinDLL):
def __getattr__(self, name):
if name[:2] == '__' and name[-2:] == '__':
raise AttributeError(name)
func = self._FuncPtr(("s_" + name, self))
setattr(self, name, func)
return func
class TestStdcallCFunctions(TestCFunctions):
def setup_class(cls):
TestCFunctions.setup_class.im_func(cls)
cls._dll = stdcall_dll(_ctypes_test)
| [
"mssun@mesalock-linux.org"
] | mssun@mesalock-linux.org |
d3f4dc5383a82d4848dd5b8c53976b56998b176c | cbed9822648c05601fb84803c4cbfb63b3f9f6f5 | /supervised_learning/0x06-keras/5-main.py | e105ddb4ab40795846f405606c414ac52ea1abb9 | [] | no_license | thomasmontoya123/holbertonschool-machine_learning | 793ba1f732ccaf2e08b832f3c57e10d12eabe808 | 8f0f2ce67339e574b21d7dfbba7f88545adac807 | refs/heads/master | 2022-12-26T09:55:07.302952 | 2020-10-04T23:38:23 | 2020-10-04T23:38:23 | 279,631,927 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | #!/usr/bin/env python3
import numpy as np
import tensorflow as tf
build_model = __import__('1-input').build_model
optimize_model = __import__('2-optimize').optimize_model
one_hot = __import__('3-one_hot').one_hot
train_model = __import__('5-train').train_model
if __name__ == '__main__':
datasets = np.load('supervised_learning/data/MNIST.npz')
X_train = datasets['X_train']
X_train = X_train.reshape(X_train.shape[0], -1)
Y_train = datasets['Y_train']
Y_train_oh = one_hot(Y_train)
X_valid = datasets['X_valid']
X_valid = X_valid.reshape(X_valid.shape[0], -1)
Y_valid = datasets['Y_valid']
Y_valid_oh = one_hot(Y_valid)
np.random.seed(0)
tf.set_random_seed(0)
lambtha = 0.0001
keep_prob = 0.95
network = build_model(784, [256, 256, 10], ['relu', 'relu', 'softmax'], lambtha, keep_prob)
alpha = 0.001
beta1 = 0.9
beta2 = 0.999
optimize_model(network, alpha, beta1, beta2)
batch_size = 64
epochs = 5
train_model(network, X_train, Y_train_oh, batch_size, epochs, validation_data=(X_valid, Y_valid_oh)) | [
"tomasmontoya123@gmail.com"
] | tomasmontoya123@gmail.com |
0ecc04c7a3bebfd4f58ae6f8a34d8d47d464687f | 05e590af914370c3fe02526794ce9b41be893d2c | /day03/BMI多筆計算.py | c2dc0a100071d9bad093d8913fc909a910f75bed | [] | no_license | vincenttuan/yzu_python_20210414 | bf0f1d9f8549086008fe15701204dfc3a9ebf85a | b464c4691ce12e9076c8c2ab74158aeb4edc5bc7 | refs/heads/master | 2023-06-09T19:30:24.930453 | 2021-06-30T13:31:29 | 2021-06-30T13:31:29 | 362,433,389 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | import math
# 有三組資料 170, 50 ; 180, 70; 160, 60
def printBMI(h, w):
bmi = w / math.pow(h/100, 2)
#result = "過重" if bmi > 23 else "過輕" if bmi <= 18 else "正常"
result = "過輕"
if 18 < bmi <= 23:
result = "正常"
elif bmi > 23:
result = "過重"
print("h= %.1f w=%.1f bmi=%.2f result=%s" % (h, w, bmi, result))
printBMI(170, 50)
printBMI(180, 70)
printBMI(160, 60) | [
"vincentjava@yahoo.com.tw"
] | vincentjava@yahoo.com.tw |
b453077a97c2032779a9f82f95ec352f4048fa05 | 065e4cdb3b79c3697f323cbc3d29a79ca696b47f | /src/stomp/exc.py | d2e6aa7b1c92d9d1e93fd4c0dc3bd58ed9ee7f1b | [] | no_license | sousouindustries/python-stomp | 59aaa47884013ebdc3bfb6c7f4756ef3ee03547e | b2de7aa2f1658eaa49bffd977bd1c9630ef58f0c | refs/heads/master | 2021-01-10T03:35:04.103347 | 2016-01-08T16:35:10 | 2016-01-08T16:35:10 | 44,618,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py |
class FatalException(Exception):
pass
class InvalidCommandType(FatalException):
pass
class MalformedFrame(FatalException):
pass
class StompException(FatalException):
@classmethod
def fromframe(cls, frame):
return cls(frame)
class FrameNotConfirmed(Exception):
pass
| [
"cochiseruhulessin@gmail.com"
] | cochiseruhulessin@gmail.com |
ebfabd74d8b293ebd1f58bc34f2d72dfae2f3cc1 | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/tobami-littlechef/allPythonContent.py | 0551b5fc76bdb3a3ad48af4aad55b64b7d632ffb | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113,897 | py | __FILENAME__ = chef
#Copyright 2010-2014 Miquel Torres <tobami@gmail.com>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
"""Node configuration and syncing
See http://wiki.opscode.com/display/chef/Anatomy+of+a+Chef+Run
"""
import os
import shutil
import json
from copy import deepcopy
from fabric.api import *
from fabric.contrib.files import exists
from fabric import colors
from fabric.utils import abort
from fabric.contrib.project import rsync_project
from littlechef import cookbook_paths, whyrun, lib, solo
from littlechef import LOGFILE, enable_logs as ENABLE_LOGS
# Path to local patch
basedir = os.path.abspath(os.path.dirname(__file__).replace('\\', '/'))
def save_config(node, force=False):
"""Saves node configuration
if no nodes/hostname.json exists, or force=True, it creates one
it also saves to tmp_node.json
"""
filepath = os.path.join("nodes", env.host_string + ".json")
tmp_filename = 'tmp_{0}.json'.format(env.host_string)
files_to_create = [tmp_filename]
if not os.path.exists(filepath) or force:
# Only save to nodes/ if there is not already a file
print "Saving node configuration to {0}...".format(filepath)
files_to_create.append(filepath)
for node_file in files_to_create:
with open(node_file, 'w') as f:
f.write(json.dumps(node, indent=4, sort_keys=True))
return tmp_filename
def _get_ipaddress(node):
"""Adds the ipaddress attribute to the given node object if not already
present and it is correctly given by ohai
Returns True if ipaddress is added, False otherwise
"""
if "ipaddress" not in node:
with settings(hide('stdout'), warn_only=True):
output = sudo('ohai -l warn ipaddress')
if output.succeeded:
try:
node['ipaddress'] = json.loads(output)[0]
except ValueError:
abort("Could not parse ohai's output for ipaddress"
":\n {0}".format(output))
return True
return False
def sync_node(node):
"""Builds, synchronizes and configures a node.
It also injects the ipaddress to the node's config file if not already
existent.
"""
if node.get('dummy') or 'dummy' in node.get('tags', []):
lib.print_header("Skipping dummy: {0}".format(env.host))
return False
current_node = lib.get_node(node['name'])
# Always configure Chef Solo
solo.configure(current_node)
ipaddress = _get_ipaddress(node)
# Everything was configured alright, so save the node configuration
# This is done without credentials, so that we keep the node name used
# by the user and not the hostname or IP translated by .ssh/config
filepath = save_config(node, ipaddress)
try:
# Synchronize the kitchen directory
_synchronize_node(filepath, node)
# Execute Chef Solo
_configure_node()
finally:
_node_cleanup()
return True
def _synchronize_node(configfile, node):
"""Performs the Synchronize step of a Chef run:
Uploads all cookbooks, all roles and all databags to a node and add the
patch for data bags
Returns the node object of the node which is about to be configured,
or None if this node object cannot be found.
"""
msg = "Synchronizing nodes, environments, roles, cookbooks and data bags..."
if env.parallel:
msg = "[{0}]: {1}".format(env.host_string, msg)
print(msg)
# First upload node.json
remote_file = '/etc/chef/node.json'
put(configfile, remote_file, use_sudo=True, mode=400)
with hide('stdout'):
sudo('chown root:$(id -g -n root) {0}'.format(remote_file))
# Remove local temporary node file
os.remove(configfile)
# Synchronize kitchen
extra_opts = "-q"
if env.follow_symlinks:
extra_opts += " --copy-links"
ssh_opts = ""
if env.ssh_config_path:
ssh_opts += " -F %s" % os.path.expanduser(env.ssh_config_path)
if env.encrypted_data_bag_secret:
put(env.encrypted_data_bag_secret,
"/etc/chef/encrypted_data_bag_secret",
use_sudo=True,
mode=0600)
sudo('chown root:$(id -g -n root) /etc/chef/encrypted_data_bag_secret')
rsync_project(
env.node_work_path,
'./cookbooks ./data_bags ./roles ./site-cookbooks ./environments',
exclude=('*.svn', '.bzr*', '.git*', '.hg*'),
delete=True,
extra_opts=extra_opts,
ssh_opts=ssh_opts
)
if env.sync_packages_dest_dir and env.sync_packages_local_dir:
print("Uploading packages from {0} to remote server {2} directory "
"{1}").format(env.sync_packages_local_dir, env.sync_packages_dest_dir, env.host_string)
try:
rsync_project(
env.sync_packages_dest_dir,
env.sync_packages_local_dir+"/*",
exclude=('*.svn', '.bzr*', '.git*', '.hg*'),
delete=True,
extra_opts=extra_opts,
ssh_opts=ssh_opts
)
except:
print("Warning: package upload failed. Continuing cooking...")
_add_environment_lib() # NOTE: Chef 10 only
def build_dct(dic, keys, value):
"""Builds a dictionary with arbitrary depth out of a key list"""
key = keys.pop(0)
if len(keys):
dic.setdefault(key, {})
build_dct(dic[key], keys, value)
else:
# Transform cookbook default attribute strings into proper booleans
if value == "false":
value = False
elif value == "true":
value = True
# It's a leaf, assign value
dic[key] = deepcopy(value)
def update_dct(dic1, dic2):
"""Merges two dictionaries recursively
dic2 will have preference over dic1
"""
for key, val in dic2.items():
if isinstance(val, dict):
dic1.setdefault(key, {})
update_dct(dic1[key], val)
else:
dic1[key] = val
def _add_automatic_attributes(node):
"""Adds some of Chef's automatic attributes:
http://wiki.opscode.com/display/chef/Recipes#Recipes
-CommonAutomaticAttributes
"""
node['fqdn'] = node['name']
node['hostname'] = node['fqdn'].split('.')[0]
node['domain'] = ".".join(node['fqdn'].split('.')[1:])
def _add_merged_attributes(node, all_recipes, all_roles):
    """Merges attributes from cookbooks, node and roles
    Chef Attribute precedence:
    http://docs.opscode.com/essentials_cookbook_attribute_files.html#attribute-precedence
    LittleChef implements, in precedence order:
        - Cookbook default
        - Environment default
        - Role default
        - Node normal
        - Role override
        - Environment override
    NOTE: In order for cookbook attributes to be read, they need to be
    correctly defined in its metadata.json

    Mutates *node* in place; the merge order of the update_dct calls
    below implements the precedence list above (later wins).
    """
    # Get cookbooks from extended recipes
    attributes = {}
    for recipe in node['recipes']:
        # Find this recipe
        found = False
        for r in all_recipes:
            if recipe == r['name']:
                found = True
                for attr in r['attributes']:
                    if r['attributes'][attr].get('type') == "hash":
                        value = {}
                    else:
                        value = r['attributes'][attr].get('default')
                    # Attribute dictionaries are defined as a single
                    # compound key. Split and build proper dict
                    build_dct(attributes, attr.split("/"), value)
        if not found:
            error = "Could not find recipe '{0}' while ".format(recipe)
            error += "building node data bag for '{0}'".format(node['name'])
            abort(error)
    # Get default role attributes
    for role in node['roles']:
        for r in all_roles:
            if role == r['name']:
                update_dct(attributes, r.get('default_attributes', {}))
    # Get default environment attributes
    environment = lib.get_environment(node['chef_environment'])
    update_dct(attributes, environment.get('default_attributes', {}))
    # Get normal node attributes
    non_attribute_fields = [
        'id', 'name', 'role', 'roles', 'recipes', 'run_list', 'ipaddress']
    node_attributes = {}
    for key in node:
        if key in non_attribute_fields:
            continue
        node_attributes[key] = node[key]
    update_dct(attributes, node_attributes)
    # Get override role attributes
    for role in node['roles']:
        for r in all_roles:
            if role == r['name']:
                update_dct(attributes, r.get('override_attributes', {}))
    # Get override environment attributes
    update_dct(attributes, environment.get('override_attributes', {}))
    # Merge back to the original node object
    node.update(attributes)
def build_node_data_bag():
    """Builds one 'node' data bag item per file found in the 'nodes' directory

    Automatic attributes for a node item:
        'id': It adds data bag 'id', same as filename but with underscores
        'name': same as the filename
        'fqdn': same as the filename (LittleChef filenames should be fqdns)
        'hostname': Uses the first part of the filename as the hostname
            (until it finds a period) minus the .json extension
        'domain': filename minus the first part of the filename (hostname)
            minus the .json extension
    In addition, it will contain the merged attributes from:
        All default cookbook attributes corresponding to the node
        All attributes found in nodes/<item>.json file
        Default and override attributes from all roles

    Side effects: wipes and recreates the local data_bags/node directory,
    writing one JSON file per node.
    """
    nodes = lib.get_nodes()
    node_data_bag_path = os.path.join('data_bags', 'node')
    # In case there are leftovers
    remove_local_node_data_bag()
    os.makedirs(node_data_bag_path)
    all_recipes = lib.get_recipes()
    all_roles = lib.get_roles()
    for node in nodes:
        # Dots are not allowed (only alphanumeric), substitute by underscores
        node['id'] = node['name'].replace('.', '_')
        # Build extended role list
        node['role'] = lib.get_roles_in_node(node)
        node['roles'] = node['role'][:]
        for role in node['role']:
            node['roles'].extend(lib.get_roles_in_role(role))
        node['roles'] = list(set(node['roles']))
        # Build extended recipe list
        node['recipes'] = lib.get_recipes_in_node(node)
        # Add recipes found inside each roles in the extended role list
        for role in node['roles']:
            node['recipes'].extend(lib.get_recipes_in_role(role))
        node['recipes'] = list(set(node['recipes']))
        # Add node attributes
        _add_merged_attributes(node, all_recipes, all_roles)
        _add_automatic_attributes(node)
        # Save node data bag item
        with open(os.path.join(
                  'data_bags', 'node', node['id'] + '.json'), 'w') as f:
            f.write(json.dumps(node))
def remove_local_node_data_bag():
    """Removes generated 'node' data_bag locally"""
    bag_dir = os.path.join('data_bags', 'node')
    if not os.path.exists(bag_dir):
        return
    shutil.rmtree(bag_dir)
def _remove_remote_node_data_bag():
    """Removes generated 'node' data_bag from the remote node"""
    remote_bag_dir = os.path.join(env.node_work_path, 'data_bags', 'node')
    # Only issue the remote rm when the directory is actually there
    if exists(remote_bag_dir):
        sudo("rm -rf {0}".format(remote_bag_dir))
def _node_cleanup():
    """Removes temporary Chef files from the remote node, unless we are in
    debug loglevel (they are kept for inspection then).
    """
    # BUG FIX: the original used `is not "debug"`, an identity comparison
    # against a string literal, which is implementation-dependent; value
    # comparison with != is the correct test.
    if env.loglevel != "debug":
        with hide('running', 'stdout'):
            _remove_remote_node_data_bag()
            with settings(warn_only=True):
                sudo("rm '/etc/chef/node.json'")
                if env.encrypted_data_bag_secret:
                    sudo("rm '/etc/chef/encrypted_data_bag_secret'")
def _add_environment_lib():
    """Adds the chef_solo_envs cookbook, which provides a library that adds
    environment attribute compatibility for chef-solo v10
    NOTE: Chef 10 only
    """
    # Create extra cookbook dir on the remote node, inside the first
    # configured cookbook path
    lib_path = os.path.join(env.node_work_path, cookbook_paths[0],
                            'chef_solo_envs', 'libraries')
    with hide('running', 'stdout'):
        sudo('mkdir -p {0}'.format(lib_path))
    # Add environment patch to the node's cookbooks
    # NOTE(review): 'basedir' is a module-level path defined outside this
    # view — presumably the littlechef package directory; confirm.
    put(os.path.join(basedir, 'environment.rb'),
        os.path.join(lib_path, 'environment.rb'), use_sudo=True)
def _configure_node():
    """Exectutes chef-solo to apply roles and recipes to a node

    Builds and runs the remote chef-solo command, inspecting its output
    to decide success/failure; aborts or exits non-zero on failure.
    """
    print("")
    msg = "Cooking..."
    if env.parallel:
        # Prefix output with the host when running against many nodes
        msg = "[{0}]: {1}".format(env.host_string, msg)
    print(msg)
    # Backup last report
    with settings(hide('stdout', 'warnings', 'running'), warn_only=True):
        sudo("mv {0} {0}.1".format(LOGFILE))
    # Build chef-solo command
    # RUBYOPT=-Ku forces UTF-8 mode in Ruby
    cmd = "RUBYOPT=-Ku chef-solo"
    # NOTE(review): 'whyrun', 'ENABLE_LOGS' and 'LOGFILE' are module-level
    # globals defined outside this view — confirm their defaults.
    if whyrun:
        cmd += " --why-run"
    cmd += ' -l {0} -j /etc/chef/node.json'.format(env.loglevel)
    if ENABLE_LOGS:
        cmd += ' | tee {0}'.format(LOGFILE)
    if env.loglevel == "debug":
        print("Executing Chef Solo with the following command:\n"
              "{0}".format(cmd))
    with settings(hide('warnings', 'running'), warn_only=True):
        output = sudo(cmd)
    # chef-solo can exit 0 yet still have failed; scan the output for the
    # success markers as well as the exit status
    if (output.failed or "FATAL: Stacktrace dumped" in output or
            ("Chef Run complete" not in output and
             "Report handlers complete" not in output)):
        if 'chef-solo: command not found' in output:
            print(
                colors.red(
                    "\nFAILED: Chef Solo is not installed on this node"))
            print(
                "Type 'fix node:{0} deploy_chef' to install it".format(
                    env.host))
            abort("")
        else:
            print(colors.red(
                "\nFAILED: chef-solo could not finish configuring the node\n"))
            import sys
            sys.exit(1)
    else:
        msg = "\n"
        if env.parallel:
            msg += "[{0}]: ".format(env.host_string)
        msg += "SUCCESS: Node correctly configured"
        print(colors.green(msg))
########NEW FILE########
__FILENAME__ = exceptions
#Copyright 2010-2014 Miquel Torres <tobami@gmail.com>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
class FileNotFoundError(Exception):
    """Raised when an expected kitchen file (e.g. an environment JSON
    file) cannot be found.

    NOTE(review): shadows the Python 3 builtin of the same name; harmless
    under Python 2, but should be renamed if the codebase is ever ported.
    """
    pass
########NEW FILE########
__FILENAME__ = lib
#Copyright 2010-2013 Miquel Torres <tobami@gmail.com>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
"""Library for parsing and printing role, cookbook and node information"""
import os
import json
import subprocess
import imp
from fabric import colors
from fabric.api import env
from fabric.contrib.console import confirm
from fabric.utils import abort
from littlechef import cookbook_paths
from littlechef.exceptions import FileNotFoundError
knife_installed = True
def _resolve_hostname(name):
    """Returns resolved hostname using the ssh config"""
    if env.ssh_config is None:
        return name
    # If a node file already exists under the given alias, keep it as-is
    if os.path.exists(os.path.join("nodes", name + ".json")):
        return name
    # Otherwise translate through the ssh config, but only adopt the
    # resolved hostname when a node file exists for it
    resolved = env.ssh_config.lookup(name)['hostname']
    if os.path.exists(os.path.join("nodes", resolved + ".json")):
        return resolved
    return name
def get_env_host_string():
    """Validates env.host_string and returns it resolved via the ssh config.

    Side effect: when given as user@host, the user part is copied into
    env.user (env.host_string itself is left untouched).
    """
    host = env.host_string
    if not host:
        abort('no node specified\nUsage: fix node:<MYNODES> <COMMAND>')
    user, sep, _rest = host.partition('@')
    if sep:
        env.user = user
    return _resolve_hostname(host)
def env_from_template(name):
    """Returns a basic environment structure"""
    template = {
        "name": name,
        "description": "",
        "cookbook_versions": {},
        "default_attributes": {},
        "json_class": "Chef::Environment",
        "chef_type": "environment",
    }
    return template
def get_environment(name):
    """Returns a JSON environment file as a dictionary"""
    # "_default" is implicit and has no file on disk
    if name == "_default":
        return env_from_template(name)
    filename = os.path.join("environments", name + ".json")
    try:
        with open(filename) as fh:
            try:
                return json.loads(fh.read())
            except ValueError as err:
                abort('LittleChef found the following error in'
                      ' "{0}":\n                {1}'.format(filename, str(err)))
    except IOError:
        raise FileNotFoundError('File {0} not found'.format(filename))
def get_environments():
    """Gets all environments found in the 'environments' directory"""
    suffix = ".json"
    prefix_len = len('environments')
    found = []
    for dirpath, _subdirs, filenames in os.walk('environments'):
        for fname in filenames:
            if not fname.endswith(suffix):
                continue
            # Path relative to the environments dir, without the extension
            rel = os.path.join(dirpath[prefix_len:], fname[:-len(suffix)])
            found.append(get_environment(rel))
    found.sort(key=lambda e: e['name'])
    return found
def get_node(name, merged=False):
    """Returns a JSON node file as a dictionary

    With merged=True the pre-merged data bag item is read instead of the
    raw nodes/ file. Missing files yield a fresh skeleton node.
    """
    if merged:
        node_path = os.path.join(
            "data_bags", "node", name.replace('.', '_') + ".json")
    else:
        node_path = os.path.join("nodes", name + ".json")
    if not os.path.exists(node_path):
        print("Creating new node file '{0}.json'".format(name))
        node = {'run_list': []}
    else:
        # Read node.json
        with open(node_path, 'r') as fh:
            try:
                node = json.loads(fh.read())
            except ValueError as err:
                abort('LittleChef found the following error in'
                      ' "{0}":\n                {1}'.format(node_path, str(err)))
    # Add node name so that we can tell to which node it is
    node['name'] = name
    if not node.get('chef_environment'):
        node['chef_environment'] = '_default'
    return node
def get_nodes(environment=None):
    """Gets all nodes found in the nodes/ directory

    When *environment* is given, only nodes in that chef environment
    are returned.
    """
    if not os.path.exists('nodes'):
        return []
    matching = []
    for filename in sorted(os.listdir('nodes')):
        if os.path.isdir(filename) or filename.startswith('.'):
            continue
        if not filename.endswith(".json"):
            continue
        fqdn = ".".join(filename.split('.')[:-1])  # Remove .json from name
        node = get_node(fqdn)
        if environment is None or node.get('chef_environment') == environment:
            matching.append(node)
    return matching
def get_nodes_with_role(role_name, environment=None):
    """Get all nodes which include a given role,
    prefix-searches are also supported
    """
    is_prefix = role_name.endswith("*")
    pattern = role_name.rstrip("*") if is_prefix else role_name
    for candidate in get_nodes(environment):
        # Roles are resolved recursively so nested roles count too
        roles = get_roles_in_node(candidate, recursive=True)
        if is_prefix:
            matched = any(r.startswith(pattern) for r in roles)
        else:
            matched = pattern in roles
        if matched:
            yield candidate
def get_nodes_with_tag(tag, environment=None, include_guests=False):
    """Get all nodes which include a given tag

    With include_guests=True, also yields virtualization guests of any
    tagged host node (looked up by the guest's 'fqdn').
    """
    nodes = get_nodes(environment)
    nodes_mapping = dict((n['name'], n) for n in nodes)
    for n in nodes:
        if tag in n.get('tags', []):
            # Remove from node mapping so it doesn't get added twice by
            # guest walking below
            # NOTE(review): the mapping is keyed by 'name' but deleted by
            # 'fqdn'; nodes straight from get_nodes() have no 'fqdn' key,
            # so this del silently no-ops via the KeyError — confirm
            # whether 'name' was intended here.
            try:
                del nodes_mapping[n['fqdn']]
            except KeyError:
                pass
            yield n
        # Walk guest if it is a host
        if include_guests and n.get('virtualization', {}).get('role') == 'host':
            for guest in n['virtualization']['guests']:
                try:
                    yield nodes_mapping[guest['fqdn']]
                except KeyError:
                    # we ignore guests which are not in the same
                    # chef environments than their hosts for now
                    pass
def get_nodes_with_recipe(recipe_name, environment=None):
    """Get all nodes which include a given recipe,
    prefix-searches are also supported
    """
    is_prefix = recipe_name.endswith("*")
    target = recipe_name.rstrip("*") if is_prefix else recipe_name
    for candidate in get_nodes(environment):
        # Collect recipes both from the node itself and from all of its
        # (recursively resolved) roles
        recipes = get_recipes_in_node(candidate)
        for role in get_roles_in_node(candidate, recursive=True):
            recipes.extend(get_recipes_in_role(role))
        if is_prefix:
            if any(r.startswith(target) for r in recipes):
                yield candidate
        elif target in recipes:
            yield candidate
def print_node(node, detailed=False):
    """Pretty prints the given node"""
    print(colors.yellow("\n" + node['name']))
    roles = get_roles_in_node(node)
    recipes = get_recipes_in_node(node)
    if detailed:
        # Expand each role and recipe on its own line(s)
        for role in roles:
            print_role(_get_role(role), detailed=False)
        for recipe in recipes:
            print(" Recipe: {0}".format(recipe))
            print(" attributes: {0}".format(node.get(recipe, "")))
    else:
        print(' Roles: {0}'.format(", ".join(roles)))
        print(' Recipes: {0}'.format(", ".join(recipes)))
    # Node attributes (everything except bookkeeping fields)
    print(" Node attributes:")
    for attribute in node.keys():
        if attribute in ("run_list", "name"):
            continue
        print(" {0}: {1}".format(attribute, node[attribute]))
def print_nodes(nodes, detailed=False):
    """Prints all the given nodes"""
    count = 0
    for entry in nodes:
        count += 1
        print_node(entry, detailed=detailed)
    plural = "s" if count != 1 else ""
    print("\nFound {0} node{1}".format(count, plural))
def _generate_metadata(path, cookbook_path, name):
    """Checks whether metadata.rb has changed and regenerate metadata.json

    Shells out to the locally installed 'knife' tool; if knife is not
    found, sets the module-level flag so we never try again this run.
    """
    global knife_installed
    if not knife_installed:
        return
    metadata_path_rb = os.path.join(path, 'metadata.rb')
    metadata_path_json = os.path.join(path, 'metadata.json')
    # Regenerate only when metadata.rb exists and is newer than (or the
    # only one of) metadata.json
    if (os.path.exists(metadata_path_rb) and
            (not os.path.exists(metadata_path_json) or
            os.stat(metadata_path_rb).st_mtime >
            os.stat(metadata_path_json).st_mtime)):
        error_msg = "Warning: metadata.json for {0}".format(name)
        error_msg += " in {0} is older that metadata.rb".format(cookbook_path)
        error_msg += ", cookbook attributes could be out of date\n\n"
        try:
            proc = subprocess.Popen(
                ['knife', 'cookbook', 'metadata', '-o', cookbook_path, name],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            resp, error = proc.communicate()
            # knife may exit 0 but still fail; inspect its output
            if ('ERROR:' in resp or 'FATAL:' in resp
                    or 'Generating metadata for' not in resp):
                if("No user specified, pass via -u or specifiy 'node_name'"
                        in error):
                    error_msg += "You need to have an up-to-date (>=0.10.x)"
                    error_msg += " version of knife installed locally in order"
                    error_msg += " to generate metadata.json.\nError "
                else:
                    error_msg += "Unkown error "
                error_msg += "while executing knife to generate "
                error_msg += "metadata.json for {0}".format(path)
                print(error_msg)
                print resp
            if env.loglevel == 'debug':
                print "\n".join(resp.split("\n")[:2])
        except OSError:
            # knife binary not on PATH; remember that and warn once
            knife_installed = False
            error_msg += "If you locally install Chef's knife tool, LittleChef"
            error_msg += " will regenerate metadata.json files automatically\n"
            print(error_msg)
        else:
            print("Generated metadata.json for {0}\n".format(path))
def get_recipes_in_cookbook(name):
    """Gets the name of all recipes present in a cookbook
    Returns a list of dictionaries

    Searches every configured cookbook path; site-cookbooks metadata has
    preference over cookbooks. Aborts when the cookbook directory or its
    metadata.json cannot be found anywhere.
    """
    recipes = {}
    path = None
    cookbook_exists = False
    metadata_exists = False
    for cookbook_path in cookbook_paths:
        path = os.path.join(cookbook_path, name)
        path_exists = os.path.exists(path)
        # cookbook exists if present in any of the cookbook paths
        cookbook_exists = cookbook_exists or path_exists
        if not path_exists:
            continue
        # Refresh metadata.json from metadata.rb if stale
        _generate_metadata(path, cookbook_path, name)
        # Now try to open metadata.json
        try:
            with open(os.path.join(path, 'metadata.json'), 'r') as f:
                try:
                    cookbook = json.loads(f.read())
                except ValueError as e:
                    msg = "Little Chef found the following error in your"
                    msg += " {0} file:\n {1}".format(
                        os.path.join(path, 'metadata.json'), e)
                    abort(msg)
                # Add each recipe defined in the cookbook
                metadata_exists = True
                recipe_defaults = {
                    'description': '',
                    'version': cookbook.get('version'),
                    'dependencies': cookbook.get('dependencies', {}).keys(),
                    'attributes': cookbook.get('attributes', {})
                }
                for recipe in cookbook.get('recipes', []):
                    recipes[recipe] = dict(
                        recipe_defaults,
                        name=recipe,
                        description=cookbook['recipes'][recipe]
                    )
            # Cookbook metadata.json was found, don't try next cookbook path
            # because metadata.json in site-cookbooks has preference
            break
        except IOError:
            # metadata.json was not found, try next cookbook_path
            pass
    if not cookbook_exists:
        abort('Unable to find cookbook "{0}"'.format(name))
    elif not metadata_exists:
        abort('Cookbook "{0}" has no metadata.json'.format(name))
    # Add recipes found in the 'recipes' directory but not listed
    # in the metadata
    # NOTE: recipe_defaults is guaranteed to be bound here because the
    # metadata_exists abort above fires otherwise
    for cookbook_path in cookbook_paths:
        recipes_dir = os.path.join(cookbook_path, name, 'recipes')
        if not os.path.isdir(recipes_dir):
            continue
        for basename in os.listdir(recipes_dir):
            fname, ext = os.path.splitext(basename)
            if ext != '.rb':
                continue
            # default.rb maps to the bare cookbook name, others to name::file
            if fname != 'default':
                recipe = '%s::%s' % (name, fname)
            else:
                recipe = name
            if recipe not in recipes:
                recipes[recipe] = dict(recipe_defaults, name=recipe)
    # When a recipe has no default recipe (libraries?),
    # add one so that it is listed
    if not recipes:
        recipes[name] = dict(
            recipe_defaults,
            name=name,
            description='This cookbook has no default recipe'
        )
    return recipes.values()
def get_recipes_in_role(rolename):
    """Gets all recipes defined in a role's run_list"""
    return get_recipes_in_node(_get_role(rolename))
def get_recipes_in_node(node):
    """Gets the name of all recipes present in the run_list of a node"""
    # run_list entries look like "recipe[name]" or "role[name]";
    # keep only the recipe entries and extract the bracketed name
    return [entry.split('[')[1].split(']')[0]
            for entry in node.get('run_list', [])
            if entry.startswith("recipe")]
def get_recipes():
    """Gets all recipes found in the cookbook directories"""
    cookbook_dirs = set()
    # Collect unique cookbook directory names across all cookbook paths
    for base in cookbook_paths:
        for entry in os.listdir(base):
            if entry.startswith('.'):
                continue
            if os.path.isdir(os.path.join(base, entry)):
                cookbook_dirs.add(entry)
    collected = []
    for dirname in cookbook_dirs:
        collected.extend(get_recipes_in_cookbook(dirname))
    return sorted(collected, key=lambda r: r['name'])
def print_recipe(recipe):
    """Pretty prints the given recipe"""
    print(colors.yellow("\n{0}".format(recipe['name'])))
    print(" description: {0}".format(recipe['description']))
    print(" version: {0}".format(recipe['version']))
    print(" dependencies: {0}".format(", ".join(recipe['dependencies'])))
    print(" attributes: {0}".format(", ".join(recipe['attributes'])))
def get_roles_in_role(rolename):
    """Gets all roles defined in a role's run_list"""
    role = _get_role(rolename)
    return get_roles_in_node(role)
def get_roles_in_node(node, recursive=False, depth=0):
    """Returns a list of roles found in the run_list of a node
        * recursive: True fetches roles recursively
        * depth: Keeps track of recursion depth (capped at 5 levels)
    """
    max_depth = 5
    found = []
    for entry in node.get('run_list', []):
        if not entry.startswith("role"):
            continue
        role = entry.split('[')[1].split(']')[0]
        if role in found:
            continue
        found.append(role)
        if recursive and depth <= max_depth:
            found.extend(get_roles_in_node(_get_role(role),
                                           recursive=True,
                                           depth=depth + 1))
    # Deduplicate (order is not guaranteed)
    return list(set(found))
def _get_role(rolename):
    """Reads and parses a file containing a role"""
    path = os.path.join('roles', rolename + '.json')
    if not os.path.exists(path):
        abort("Couldn't read role file {0}".format(path))
    with open(path, 'r') as fh:
        try:
            role = json.loads(fh.read())
        except ValueError as err:
            abort("Little Chef found the following error in your"
                  " {0}.json file:\n {1}".format(rolename, str(err)))
    # Remember the path-relative name the role was loaded under
    role['fullname'] = rolename
    return role
def get_roles():
    """Gets all roles found in the 'roles' directory"""
    suffix = ".json"
    prefix_len = len('roles')
    collected = []
    for dirpath, _subdirs, filenames in os.walk('roles'):
        for fname in filenames:
            if not fname.endswith(suffix):
                continue
            # Role name is its path relative to roles/, minus .json
            rel = os.path.join(dirpath[prefix_len:], fname[:-len(suffix)])
            collected.append(_get_role(rel))
    collected.sort(key=lambda r: r['fullname'])
    return collected
def print_role(role, detailed=True):
    """Pretty prints the given role"""
    if detailed:
        print(colors.yellow(role.get('fullname')))
        print(" description: {0}".format(role.get('description')))
    else:
        print(" Role: {0}".format(role.get('fullname')))
    # Attributes are shown in both modes when present
    if 'default_attributes' in role:
        print(" default_attributes:")
        _pprint(role['default_attributes'])
    if 'override_attributes' in role:
        print(" override_attributes:")
        _pprint(role['override_attributes'])
    if detailed:
        print(" run_list: {0}".format(role.get('run_list')))
        print("")
def print_plugin_list():
    """Prints a list of available plugins"""
    print("List of available plugins:")
    for entry in get_plugins():
        _pprint(entry)
def get_plugins():
    """Gets available plugins by looking into the plugins/ directory"""
    if not os.path.exists('plugins'):
        return
    candidates = [f for f in os.listdir('plugins')
                  if f.endswith(".py") and not os.path.isdir(f)]
    for filename in sorted(candidates):
        plugin_name = filename[:-3]  # strip ".py"
        try:
            module = import_plugin(plugin_name)
        except SystemExit:
            # import_plugin aborts (SystemExit) on syntax errors
            description = "Plugin has a syntax error"
        else:
            description = module.__doc__ or "No description found"
        yield {plugin_name: description}
def import_plugin(name):
    """Imports plugin python module"""
    path = os.path.join("plugins", name + ".py")
    try:
        with open(path, 'rb') as fh:
            try:
                # Load under a "p_" prefix to avoid module name clashes
                module = imp.load_module(
                    "p_" + name, fh, name + '.py',
                    ('.py', 'rb', imp.PY_SOURCE)
                )
            except SyntaxError as err:
                msg = "Found plugin '{0}', but it seems".format(name)
                msg += " to have a syntax error: {0}".format(str(err))
                abort(msg)
    except IOError:
        abort("Sorry, could not find '{0}.py' in the plugin directory".format(
              name))
    return module
def get_cookbook_path(cookbook_name):
    """Returns path to the cookbook for the given cookbook name"""
    # First match wins, in cookbook_paths order
    for base in cookbook_paths:
        candidate = os.path.join(base, cookbook_name)
        if os.path.exists(candidate):
            return candidate
    raise IOError('Can\'t find cookbook with name "{0}"'.format(cookbook_name))
def global_confirm(question, default=True):
    """Shows a confirmation that applies to all hosts
    by temporarily disabling parallel execution in Fabric
    """
    # Non-interactive runs auto-accept
    if env.abort_on_prompts:
        return True
    saved_parallel = env.parallel
    env.parallel = False
    answer = confirm(question, default)
    env.parallel = saved_parallel
    return answer
def _pprint(dic):
"""Prints a dictionary with one indentation level"""
for key, value in dic.items():
print(" {0}: {1}".format(key, value))
def print_header(string):
    """Prints a colored header"""
    header = "\n== {0} ==".format(string)
    print(colors.yellow(header))
def get_margin(length):
    """Add enough tabs to align in two columns

    Returns 1 to 4 tab characters: the longer the name, the fewer tabs.
    The unused 'chars' counters from the original were removed; early
    returns replace the if/elif ladder.
    """
    if length > 23:
        return "\t"
    if length > 15:
        return "\t\t"
    if length > 7:
        return "\t\t\t"
    return "\t\t\t\t"
########NEW FILE########
__FILENAME__ = runner
#Copyright 2010-2014 Miquel Torres <tobami@gmail.com>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
"""LittleChef: Configuration Management using Chef Solo"""
import ConfigParser
import os
import sys
import json
from fabric.api import *
from fabric.contrib.console import confirm
from paramiko.config import SSHConfig as _SSHConfig
import littlechef
from littlechef import solo, lib, chef
# Fabric settings
import fabric
# Suppress fabric's "[host] run: ..." echo lines
fabric.state.output['running'] = False
# Copy LittleChef's parsed configuration into the fabric env so tasks
# can read it from one place
env.loglevel = littlechef.loglevel
env.verbose = littlechef.verbose
env.abort_on_prompts = littlechef.noninteractive
env.chef_environment = littlechef.chef_environment
env.node_work_path = littlechef.node_work_path
if littlechef.concurrency:
    # Parallel cooking: prefix output per host and size the worker pool
    env.output_prefix = True
    env.parallel = True
    env.pool_size = littlechef.concurrency
else:
    env.output_prefix = False
# Set by the test suite to skip remote side effects
__testing__ = False
@hosts('setup')
def new_kitchen():
    """Create LittleChef directory structure (Kitchen)

    Idempotent: existing directories and an existing config file are left
    untouched. Each created directory gets a README so it can be added to
    version control.
    """
    def _mkdir(d, content=""):
        # Create the directory (if missing) and drop a README into it
        if not os.path.exists(d):
            os.mkdir(d)
            # Add a README so that it can be added to version control
            readme_path = os.path.join(d, 'README')
            if not os.path.exists(readme_path):
                with open(readme_path, "w") as readme:
                    print >> readme, content
            print "{0}/ directory created...".format(d)

    content = "# The /nodes directory contains your nodes as JSON files "
    content += "representing a node.\n"
    content += "# Example node file `nodes/myfqdn.json`:\n"
    data = {
        "chef_environment": "production",
        "apt": {"cacher_port": 3143},
        "run_list": ["recipe[apt]"]
    }
    content += "{0}".format(json.dumps(data, indent=2))
    _mkdir("nodes", content)
    _mkdir("roles")
    _mkdir("data_bags")
    _mkdir("environments")
    for cookbook_path in littlechef.cookbook_paths:
        _mkdir(cookbook_path)
    # Add skeleton config file
    if not os.path.exists(littlechef.CONFIGFILE):
        with open(littlechef.CONFIGFILE, 'w') as configfh:
            print >> configfh, "[userinfo]"
            print >> configfh, "user = "
            print >> configfh, "password = "
            print >> configfh, "keypair-file = "
            print >> configfh, "ssh-config = "
            print >> configfh, "encrypted_data_bag_secret = "
            print >> configfh, "[kitchen]"
            print >> configfh, "node_work_path = /tmp/chef-solo/"
        print "{0} file created...".format(littlechef.CONFIGFILE)
def nodes_with_role(rolename):
    """Configures a list of nodes that have the given role in their run list"""
    matching = [n['name'] for n in
                lib.get_nodes_with_role(rolename, env.chef_environment)]
    if not matching:
        print("No nodes found with role '{0}'".format(rolename))
        sys.exit(0)
    return node(*matching)
def nodes_with_recipe(recipename):
    """Configures a list of nodes that have the given recipe in their run list
    """
    matching = [n['name'] for n in
                lib.get_nodes_with_recipe(recipename, env.chef_environment)]
    if not matching:
        print("No nodes found with recipe '{0}'".format(recipename))
        sys.exit(0)
    return node(*matching)
def nodes_with_tag(tag):
    """Sets a list of nodes that have the given tag assigned and calls node()"""
    tagged = lib.get_nodes_with_tag(tag, env.chef_environment,
                                    littlechef.include_guests)
    names = [n['name'] for n in tagged]
    if not names:
        print("No nodes found with tag '{0}'".format(tag))
        sys.exit(0)
    return node(*names)
@hosts('setup')
def node(*nodes):
    """Selects and configures a list of nodes. 'all' configures all nodes

    Populates env.hosts, then — unless another task (recipe:, role:,
    deploy_chef, ...) follows on the command line — runs _node_runner
    against every selected host and cleans up the local node data bag.
    """
    chef.build_node_data_bag()
    if not len(nodes) or nodes[0] == '':
        abort('No node was given')
    elif nodes[0] == 'all':
        # Fetch all nodes and add them to env.hosts
        for node in lib.get_nodes(env.chef_environment):
            env.hosts.append(node['name'])
        if not len(env.hosts):
            abort('No nodes found in /nodes/')
        message = "Are you sure you want to configure all nodes ({0})".format(
            len(env.hosts))
        if env.chef_environment:
            message += " in the {0} environment".format(env.chef_environment)
        message += "?"
        if not __testing__:
            if not lib.global_confirm(message):
                abort('Aborted by user')
    else:
        # A list of nodes was given
        env.hosts = list(nodes)
    env.all_hosts = list(env.hosts)  # Shouldn't be needed
    # Check whether another command was given in addition to "node:"
    if not(littlechef.__cooking__ and
            'node:' not in sys.argv[-1] and
            'nodes_with_role:' not in sys.argv[-1] and
            'nodes_with_recipe:' not in sys.argv[-1] and
            'nodes_with_tag:' not in sys.argv[-1]):
        # If user didn't type recipe:X, role:Y or deploy_chef,
        # configure the nodes
        with settings():
            execute(_node_runner)
        chef.remove_local_node_data_bag()
def _configure_fabric_for_platform(platform):
"""Configures fabric for a specific platform"""
if platform == "freebsd":
env.shell = "/bin/sh -c"
def _node_runner():
    """This is only used by node so that we can execute in parallel"""
    host = lib.get_env_host_string()
    env.host_string = host
    current_node = lib.get_node(host)
    _configure_fabric_for_platform(current_node.get("platform"))
    if __testing__:
        print("TEST: would now configure {0}".format(host))
    else:
        lib.print_header("Configuring {0}".format(host))
        chef.sync_node(current_node)
def deploy_chef(gems="no", ask="yes", version="0.10", distro_type=None,
                distro=None, platform=None, stop_client='yes', method=None):
    """Install chef-solo on a node

    The 'gems', 'distro_type', 'distro' and 'platform' parameters are
    deprecated in favour of 'method' (omnibus/gentoo/pacman) but are kept
    for backward compatibility. Unless ask="no" or running
    non-interactively, asks for confirmation before installing. After the
    install, builds a basic node file from ohai output if possible.
    """
    env.host_string = lib.get_env_host_string()
    deprecated_parameters = [distro_type, distro, platform]
    if any(param is not None for param in deprecated_parameters) or gems != 'no':
        print("DeprecationWarning: the parameters 'gems', distro_type',"
              " 'distro' and 'platform' will no longer be supported "
              "in future versions of LittleChef. Use 'method' instead")
    # Auto-detect the distro when neither part was given; giving only one
    # of the two is an error
    if distro_type is None and distro is None:
        distro_type, distro, platform = solo.check_distro()
    elif distro_type is None or distro is None:
        abort('Must specify both or neither of distro_type and distro')
    if method:
        if method not in ['omnibus', 'gentoo', 'pacman']:
            abort('Invalid omnibus method {0}. Supported methods are '
                  'omnibus, gentoo and pacman'.format(method))
        msg = "{0} using the {1} installer".format(version, method)
    else:
        if gems == "yes":
            msg = 'using gems for "{0}"'.format(distro)
        else:
            msg = '{0} using "{1}" packages'.format(version, distro)
    if method == 'omnibus' or ask == "no" or littlechef.noninteractive:
        print("Deploying Chef {0}...".format(msg))
    else:
        message = ('\nAre you sure you want to install Chef '
                   '{0} on node {1}?'.format(msg, env.host_string))
        if not confirm(message):
            abort('Aborted by user')
    _configure_fabric_for_platform(platform)
    if not __testing__:
        solo.install(distro_type, distro, gems, version, stop_client, method)
        solo.configure()
        # Build a basic node file if there isn't one already
        # with some properties from ohai
        with settings(hide('stdout'), warn_only=True):
            output = sudo('ohai -l warn')
        if output.succeeded:
            try:
                ohai = json.loads(output)
            except ValueError:
                abort("Could not parse ohai's output"
                      ":\n  {0}".format(output))
            node = {"run_list": []}
            for attribute in ["ipaddress", "platform", "platform_family",
                              "platform_version"]:
                if ohai.get(attribute):
                    node[attribute] = ohai[attribute]
            chef.save_config(node)
def recipe(recipe):
    """Apply the given recipe to a node
    Sets the run_list to the given recipe
    If no nodes/hostname.json file exists, it creates one
    """
    env.host_string = lib.get_env_host_string()
    lib.print_header(
        "Applying recipe '{0}' on node {1}".format(recipe, env.host_string))
    # Create configuration and sync node
    node_data = lib.get_node(env.host_string)
    node_data["run_list"] = ["recipe[{0}]".format(recipe)]
    if __testing__:
        return
    chef.sync_node(node_data)
def role(role):
    """Apply the given role to a node
    Sets the run_list to the given role
    If no nodes/hostname.json file exists, it creates one
    """
    env.host_string = lib.get_env_host_string()
    lib.print_header(
        "Applying role '{0}' to {1}".format(role, env.host_string))
    # Now create configuration and sync node
    node_data = lib.get_node(env.host_string)
    node_data["run_list"] = ["role[{0}]".format(role)]
    if __testing__:
        return
    chef.sync_node(node_data)
def ssh(name):
    """Executes the given command"""
    env.host_string = lib.get_env_host_string()
    print("\nExecuting the command '{0}' on node {1}...".format(
        name, env.host_string))
    # Execute remotely using either the sudo or the run fabric functions
    with settings(hide("warnings"), warn_only=True):
        prefix = "sudo "
        if name.startswith(prefix):
            sudo(name[len(prefix):])
        else:
            run(name)
def plugin(name):
    """Executes the selected plugin
    Plugins are expected to be found in the kitchen's 'plugins' directory
    """
    env.host_string = lib.get_env_host_string()
    plugin_module = lib.import_plugin(name)
    lib.print_header("Executing plugin '{0}' on "
                     "{1}".format(name, env.host_string))
    node_data = lib.get_node(env.host_string)
    # A bare skeleton node gets at least its name filled in
    if node_data == {'run_list': []}:
        node_data['name'] = env.host_string
    plugin_module.execute(node_data)
    print("Finished executing plugin")
@hosts('api')
def list_nodes():
    """List all configured nodes"""
    all_nodes = lib.get_nodes(env.chef_environment)
    lib.print_nodes(all_nodes)
@hosts('api')
def list_nodes_detailed():
    """Show a detailed list of all nodes"""
    all_nodes = lib.get_nodes(env.chef_environment)
    lib.print_nodes(all_nodes, detailed=True)
@hosts('api')
def list_nodes_with_recipe(recipe):
    """Show all nodes which have assigned a given recipe"""
    matching = lib.get_nodes_with_recipe(recipe, env.chef_environment)
    lib.print_nodes(matching)
@hosts('api')
def list_nodes_with_role(role):
    """Show all nodes which have assigned a given role"""
    matching = lib.get_nodes_with_role(role, env.chef_environment)
    lib.print_nodes(matching)
@hosts('api')
def list_envs():
    """List all environments

    Prints each environment's name, padded, followed by its description
    (or a placeholder when none is set).
    """
    # Loop variable renamed from "env": the original shadowed fabric's
    # module-level env object inside this function
    for environment in lib.get_environments():
        margin_left = lib.get_margin(len(environment['name']))
        print("{0}{1}{2}".format(
            environment['name'], margin_left,
            environment.get('description', '(no description)')))
@hosts('api')
def list_nodes_with_tag(tag):
    """Show all nodes which have assigned a given tag"""
    tagged = lib.get_nodes_with_tag(tag, env.chef_environment,
                                    littlechef.include_guests)
    lib.print_nodes(tagged)
@hosts('api')
def list_recipes():
    """Show a list of all available recipes"""
    # One line per recipe: name, padding, description
    for recipe_data in lib.get_recipes():
        padding = lib.get_margin(len(recipe_data['name']))
        print("{0}{1}{2}".format(recipe_data['name'], padding,
                                 recipe_data['description']))
@hosts('api')
def list_recipes_detailed():
    """Show detailed information for all recipes"""
    for recipe_data in lib.get_recipes():
        lib.print_recipe(recipe_data)
@hosts('api')
def list_roles():
    """Show a list of all available roles"""
    # One line per role: full name, padding, description
    for role_data in lib.get_roles():
        padding = lib.get_margin(len(role_data['fullname']))
        description = role_data.get('description', '(no description)')
        print("{0}{1}{2}".format(role_data['fullname'], padding, description))
@hosts('api')
def list_roles_detailed():
    """Show detailed information for all roles"""
    for role_data in lib.get_roles():
        lib.print_role(role_data)
@hosts('api')
def list_plugins():
    """Show all available plugins"""
    # Delegates entirely to lib; plugin discovery lives there
    lib.print_plugin_list()
def _check_appliances():
"""Looks around and return True or False based on whether we are in a
kitchen
"""
filenames = os.listdir(os.getcwd())
missing = []
for dirname in ['nodes', 'environments', 'roles', 'cookbooks', 'data_bags']:
if (dirname not in filenames) or (not os.path.isdir(dirname)):
missing.append(dirname)
return (not bool(missing)), missing
def _readconfig():
    """Configures environment variables

    Reads littlechef's config file (falling back to the deprecated
    'config.cfg'/'auth.cfg' names) and populates fabric's ``env`` with
    connection and kitchen settings. Aborts with a user-facing message on
    any unrecoverable problem.
    """
    config = ConfigParser.SafeConfigParser()
    try:
        found = config.read(littlechef.CONFIGFILE)
    except ConfigParser.ParsingError as e:
        abort(str(e))
    if not len(found):
        # Fall back to the old config file names, warning about deprecation
        try:
            found = config.read(['config.cfg', 'auth.cfg'])
        except ConfigParser.ParsingError as e:
            abort(str(e))
        if len(found):
            print('\nDeprecationWarning: deprecated config file name \'{0}\'.'
                  ' Use {1}'.format(found[0], littlechef.CONFIGFILE))
        else:
            abort('No {0} file found in the current '
                  'directory'.format(littlechef.CONFIGFILE))
    in_a_kitchen, missing = _check_appliances()
    # Renders ['a', 'b', 'c'] as "a, b and c" for the error message
    missing_str = lambda m: ' and '.join(', '.join(m).rsplit(', ', 1))
    if not in_a_kitchen:
        abort("Couldn't find {0}. "
              "Are you executing 'fix' outside of a kitchen?\n"
              "To create a new kitchen in the current directory "
              " type 'fix new_kitchen'".format(missing_str(missing)))
    # We expect an ssh_config file here,
    # and/or a user, (password/keyfile) pair
    try:
        env.ssh_config_path = config.get('userinfo', 'ssh-config')
    except ConfigParser.NoSectionError:
        abort('You need to define a "userinfo" section'
              ' in the config file. Refer to the README for help '
              '(http://github.com/tobami/littlechef)')
    except ConfigParser.NoOptionError:
        # 'ssh-config' is optional; fall back to user/password/keyfile below
        env.ssh_config_path = None
    if env.ssh_config_path:
        env.ssh_config = _SSHConfig()
        env.ssh_config_path = os.path.expanduser(env.ssh_config_path)
        env.use_ssh_config = True
        try:
            env.ssh_config.parse(open(env.ssh_config_path))
        except IOError:
            abort("Couldn't open the ssh-config file "
                  "'{0}'".format(env.ssh_config_path))
        except Exception:
            abort("Couldn't parse the ssh-config file "
                  "'{0}'".format(env.ssh_config_path))
    else:
        env.ssh_config = None
    # check for a gateway
    try:
        env.gateway = config.get('connection', 'gateway')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.gateway = None
    # Check for an encrypted_data_bag_secret file and set the env option
    try:
        env.encrypted_data_bag_secret = config.get('userinfo',
                                                   'encrypted_data_bag_secret')
    except ConfigParser.NoOptionError:
        env.encrypted_data_bag_secret = None
    if env.encrypted_data_bag_secret:
        env.encrypted_data_bag_secret = os.path.expanduser(
            env.encrypted_data_bag_secret)
        try:
            # Opened only to verify readability; result is discarded
            open(env.encrypted_data_bag_secret)
        except IOError as e:
            abort("Failed to open encrypted_data_bag_secret file at "
                  "'{0}'".format(env.encrypted_data_bag_secret))
    try:
        # raw=True: sudo_prefix may contain '%' which would otherwise be
        # interpolated by ConfigParser
        sudo_prefix = config.get('ssh', 'sudo_prefix', raw=True)
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        pass
    else:
        env.sudo_prefix = sudo_prefix
    try:
        env.user = config.get('userinfo', 'user')
    except ConfigParser.NoOptionError:
        # A missing user is only fatal when there is no ssh-config to
        # provide one per-host
        if not env.ssh_config_path:
            msg = 'You need to define a user in the "userinfo" section'
            msg += ' of {0}. Refer to the README for help'
            msg += ' (http://github.com/tobami/littlechef)'
            abort(msg.format(littlechef.CONFIGFILE))
        user_specified = False
    else:
        user_specified = True
    try:
        env.password = config.get('userinfo', 'password') or None
    except ConfigParser.NoOptionError:
        pass
    try:
        #If keypair-file is empty, assign None or fabric will try to read key "
        env.key_filename = config.get('userinfo', 'keypair-file') or None
    except ConfigParser.NoOptionError:
        pass
    # With an explicit user we need at least one authentication mechanism
    if (user_specified and not env.password and not env.key_filename
            and not env.ssh_config):
        abort('You need to define a password, keypair file, or ssh-config '
              'file in {0}'.format(littlechef.CONFIGFILE))
    # Node's Chef Solo working directory for storing cookbooks, roles, etc.
    try:
        env.node_work_path = os.path.expanduser(config.get('kitchen',
                                                'node_work_path'))
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        env.node_work_path = littlechef.node_work_path
    else:
        if not env.node_work_path:
            abort('The "node_work_path" option cannot be empty')
    # Follow symlinks
    try:
        env.follow_symlinks = config.getboolean('kitchen', 'follow_symlinks')
    except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
        env.follow_symlinks = False
    # Upload Directory
    try:
        env.sync_packages_dest_dir = config.get('sync-packages',
                                                'dest-dir')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.sync_packages_dest_dir = None
    # Local Directory
    try:
        env.sync_packages_local_dir = config.get('sync-packages',
                                                 'local-dir')
    except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
        env.sync_packages_local_dir = None
# Only read config if fix is being used and we are not creating a new kitchen
if littlechef.__cooking__:
    # Called from command line
    if env.chef_environment:
        print("\nEnvironment: {0}".format(env.chef_environment))
    if env.verbose:
        print("\nVerbose output on")
    if env.loglevel == "debug":
        print("\nDebug level on")
    # new_kitchen must be able to run before a config file exists
    if 'new_kitchen' not in sys.argv:
        _readconfig()
else:
    # runner module has been imported as a library: provide safe defaults
    # instead of reading a config file
    env.ssh_config = None
    env.follow_symlinks = False
    env.encrypted_data_bag_secret = None
    env.sync_packages_dest_dir = None
    env.sync_packages_local_dir = None
########NEW FILE########
__FILENAME__ = solo
#Copyright 2010-2014 Miquel Torres <tobami@gmail.com>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
"""Chef Solo deployment"""
import os
import re
from fabric.api import *
from fabric import colors
from fabric.contrib.files import append, exists, upload_template
from fabric.utils import abort
from littlechef import cookbook_paths
from littlechef import LOGFILE
# Path to local patch
BASEDIR = os.path.abspath(os.path.dirname(__file__).replace('\\', '/'))
def install(distro_type, distro, gems, version, stop_client, method):
    """Dispatch to the appropriate Chef installation routine

    distro_type selects the package family; gems/method select between the
    gem-based, omnibus, and native-package installation paths.
    """
    if distro_type == "debian":
        if gems == "yes":
            _gem_apt_install()
        elif method == "omnibus":
            _omnibus_install(version=version)
        else:
            # Native apt packages only exist for specific Chef versions
            supported_versions = ["0.9", "0.10"]
            if version not in supported_versions:
                abort('Wrong Chef version specified. Valid versions are {0}'.format(
                    ", ".join(supported_versions)))
            _apt_install(distro, version, stop_client)
        return
    if distro_type == "rpm":
        if gems == "yes":
            _gem_rpm_install()
        elif method == "omnibus":
            _omnibus_install(version=version)
        else:
            _rpm_install()
        return
    if distro_type == "gentoo":
        _emerge_install()
    elif distro_type == "pacman":
        _gem_pacman_install()
    elif distro_type == "freebsd":
        _gem_ports_install()
    else:
        abort('wrong distro type: {0}'.format(distro_type))
def configure(current_node=None):
    """Deploy chef-solo specific files

    current_node: node dict; only its 'chef_environment' key is read here.
    Creates the remote work/cache directories, fixes their ownership so
    rsync can write, and uploads a rendered /etc/chef/solo.rb.
    Aborts when login or any sudo operation fails.
    """
    current_node = current_node or {}
    # Ensure that the /tmp/chef-solo/cache directory exist
    cache_dir = "{0}/cache".format(env.node_work_path)
    # First remote call, could go wrong
    try:
        cache_exists = exists(cache_dir)
    except EOFError as e:
        abort("Could not login to node, got: {0}".format(e))
    if not cache_exists:
        with settings(hide('running', 'stdout'), warn_only=True):
            output = sudo('mkdir -p {0}'.format(cache_dir))
            if output.failed:
                error = "Could not create {0} dir. ".format(env.node_work_path)
                error += "Do you have sudo rights?"
                abort(error)
    # Change ownership of /tmp/chef-solo/ so that we can rsync
    with hide('running', 'stdout'):
        with settings(warn_only=True):
            output = sudo(
                'chown -R {0} {1}'.format(env.user, env.node_work_path))
            if output.failed:
                error = "Could not modify {0} dir. ".format(env.node_work_path)
                error += "Do you have sudo rights?"
                abort(error)
    # Set up chef solo configuration
    logging_path = os.path.dirname(LOGFILE)
    if not exists(logging_path):
        sudo('mkdir -p {0}'.format(logging_path))
    if not exists('/etc/chef'):
        sudo('mkdir -p /etc/chef')
    # Set parameters and upload solo.rb template
    # site-cookbooks must come first in solo.rb so they override cookbooks,
    # hence the reversed copy of cookbook_paths
    reversed_cookbook_paths = cookbook_paths[:]
    reversed_cookbook_paths.reverse()
    cookbook_paths_list = '[{0}]'.format(', '.join(
        ['"{0}/{1}"'.format(env.node_work_path, x) \
            for x in reversed_cookbook_paths]))
    data = {
        'node_work_path': env.node_work_path,
        'cookbook_paths_list': cookbook_paths_list,
        'environment': current_node.get('chef_environment', '_default'),
        'verbose': "true" if env.verbose else "false"
    }
    with settings(hide('everything')):
        try:
            # mode=0400 is Python 2 octal syntax; file readable by root only
            upload_template(os.path.join(BASEDIR, 'solo.rb'), '/etc/chef/',
                            context=data, use_sudo=True, backup=False,
                            mode=0400)
        except SystemExit:
            error = ("Failed to upload '/etc/chef/solo.rb'\nThis "
                     "can happen when the deployment user does not have a "
                     "home directory, which is needed as a temporary location")
            abort(error)
    with hide('stdout'):
        sudo('chown root:$(id -g -n root) {0}'.format('/etc/chef/solo.rb'))
def check_distro():
    """Check that the given distro is supported and return the distro type

    Returns a (distro_type, distro, platform) tuple determined by probing
    the remote node with 'uname -o' and the contents of /etc/issue.
    Aborts (via the nested helper) on unsupported platforms.
    """
    def print_supported_distros(platform):
        # Helper: report the supported list, then abort
        supported_distros = (
            "Currently supported distros are:"
            " Debian, Ubuntu, RHEL (CentOS, RHEL, SL),"
            " Gentoo, Arch Linux or FreeBSD")
        print supported_distros
        abort("Unsupported distro '{0}'".format(platform))
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True):
        # use /bin/sh to determine our OS. FreeBSD doesn't have /bin/bash
        original_shell = env.shell
        env.shell = "/bin/sh -c"
        os_implementation = run('uname -o')
        if 'Linux' in os_implementation:
            env.shell = original_shell
            output = sudo('cat /etc/issue')
            if 'Debian GNU/Linux 5.0' in output:
                distro = "lenny"
                distro_type = "debian"
                platform = "debian"
            elif 'Debian GNU/Linux 6.0' in output:
                distro = "squeeze"
                distro_type = "debian"
                platform = "debian"
            elif 'Debian GNU/Linux 7' in output:
                distro = "wheezy"
                distro_type = "debian"
                platform = "debian"
            elif 'Ubuntu' in output:
                # Ubuntu: the codename (e.g. "precise") comes from lsb_release
                distro = sudo('lsb_release -cs')
                distro_type = "debian"
                platform = "ubuntu"
            elif 'CentOS' in output:
                distro = "CentOS"
                distro_type = "rpm"
                platform = "centos"
            elif 'Red Hat Enterprise Linux' in output:
                distro = "Red Hat"
                distro_type = "rpm"
                platform = "redhat"
            elif 'Scientific Linux' in output:
                distro = "Scientific Linux"
                distro_type = "rpm"
                platform = "scientific"
            elif 'This is \\n.\\O (\\s \\m \\r) \\t' in output:
                # Gentoo ships /etc/issue with literal getty escape sequences
                distro = "Gentoo"
                distro_type = "gentoo"
                platform = "gentoo"
            elif 'Arch Linux \\r (\\n) (\\l)' in output:
                distro = "Arch Linux"
                distro_type = "pacman"
                platform = "arch"
            else:
                print_supported_distros(output)
        elif 'FreeBSD' in os_implementation:
            env.shell = "/bin/sh -c"
            distro = "FreeBSD"
            distro_type = "freebsd"
            platform = "freebsd"
        else:
            print_supported_distros(os_implementation)
    return distro_type, distro, platform
def _gem_install():
    """Install Chef from gems

    Builds RubyGems from source on the node, then installs the chef gem
    pinned to the '~> 10.0' requirement.
    """
    # Install RubyGems from Source
    rubygems_version = "1.8.10"
    chef_version_spec = "'~> 10.0'"  # gem version requirement for chef
    run('wget http://production.cf.rubygems.org/rubygems/rubygems-{0}.tgz'
        .format(rubygems_version))
    run('tar zxf rubygems-{0}.tgz'.format(rubygems_version))
    with cd('rubygems-{0}'.format(rubygems_version)):
        # Fixed: the original called .format(rubygems_version) on this
        # string, which has no placeholders (a no-op)
        sudo('ruby setup.rb --no-format-executable')
    sudo('rm -rf rubygems-{0} rubygems-{0}.tgz'.format(rubygems_version))
    sudo('gem install --no-rdoc --no-ri chef -v {0}'.format(chef_version_spec))
def _gem_apt_install():
    """Install Chef from gems for apt based distros"""
    with hide('stdout', 'running'):
        sudo('apt-get update')
        # noninteractive keeps debconf from prompting during the install
        prefix = "DEBIAN_FRONTEND=noninteractive"
        packages = " ".join(["ruby", "ruby-dev", "libopenssl-ruby", "irb",
                             "build-essential", "wget", "ssl-cert", "rsync"])
        sudo('{0} apt-get --yes install {1}'.format(prefix, packages))
    _gem_install()
def _gem_rpm_install():
    """Install Chef from gems for rpm based distros"""
    _add_rpm_repos()
    # Build toolchain plus ruby headers needed to compile gem extensions
    build_packages = "make ruby ruby-shadow gcc gcc-c++ ruby-devel wget rsync"
    with show('running'):
        sudo('yum -y install {0}'.format(build_packages))
    _gem_install()
def _gem_pacman_install():
    """Install Chef from gems for pacman based distros"""
    with hide('stdout', 'running'):
        # Full system sync and upgrade before installing build dependencies
        sudo('pacman -Syu --noconfirm')
    with show('running'):
        sudo('pacman -S --noconfirm ruby base-devel wget rsync')
        sudo('gem install --no-rdoc --no-ri chef')
def _gem_ports_install():
    """Install Chef from gems for FreeBSD"""
    with hide('stdout', 'running'):
        # Pin ruby 1.9 in make.conf (only when not already configured)
        sudo('grep -q RUBY_VER /etc/make.conf || echo \'RUBY_VER=1.9\' >> /etc/make.conf')
        sudo('grep -q RUBY_DEFAULT_VER /etc/make.conf || echo \'RUBY_DEFAULT_VER=1.9\' >> /etc/make.conf')
    with show('running'):
        # Install prerequisites, then build chef from ports unless present
        sudo('which -s rsync || pkg_add -r rsync')
        sudo('which -s perl || pkg_add -r perl')
        sudo('which -s m4 || pkg_add -r m4')
        sudo('which -s chef || (cd /usr/ports/sysutils/rubygem-chef && make -DBATCH install)')
def _omnibus_install(version):
    """Install Chef using the omnibus installer

    version: Chef version string passed to install.sh's -v flag
    """
    url = "https://www.opscode.com/chef/install.sh"
    with hide('stdout', 'running'):
        # Download locally with urllib (no wget/curl dependency), then upload
        local("""python -c "import urllib; print urllib.urlopen('{0}').read()" > /tmp/install.sh""".format(url))
        put('/tmp/install.sh', '/tmp/install.sh')
    print("Downloading and installing Chef {0}...".format(version))
    with hide('stdout'):
        sudo("""bash /tmp/install.sh -v {0}""".format(version))
def _apt_install(distro, version, stop_client='yes'):
    """Install Chef for debian based distros

    distro: codename used in the Opscode apt source line (e.g. "squeeze")
    version: "0.9" or "0.10"; "0.9" maps to the unversioned repository
    stop_client: when 'yes', disable the chef-client daemon afterwards
    """
    with settings(hide('stdout', 'running')):
        with settings(hide('warnings'), warn_only=True):
            # wget and rsync are prerequisites; install them if absent
            wget_is_installed = sudo('which wget')
            if wget_is_installed.failed:
                # Install wget
                print "Installing wget..."
                # we may not be able to install wget without updating first
                sudo('apt-get update')
                output = sudo('apt-get --yes install wget')
                if output.failed:
                    print(colors.red("Error while installing wget:"))
                    abort(output.lstrip("\\n"))
            rsync_is_installed = sudo('which rsync')
            if rsync_is_installed.failed:
                # Install rsync
                print "Installing rsync..."
                # we may not be able to install rsync without updating first
                sudo('apt-get update')
                output = sudo('apt-get --yes install rsync')
                if output.failed:
                    print(colors.red("Error while installing rsync:"))
                    abort(output.lstrip("\\n"))
        # Add Opscode Debian repo
        print("Setting up Opscode repository...")
        if version == "0.9":
            version = ""
        else:
            version = "-" + version
        append('opscode.list',
               'deb http://apt.opscode.com/ {0}{1} main'.format(distro, version),
               use_sudo=True)
        sudo('mv opscode.list /etc/apt/sources.list.d/')
        # Add repository GPG key
        gpg_key = "http://apt.opscode.com/packages@opscode.com.gpg.key"
        sudo('wget -qO - {0} | sudo apt-key add -'.format(gpg_key))
        # Load package list from new repository
        with settings(hide('warnings'), warn_only=True):
            output = sudo('apt-get update')
            if output.failed:
                print(colors.red(
                    "Error while executing 'apt-get install chef':"))
                abort(output)
        # Install Chef Solo
        print("Installing Chef Solo")
        # Ensure we don't get asked for the Chef Server
        command = "echo chef chef/chef_server_url select ''"
        command += " | debconf-set-selections"
        sudo(command)
        # Install package
        with settings(hide('warnings'), warn_only=True):
            output = sudo('apt-get --yes install ucf chef')
            if output.failed:
                print(colors.red(
                    "Error while executing 'apt-get install chef':"))
                abort(output)
        if stop_client == 'yes':
            # We only want chef-solo, stop chef-client and remove it from init
            sudo('update-rc.d -f chef-client remove')
            with settings(hide('warnings'), warn_only=True):
                # The logrotate entry will force restart of chef-client
                sudo('rm /etc/logrotate.d/chef')
            with settings(hide('warnings'), warn_only=True):
                output = sudo('service chef-client stop')
            if output.failed:
                # Probably an older distro without the newer "service" command
                sudo('/etc/init.d/chef-client stop')
def _add_rpm_repos():
    """Add RPM repositories for Chef

    Opscode doesn't officially support an ELFF repository any longer:
    http://wiki.opscode.com/display/chef/Installation+on+RHEL+and+CentOS+5+with
    +RPMs
    Using http://rbel.frameos.org/
    """
    # Detect the RHEL major version from /etc/redhat-release
    version_string = sudo('cat /etc/redhat-release')
    try:
        rhel_version = re.findall("\d[\d.]*", version_string)[0].split('.')[0]
    except IndexError:
        print "Warning: could not correctly detect the Red Hat version"
        print "Defaulting to 5 packages"
        rhel_version = "5"
    epel_release = "epel-release-5-4.noarch"
    if rhel_version == "6":
        epel_release = "epel-release-6-8.noarch"
    with show('running'):
        # Install the EPEL Yum Repository.
        with settings(hide('warnings', 'running'), warn_only=True):
            repo_url = "http://dl.fedoraproject.org"
            repo_path = "/pub/epel/{0}/i386/".format(rhel_version)
            repo_path += "{0}.rpm".format(epel_release)
            output = sudo('rpm -Uvh {0}{1}'.format(repo_url, repo_path))
            # "already installed" is not a failure; anything else aborts
            installed = "package {0} is already installed".format(epel_release)
            if output.failed and installed not in output:
                abort(output)
        # Install the FrameOS RBEL Yum Repository.
        with settings(hide('warnings', 'running'), warn_only=True):
            repo_url = "http://rbel.co"
            repo_path = "/rbel{0}".format(rhel_version)
            output = sudo('rpm -Uvh {0}{1}'.format(repo_url, repo_path))
            installed = "package rbel{0}-release-1.0-2.el{0}".format(
                rhel_version)
            installed += ".noarch is already installed"
            if output.failed and installed not in output:
                abort(output)
def _rpm_install():
    """Install Chef for rpm based distros"""
    # Repositories (EPEL + RBEL) must be in place before yum can find chef
    _add_rpm_repos()
    with show('running'):
        # Ensure we have an up-to-date ruby, as we need >=1.8.7
        sudo('yum -y upgrade ruby*')
        # Install Chef
        sudo('yum -y install rubygem-chef')
def _emerge_install():
    """Install Chef for Gentoo"""
    with show('running'):
        # Accepts the ~amd64 (testing) keyword and disables the "test"
        # USE flag for the chef ebuild
        sudo("USE='-test' ACCEPT_KEYWORDS='~amd64' emerge -u chef")
########NEW FILE########
__FILENAME__ = save_ip
"""Gets the IP and adds or updates the ipaddress attribute of a node"""
import subprocess
import os
import re
from fabric.api import env
from littlechef import chef
def parse_ip(text):
    """Extract an IPv4 IP from a text string

    Uses an IP Address Regex: http://www.regular-expressions.info/examples.html
    Returns the first dotted-quad match, or None when there is none.
    """
    matches = re.findall(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b', text)
    return matches[0] if matches else None
def execute(node):
    """Ping the node once, parse its IP, and save it to the node file

    node: node dict; node['name'] is used as the ping target and the
    resulting IP is stored under node['ipaddress'].
    """
    proc = subprocess.Popen(['ping', '-c', '1', node['name']],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    resp, error = proc.communicate()
    if not error:
        # Split output into lines and parse the first line to get the IP
        ip = parse_ip(resp.split("\n")[0])
        if not ip:
            print "Warning: could not get IP address from node {0}".format(
                node['name'])
        # NOTE(review): execution continues (and the node file is saved)
        # even when no IP could be parsed — confirm this is intended
        print "Node {0} has IP {1}".format(node['name'], ip)
        # Update with the ipaddress field in the corresponding node.json
        node['ipaddress'] = ip
        # save_config returns a temp file path, which is removed here
        os.remove(chef.save_config(node, ip))
    else:
        print "Warning: could not resolve node {0}".format(node['name'])
########NEW FILE########
__FILENAME__ = save_xen_info
"""Saves some virtualization attributes in case the node is a Xen host"""
import subprocess
import os
import json
from fabric.api import env, sudo, abort, hide
from littlechef import chef, lib
def execute(node):
    """Query ohai for virtualization data and store it in the node file

    Does nothing when the node does not report itself as a Xen host.
    """
    with hide('everything'):
        virt_info = json.loads(sudo('ohai virtualization'))
    if not len(virt_info) or virt_info[0][1] != "host":
        # It may work for virtualization solutions other than Xen
        print("This node is not a Xen host, doing nothing")
        return
    vms = []
    node['virtualization'] = {
        'role': 'host',
        'system': 'xen',
        'vms': vms,
    }
    # Parse "xm list" output, skipping its two header lines
    with hide('everything'):
        vm_listing = sudo("xm list")
    for line in vm_listing.split("\n")[2:]:
        fields = line.split()
        if len(fields) != 6:
            break
        vms.append({'fqdn': fields[0], 'RAM': fields[2], 'cpus': fields[3]})
    print("Found {0} VMs for this Xen host".format(
        len(node['virtualization']['vms'])))
    # Save node file and remove the returned temp file
    del node['name']
    os.remove(chef.save_config(node, True))
########NEW FILE########
__FILENAME__ = bad
"""Bad LittleChef plugin"""
def execute():
"""I am not actually valid Python code"""
I am a syntax error
########NEW FILE########
__FILENAME__ = dummy
"""Dummy LittleChef plugin"""
def execute():
"""Working plugin"""
print "Worked!"
########NEW FILE########
__FILENAME__ = test_base
import os
import unittest
from littlechef import runner
class BaseTest(unittest.TestCase):
    """Shared fixture: known node names plus cleanup of generated files"""
    def setUp(self):
        self.nodes = [
            'nestedroles1',
            'testnode1',
            'testnode2',
            'testnode3.mydomain.com',
            'testnode4'
        ]
        runner.__testing__ = True
    def tearDown(self):
        # Remove any tmp_<node>.json files a test run may have left behind
        for nodename in self.nodes + ["extranode"]:
            tmp_file = "tmp_{0}.json".format(nodename)
            if os.path.exists(tmp_file):
                os.remove(tmp_file)
        extra_node = os.path.join("nodes", "extranode.json")
        if os.path.exists(extra_node):
            os.remove(extra_node)
        # Reset fabric env state so tests do not leak into each other
        runner.env.chef_environment = None
        runner.env.hosts = []
        runner.env.all_hosts = []
        runner.env.ssh_config = None
        runner.env.key_filename = None
        runner.env.node_work_path = None
        runner.env.encrypted_data_bag_secret = None
########NEW FILE########
__FILENAME__ = test_command
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
import unittest
import subprocess
import os
import platform
import shutil
from os.path import join, normpath, abspath, split
import sys
# Make the littlechef package from this checkout importable ahead of any
# system-installed copy
env_path = "/".join(os.path.dirname(os.path.abspath(__file__)).split('/')[:-1])
sys.path.insert(0, env_path)
import littlechef
# Set some convenience variables
test_path = split(normpath(abspath(__file__)))[0]
littlechef_top = normpath(join(test_path, '..'))
# The "fix" entry point has a .cmd wrapper on Windows
if platform.system() == 'Windows':
    fix = join(littlechef_top, 'fix.cmd')
    WIN32 = True
else:
    fix = join(littlechef_top, 'fix')
    WIN32 = False
class BaseTest(unittest.TestCase):
    """Shared helpers: run the 'fix' command and capture its output"""
    def setUp(self):
        """Change to the test directory"""
        self.set_location()
    def set_location(self, location=test_path):
        """Change directories to a known location"""
        os.chdir(location)
    def execute(self, call):
        """Executes a command and returns stdout and stderr"""
        # The original had identical Popen calls in both the WIN32 and the
        # POSIX branch; the dead platform check was removed
        proc = subprocess.Popen(call,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        return proc.communicate()
class TestConfig(BaseTest):
    """Command-line behaviour that depends on configuration state"""
    def tearDown(self):
        # Tests may chdir away; always return to the test directory
        self.set_location()
    def test_not_a_kitchen(self):
        """Should exit with error when not a kitchen directory"""
        # Change to parent dir, which has no nodes/cookbooks/roles dir
        self.set_location(littlechef_top)
        # Call fix from the current directory above "tests/"
        resp, error = self.execute([fix, 'node:a'])
        self.assertTrue("Fatal error" in error, resp)
        self.assertTrue(
            'No {0} file found'.format(littlechef.CONFIGFILE) in error, error)
        self.assertEquals(resp, "", resp)
    def test_version(self):
        """Should output the correct Little Chef version"""
        resp, error = self.execute([fix, '-v'])
        self.assertEquals(resp, "",
                          "Response should be empty, version should be in stderr")
        self.assertTrue(
            'LittleChef {0}'.format(littlechef.__version__) in error)
    def test_list_commands(self):
        """Should output a list of available commands"""
        resp, error = self.execute([fix, '-l'])
        self.assertEquals(error, "")
        expected = "Starts a Chef Solo configuration run"
        self.assertTrue(expected in resp)
        commands = resp.split('\nAvailable commands:\n')[-1]
        # filter(None, ...) drops empty lines (returns a list on Python 2)
        commands = filter(None, commands.split('\n'))
        self.assertEquals(len(commands), 21)
    def test_verbose(self):
        """Should turn on verbose output"""
        resp, error = self.execute([fix, '--verbose', 'list_nodes'])
        self.assertEquals(error, "", error)
        self.assertTrue('Verbose output on' in resp, resp)
    def test_debug(self):
        """Should turn on debug loglevel"""
        resp, error = self.execute([fix, '--debug', 'list_nodes'])
        self.assertEquals(error, "", error)
        self.assertTrue('Debug level on' in resp, resp)
class TestEnvironment(BaseTest):
    """Tests for the --env command line option"""
    def test_no_valid_value(self):
        """Should error out when the env value is empty or is a fabric task"""
        resp, error = self.execute([fix, 'list_nodes', '--env'])
        self.assertEquals(resp, "")
        self.assertTrue(
            "error: argument -e/--env: expected one argument" in error, error)
        # A task name following --env must not be consumed as its value
        resp, error = self.execute([fix, '--env', 'list_nodes'])
        self.assertEquals(resp, "")
        self.assertTrue("error: No value given for --env" in error, error)
        cmd = [fix, '--env', 'nodes_with_role:base', 'role:base']
        resp, error = self.execute(cmd)
        self.assertEquals(resp, "")
        self.assertTrue("error: No value given for --env" in error, error)
    def test_valid_environment(self):
        """Should set the chef_environment value when one is given"""
        resp, error = self.execute([fix, 'list_nodes', '--env', 'staging'])
        self.assertEquals(error, "", error)
        self.assertTrue("Environment: staging" in resp, resp)
class TestRunner(BaseTest):
    """End-to-end tests of node selection and the plugin machinery"""
    def test_no_node_given(self):
        """Should abort when no node is given"""
        resp, error = self.execute([fix, 'node:'])
        self.assertTrue("Fatal error: No node was given" in error)
    def test_plugin(self):
        """Should execute the given plugin"""
        # Nonexistent plugin name
        resp, error = self.execute([fix, 'node:testnode1', 'plugin:notthere'])
        expected = ", could not find 'notthere.py' in the plugin directory"
        self.assertTrue(expected in error, resp + error)
        # Fixture plugin with a deliberate syntax error (plugins/bad.py)
        resp, error = self.execute([fix, 'node:testnode1', 'plugin:bad'])
        expected = "Found plugin 'bad', but it seems to have a syntax error:"
        expected += " invalid syntax (bad.py, line 6)"
        self.assertTrue(expected in error, resp + error)
        # Working fixture plugin
        resp, error = self.execute([fix, 'node:testnode1', 'plugin:dummy'])
        expected = "Executing plugin '{0}' on {1}".format("dummy", "testnode1")
        self.assertTrue(expected in resp, resp + error)
    def test_list_plugins(self):
        """Should print a list of available plugins"""
        resp, error = self.execute([fix, 'list_plugins'])
        self.assertTrue("List of available plugins:" in resp, resp)
        self.assertTrue("bad: Plugin has a syntax error" in resp, resp)
        self.assertTrue("dummy: Dummy LittleChef plugin" in resp, resp)
class TestCookbooks(BaseTest):
    """Tests for recipe listing and cookbook metadata handling"""
    def test_list_recipes(self):
        """Should list available recipes"""
        resp, error = self.execute([fix, 'list_recipes'])
        self.assertEquals(error, "")
        self.assertTrue('subversion::client' in resp)
        self.assertTrue('subversion::server' in resp)
    def test_list_recipes_site_cookbooks(self):
        """Should give priority to site-cookbooks information"""
        resp, error = self.execute([fix, 'list_recipes'])
        self.assertTrue('Modified by site-cookbooks' in resp)
    def test_list_recipes_detailed(self):
        """Should show a detailed list of available recipes"""
        resp, error = self.execute([fix, 'list_recipes_detailed'])
        self.assertTrue('subversion::client' in resp)
        for field in ['description', 'version', 'dependencies', 'attributes']:
            self.assertTrue(field in resp)
    def test_list_recipes_detailed_site_cookbooks(self):
        """Should show a detailed list of available recipes with site-cookbook
        priority
        """
        resp, error = self.execute([fix, 'list_recipes_detailed'])
        self.assertTrue('0.8.4' in resp)
    def test_no_metadata(self):
        """Should abort if cookbook has no metadata.json"""
        bad_cookbook = join(test_path, 'cookbooks', 'bad_cookbook')
        os.mkdir(bad_cookbook)
        try:
            resp, error = self.execute([fix, 'list_recipes'])
        except OSError:
            self.fail("Couldn't execute {0}".format(fix))
        finally:
            # Always remove the fixture directory, even when execute fails
            os.rmdir(bad_cookbook)
        expected = 'Fatal error: Cookbook "bad_cookbook" has no metadata.json'
        self.assertTrue(expected in error)
class TestListRoles(BaseTest):
    """Tests for the role listing commands"""
    def test_list_roles(self):
        """Should list all roles"""
        out, err = self.execute([fix, 'list_roles'])
        self.assertTrue('base' in out)
        self.assertTrue('example aplication' in out)
    def test_list_roles_detailed(self):
        """Should show a detailed list of all roles"""
        out, err = self.execute([fix, 'list_roles_detailed'])
        self.assertTrue('base' in out)
        self.assertTrue('example aplication' in out)
class TestListNodes(BaseTest):
    """Tests for the node listing commands"""
    def test_list_nodes(self):
        """Should list all nodes"""
        out, err = self.execute([fix, 'list_nodes'])
        for nodename in ('testnode1', 'testnode2', 'testnode3.mydomain.com'):
            self.assertTrue(nodename in out)
        self.assertTrue('Recipes: subversion' in out)
    def test_list_nodes_in_env(self):
        """Should list all nodes in an environment"""
        out, err = self.execute([fix, '--env', 'staging', 'list_nodes'])
        self.assertTrue('testnode2' in out)
        # Nodes outside the staging environment must be filtered out
        self.assertFalse('testnode1' in out)
        self.assertFalse('testnode3.mydomain.com' in out)
    def test_list_nodes_detailed(self):
        """Should show a detailed list of all nodes"""
        out, err = self.execute([fix, 'list_nodes_detailed'])
        self.assertTrue('testnode1' in out)
        self.assertTrue('Recipe: subversion' in out)
    def test_list_nodes_with_recipe(self):
        """Should list all nodes with a recipe in the run list"""
        out, err = self.execute([fix, 'list_nodes_with_recipe:subversion'])
        self.assertTrue('testnode1' in out)
        self.assertTrue('Recipes: subversion' in out)
        out, err = self.execute([fix, 'list_nodes_with_recipe:apache2'])
        self.assertFalse('testnode1' in out)
class TestNewKitchen(BaseTest):
    """Tests for new_kitchen, run inside a throwaway directory"""
    def setUp(self):
        # Create and enter a fresh directory for each test
        self.new_kitchen = join(test_path, 'test_new_kitchen')
        os.mkdir(self.new_kitchen)
        self.set_location(self.new_kitchen)
    def tearDown(self):
        shutil.rmtree(self.new_kitchen)
        self.set_location()
    def test_new_kitchen_creates_required_directories(self):
        resp, error = self.execute([fix, 'new_kitchen'])
        kitchen_contents = os.listdir(os.getcwd())
        self.assertTrue('roles' in kitchen_contents)
        self.assertTrue('cookbooks' in kitchen_contents)
        self.assertTrue('site-cookbooks' in kitchen_contents)
        self.assertTrue('data_bags' in kitchen_contents)
        self.assertTrue('nodes' in kitchen_contents)
        self.assertTrue('environments' in kitchen_contents)
        self.assertTrue(littlechef.CONFIGFILE in kitchen_contents)
    def test_new_kitchen_can_list_nodes(self):
        self.execute([fix, 'new_kitchen'])
        # Write a minimal config file so list_nodes can run
        # (Python 2 "print >>" chevron syntax writes to the file object)
        with open(littlechef.CONFIGFILE, "w") as configfh:
            print >> configfh, "[userinfo]"
            print >> configfh, "user = testuser"
            print >> configfh, "password = testpassword"
        resp, error = self.execute([fix, 'list_nodes'])
        self.assertFalse(error)
        self.assertTrue('Found 0 nodes' in resp)
        self.assertEqual('', error)
########NEW FILE########
# NOTE(review): "__FILENAME__ = test_lib" is a marker inserted by the tool
# that concatenated these modules; `test_lib` is an undefined name here, so
# this line would raise NameError if executed -- confirm this bundle is
# only used as a text corpus, not imported.
__FILENAME__ = test_lib
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#
import os
import json

from fabric.api import env
from mock import patch
from nose.tools import raises
import sys

# Make the littlechef source checkout importable ahead of any installed copy.
env_path = "/".join(os.path.dirname(os.path.abspath(__file__)).split('/')[:-1])
sys.path.insert(0, env_path)

from littlechef import chef, lib, solo, exceptions
from test_base import BaseTest

# Paths to the littlechef source directory and the repository top level.
littlechef_src = os.path.split(os.path.normpath(os.path.abspath(__file__)))[0]
littlechef_top = os.path.normpath(os.path.join(littlechef_src, '..'))
class TestSolo(BaseTest):
    """Tests for failure modes of littlechef.solo.configure."""

    def test_configure_no_sudo_rights(self):
        """Should abort when user has no sudo rights"""
        env.host_string = "extranode"
        # Patch both filesystem check and sudo in a single with-statement.
        with patch.object(solo, 'exists') as fake_exists, \
                patch.object(solo, 'sudo') as fake_sudo:
            fake_exists.return_value = False
            fake_sudo.failed = True
            self.assertRaises(SystemExit, solo.configure)

    @raises(SystemExit)
    @patch('littlechef.solo.exists')
    def test_configure_bad_credentials(self, mock_exists):
        """Should return True when node has been synced"""
        warning = ('/usr/lib64/python2.6/getpass.py:83: GetPassWarning: '
                   'Can not control echo on the terminal.')
        mock_exists.side_effect = EOFError(warning)
        solo.configure()
class TestLib(BaseTest):
    """Tests for the littlechef.lib query helpers, run against the fixture
    kitchen (testnode1..4, nestedroles1) prepared by BaseTest."""

    def test_get_node_not_found(self):
        """Should get empty template when node is not found"""
        name = 'Idon"texist'
        expected = {'chef_environment': '_default', 'name': name, 'run_list': []}
        self.assertEqual(lib.get_node(name), expected)

    def test_get_node_found(self):
        """Should get node data when node is found"""
        expected = {
            'chef_environment': 'production',
            'name': 'testnode1',
            'run_list': ['recipe[subversion]'],
        }
        self.assertEqual(lib.get_node('testnode1'), expected)

    def test_get_node_default_env(self):
        """Should set env to _default when node sets no chef_environment"""
        expected = {
            'chef_environment': '_default',
            'name': 'nestedroles1',
            'run_list': ['role[top_level_role]'],
            'tags': ['top'],
        }
        self.assertEqual(lib.get_node('nestedroles1'), expected)

    def test_get_nodes(self):
        """Should return all configured nodes when no environment is given"""
        found_nodes = lib.get_nodes()
        self.assertEqual(len(found_nodes), len(self.nodes))
        expected_keys = ['name', 'chef_environment', 'run_list']
        for node in found_nodes:
            self.assertTrue(all([key in node for key in expected_keys]))

    def test_get_nodes_in_env(self):
        """Should list all nodes in the given environment"""
        self.assertEqual(len(lib.get_nodes("production")), 3)
        self.assertEqual(len(lib.get_nodes("staging")), 1)

    def test_nodes_with_role(self):
        """Should return nodes when role is present in the explicit run_list"""
        nodes = list(lib.get_nodes_with_role('all_you_can_eat'))
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0]['name'], 'testnode2')
        self.assertTrue('role[all_you_can_eat]' in nodes[0]['run_list'])

    def test_nodes_with_role_expanded(self):
        """Should return nodes when role is present in the expanded run_list"""
        # nested role 'base'
        nodes = list(lib.get_nodes_with_role('base'))
        self.assertEqual(len(nodes), 2)
        expected_nodes = ['nestedroles1', 'testnode2']
        for node in nodes:
            self.assertTrue(node['name'] in expected_nodes)
            expected_nodes.remove(node['name'])
        # Find node regardless of recursion level of role sought
        for role in ['top_level_role', 'sub_role', 'sub_sub_role']:
            nodes = list(lib.get_nodes_with_role(role))
            self.assertEqual(len(nodes), 1)
            # NOTE(review): assertTrue takes (expr, msg); this was likely
            # meant to be assertEqual -- as written it always passes when
            # the name is truthy. TODO confirm and fix upstream.
            self.assertTrue(nodes[0]['name'], 'nestedroles1')

    def test_nodes_with_role_wildcard(self):
        """Should return node when wildcard is given and role is assigned"""
        nodes = list(lib.get_nodes_with_role('all_*'))
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0]['name'], 'testnode2')
        # Prefix with no wildcard
        nodes = list(lib.get_nodes_with_role('all_'))
        self.assertEqual(len(nodes), 0)
        # Nodes with at least one role
        nodes = list(lib.get_nodes_with_role('*'))
        self.assertEqual(len(nodes), 2)
        nodes = list(lib.get_nodes_with_role(''))
        self.assertEqual(len(nodes), 0)

    def test_nodes_with_role_in_env(self):
        """Should return node when role is assigned and environment matches"""
        nodes = list(lib.get_nodes_with_role('all_you_can_eat', 'staging'))
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0]['name'], 'testnode2')
        # No nodes in production with this role
        nodes = list(lib.get_nodes_with_role('all_you_can_eat', 'production'))
        self.assertFalse(len(nodes))

    def test_nodes_with_recipe(self):
        """Should return node when recipe is in the explicit run_list"""
        nodes = list(lib.get_nodes_with_recipe('vim'))
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0]['name'], 'testnode3.mydomain.com')

    def test_nodes_with_recipe_expanded(self):
        """Should return node when recipe is in the expanded run_list"""
        # 'subversion' is in the 'base' role
        nodes = list(lib.get_nodes_with_recipe('subversion'))
        self.assertEqual(len(nodes), 4)
        # man recipe inside role "all_you_can_eat" and in testnode4
        nodes = list(lib.get_nodes_with_recipe('man'))
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0]['name'], 'testnode2')

    def test_nodes_with_recipe_wildcard(self):
        """Should return node when wildcard is given and recipe is assigned"""
        nodes = list(lib.get_nodes_with_recipe('sub*'))
        self.assertEqual(len(nodes), 4)
        # Get node with at least one recipe
        nodes = list(lib.get_nodes_with_recipe('*'))
        self.assertEqual(len(nodes), 5)
        # NOTE(review): this calls get_nodes_with_role, not
        # get_nodes_with_recipe -- probably a copy-paste slip; confirm.
        nodes = list(lib.get_nodes_with_role(''))
        self.assertEqual(len(nodes), 0)

    def test_nodes_with_recipe_in_env(self):
        """Should return all nodes with a given recipe and in the given env"""
        nodes = list(lib.get_nodes_with_recipe('subversion', 'production'))
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0]['name'], 'testnode1')
        nodes = list(lib.get_nodes_with_recipe('subversion', 'staging'))
        self.assertEqual(len(nodes), 1)
        # No nodes in staging with this recipe
        nodes = list(lib.get_nodes_with_recipe('vim', 'staging'))
        self.assertFalse(len(nodes))

    def test_get_nodes_with_tag(self):
        """Should list all nodes with tag 'top'"""
        nodes = list(lib.get_nodes_with_tag('top'))
        self.assertEqual(len(nodes), 1)

    def test_get_nodes_with_tag_in_env(self):
        """Should list all nodes with tag 'top' in the given environment"""
        nodes = list(lib.get_nodes_with_tag('top', 'production'))
        self.assertEqual(len(nodes), 0)
        nodes = list(lib.get_nodes_with_tag('top', '_default'))
        self.assertEqual(len(nodes), 1)

    def test_list_recipes(self):
        """Should return recipe metadata, including site-cookbook overrides."""
        recipes = lib.get_recipes()
        self.assertEqual(len(recipes), 6)
        self.assertEqual(recipes[1]['name'], 'subversion')
        self.assertEqual(recipes[1]['description'],
                         'Includes the client recipe. Modified by site-cookbooks')
        self.assertEqual(recipes[2]['name'], 'subversion::client')
        self.assertEqual(recipes[2]['description'],
                         'Subversion Client installs subversion and some extra svn libs')
        self.assertEqual(recipes[3]['name'], 'subversion::server')
        self.assertIn('subversion::testrecipe', [r['name'] for r in recipes])

    def test_import_plugin(self):
        """Should import the given plugin"""
        plugin = lib.import_plugin("dummy")
        expected = "Dummy LittleChef plugin"
        self.assertEqual(plugin.__doc__, expected)
        # Should fail to import a bad plugin module
        self.assertRaises(SystemExit, lib.import_plugin, "bad")

    def test_get_plugins(self):
        """Should get a list of available plugins"""
        plugins = [p for p in lib.get_plugins()]
        self.assertEqual(len(plugins), 2)
        self.assertEqual(plugins[0]['bad'], "Plugin has a syntax error")

    def test_get_environments(self):
        """Should get a list of all environments"""
        environments = lib.get_environments()
        self.assertEqual(sorted(env['name'] for env in environments),
                         ['production', 'staging'])

    def test_get_existing_environment(self):
        """Should return an existing environment object from the kitchen"""
        environment = lib.get_environment('production')
        self.assertTrue('subversion' in environment['default_attributes'])
        self.assertEqual(environment['default_attributes']['subversion']['user'], 'tom')

    def test_get__default_environment(self):
        """Should return empty env when name is '_default'"""
        expected = {
            "name": "_default",
            "default_attributes": {},
            "json_class": "Chef::Environment",
            "chef_type": "environment",
            "description": "",
            "cookbook_versions": {}
        }
        self.assertEqual(lib.get_environment('_default'), expected)

    @raises(exceptions.FileNotFoundError)
    def test_get_nonexisting_environment(self):
        """Should raise FileNotFoundError when environment does not exist"""
        lib.get_environment('not-exists')
class TestChef(BaseTest):
    """Tests for littlechef.chef: node config persistence, ohai IP
    discovery, node data bag generation, and attribute merge precedence
    (cookbook < environment default < role default < node < role override
    < environment override)."""

    def tearDown(self):
        # Tests regenerate the local node data bag; always clean it up.
        chef.remove_local_node_data_bag()
        super(TestChef, self).tearDown()

    def test_save_config(self):
        """Should create a tmp_extranode.json and a nodes/extranode.json config
        file
        """
        # Save a new node
        env.host_string = 'extranode'
        run_list = ["role[base]"]
        chef.save_config({"run_list": run_list})
        file_path = os.path.join('nodes', 'extranode.json')
        self.assertTrue(os.path.exists(file_path))
        with open(file_path, 'r') as f:
            data = json.loads(f.read())
        self.assertEqual(data['run_list'], run_list)
        # It shouldn't overwrite existing config files
        env.host_string = 'testnode1'  # This node exists
        run_list = ["role[base]"]
        chef.save_config({"run_list": run_list})
        with open(os.path.join('nodes', 'testnode1.json'), 'r') as f:
            data = json.loads(f.read())
        # It should *NOT* have "base" assigned
        self.assertEqual(data['run_list'], ["recipe[subversion]"])

    def test_get_ipaddress(self):
        """Should add ipaddress attribute when ohai returns correct IP address
        """
        # Fake ohai output object: a str subclass carrying `succeeded`.
        class MockSudoReturnValue(str):
            succeeded = True

        node = {}
        fake_ip = "1.1.1.2"
        with patch.object(chef, 'sudo') as mock_method:
            mocked_ohai_response = '["{0}"]'.format(fake_ip)
            mock_method.return_value = MockSudoReturnValue(mocked_ohai_response)
            response = chef._get_ipaddress(node)
            self.assertTrue(response)
            self.assertEqual(node['ipaddress'], fake_ip)

    def test_get_ipaddress_attribute_exists(self):
        """Should not save ipaddress when attribute exists"""
        class MockSudoReturnValue(str):
            succeeded = True

        node = {'ipaddress': '1.1.1.1'}
        with patch.object(chef, 'sudo') as mock_method:
            mocked_ohai_response = '["{0}"]'.format("1.1.1.2")
            mock_method.return_value = MockSudoReturnValue(mocked_ohai_response)
            response = chef._get_ipaddress(node)
            self.assertFalse(response)
            # The pre-existing value must be left untouched.
            self.assertEqual(node['ipaddress'], '1.1.1.1')

    def test_get_ipaddress_bad_ohai_output(self):
        """Should abort when ohai's output cannot be parsed"""
        class MockSudoReturnValue(str):
            succeeded = True

        with patch.object(chef, 'sudo') as mock_method:
            mocked_ohai_response = ('Invalid gemspec '
                                    '["{0}"]'.format("1.1.1.2"))
            mock_method.return_value = MockSudoReturnValue(mocked_ohai_response)
            self.assertRaises(SystemExit, chef._get_ipaddress, {})

    def test_build_node_data_bag(self):
        """Should create a node data bag with one item per node"""
        chef.build_node_data_bag()
        item_path = os.path.join('data_bags', 'node', 'testnode1.json')
        self.assertTrue(os.path.exists(item_path))
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('id' in data and data['id'] == 'testnode1')
        self.assertTrue('name' in data and data['name'] == 'testnode1')
        self.assertTrue(
            'recipes' in data and data['recipes'] == ['subversion'])
        self.assertTrue(
            'recipes' in data and data['role'] == [])
        item_path = os.path.join('data_bags', 'node', 'testnode2.json')
        self.assertTrue(os.path.exists(item_path))
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('id' in data and data['id'] == 'testnode2')
        self.assertTrue('recipes' in data)
        self.assertEqual(data['recipes'], [u'subversion', u'man'])
        self.assertTrue('recipes' in data)
        # 'role' lists only explicitly assigned roles; 'roles' is expanded.
        self.assertEqual(data['role'], [u'all_you_can_eat'])
        self.assertEqual(data['roles'], [u'base', u'all_you_can_eat'])

    def test_build_node_data_bag_nonalphanumeric(self):
        """Should create a node data bag when node name contains invalid chars
        """
        chef.build_node_data_bag()
        # A node called testnode3.mydomain.com will have the data bag id
        # 'testnode3', because dots are not allowed.
        filename = 'testnode3_mydomain_com'
        nodename = filename.replace("_", ".")
        item_path = os.path.join('data_bags', 'node', filename + '.json')
        self.assertTrue(os.path.exists(item_path), "node file does not exist")
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('id' in data and data['id'] == filename)
        self.assertTrue('name' in data and data['name'] == nodename)

    def test_automatic_attributes(self):
        """Should add Chef's automatic attributes"""
        chef.build_node_data_bag()
        # Check node with single word fqdn
        testnode1_path = os.path.join('data_bags', 'node', 'testnode1.json')
        with open(testnode1_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('fqdn' in data and data['fqdn'] == 'testnode1')
        self.assertTrue('hostname' in data and data['hostname'] == 'testnode1')
        self.assertTrue('domain' in data and data['domain'] == '')
        # Check node with complex fqdn
        testnode3_path = os.path.join(
            'data_bags', 'node', 'testnode3_mydomain_com.json')
        with open(testnode3_path, 'r') as f:
            # Python 2 debug print left by the original author.
            print testnode3_path
            data = json.loads(f.read())
        self.assertTrue(
            'fqdn' in data and data['fqdn'] == 'testnode3.mydomain.com')
        self.assertTrue('hostname' in data and data['hostname'] == 'testnode3')
        self.assertTrue('domain' in data and data['domain'] == 'mydomain.com')

    def test_attribute_merge_cookbook_not_found(self):
        """Should print a warning when merging a node and a cookbook is not
        found
        """
        # Save new node with a non-existing cookbook assigned
        env.host_string = 'extranode'
        chef.save_config({"run_list": ["recipe[phantom_cookbook]"]})
        self.assertRaises(SystemExit, chef.build_node_data_bag)

    def test_attribute_merge_cookbook_default(self):
        """Should have the value found in recipe/attributes/default.rb"""
        chef.build_node_data_bag()
        item_path = os.path.join('data_bags', 'node', 'testnode2.json')
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('subversion' in data)
        self.assertTrue(data['subversion']['repo_name'] == 'repo')

    def test_attribute_merge_environment_default(self):
        """Should have the value found in environment/ENV.json"""
        chef.build_node_data_bag()
        item_path = os.path.join('data_bags', 'node', 'testnode1.json')
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('subversion' in data)
        self.assertEqual(data['subversion']['user'], 'tom')

    def test_attribute_merge_cookbook_boolean(self):
        """Should have real boolean values for default cookbook attributes"""
        chef.build_node_data_bag()
        item_path = os.path.join(
            'data_bags', 'node', 'testnode3_mydomain_com.json')
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('vim' in data)
        self.assertTrue(data['vim']['sucks'] is True)

    def test_attribute_merge_site_cookbook_default(self):
        """Should have the value found in
        site_cookbooks/xx/recipe/attributes/default.rb
        """
        chef.build_node_data_bag()
        item_path = os.path.join('data_bags', 'node', 'testnode2.json')
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('subversion' in data)
        self.assertTrue(data['subversion']['repo_dir'] == '/srv/svn2')

    def test_attribute_merge_role_not_found(self):
        """Should print a warning when an assigned role is not found"""
        # Save new node with a non-existing role assigned
        env.host_string = 'extranode'
        chef.save_config({"run_list": ["role[phantom_role]"]})
        self.assertRaises(SystemExit, chef.build_node_data_bag)

    def test_attribute_merge_role_default(self):
        """Should have the value found in the roles default attributes"""
        chef.build_node_data_bag()
        item_path = os.path.join('data_bags', 'node', 'testnode2.json')
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('subversion' in data)
        self.assertEqual(
            data['subversion']['repo_server'], 'role_default_repo_server')
        self.assertTrue('other_attr' in data)
        self.assertEqual(data['other_attr']['other_key'], 'nada')

    def test_attribute_merge_node_normal(self):
        """Should have the value found in the node attributes"""
        chef.build_node_data_bag()
        item_path = os.path.join('data_bags', 'node', 'testnode2.json')
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('subversion' in data)
        self.assertEqual(data['subversion']['user'], 'node_user')

    def test_attribute_merge_role_override(self):
        """Should have the value found in the roles override attributes"""
        chef.build_node_data_bag()
        item_path = os.path.join('data_bags', 'node', 'testnode2.json')
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('subversion' in data)
        self.assertEqual(data['subversion']['password'], 'role_override_pass')

    def test_attribute_merge_environment_override(self):
        """Should have the value found in the environment override attributes"""
        chef.build_node_data_bag()
        item_path = os.path.join('data_bags', 'node', 'testnode1.json')
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('subversion' in data)
        self.assertEqual(data['subversion']['password'], 'env_override_pass')

    def test_attribute_merge_deep_dict(self):
        """Should deep-merge a dict when it is defined in two different places
        """
        chef.build_node_data_bag()
        item_path = os.path.join('data_bags', 'node', 'testnode2.json')
        with open(item_path, 'r') as f:
            data = json.loads(f.read())
        self.assertTrue('other_attr' in data)
        expected = {
            "deep_dict": {
                "deep_key1": "node_value1",
                "deep_key2": "role_value2"
            }
        }
        # NOTE(review): assertTrue takes (expr, msg); this was probably
        # meant to be assertEqual -- the comparison to `expected` is never
        # actually performed. TODO confirm and fix upstream.
        self.assertTrue(data['other_attr']['deep_dict'], expected)

    def test_sync_node_dummy_attr(self):
        """Should return False when node has a dummy tag or dummy=true"""
        self.assertFalse(chef.sync_node({'name': 'extranode', 'dummy': True}))
        self.assertFalse(chef.sync_node({'name': 'extranode', 'tags': ['dummy']}))

    # NOTE(review): @patch decorators inject mocks bottom-up, so the second
    # parameter ("mock_ipaddress") actually receives the _configure_node
    # patch, not _get_ipaddress -- the names here are misleading; confirm.
    @patch('littlechef.chef.solo.configure')
    @patch('littlechef.chef._get_ipaddress')
    @patch('littlechef.chef._synchronize_node')
    @patch('littlechef.chef._configure_node')
    @patch('littlechef.chef._node_cleanup')
    def test_sync_node(self, mock_method1, mock_ipaddress, mock_method3,
                       mock_method4, mock_method5):
        """Should return True when node has been synced"""
        env.host_string = 'extranode'
        mock_ipaddress.return_value = False
        test_node = {'name': 'extranode', 'dummy': False, 'run_list': []}
        self.assertTrue(chef.sync_node(test_node))
########NEW FILE########
# NOTE(review): concatenation marker from the file bundler; `test_runner`
# is an undefined name here and would raise NameError if executed.
__FILENAME__ = test_runner
# Python 2 stdlib module name (renamed to `configparser` in Python 3).
from ConfigParser import SafeConfigParser

from mock import patch
from nose.tools import raises

from littlechef import runner
from test_base import BaseTest
class TestConfig(BaseTest):
    """Tests for reading the kitchen configuration file."""

    def test_get_config(self):
        """Should read configuration from config file when config.cfg is found
        """
        runner._readconfig()
        # All values below come from the fixture kitchen's config file.
        expected = [
            ('ssh_config_path', None),
            ('ssh_config', None),
            ('user', "testuser"),
            ('password', "testpass"),
            ('key_filename', None),
            ('node_work_path', "/tmp/chef-solo"),
            ('encrypted_data_bag_secret', None),
            ('sync_packages_dest_dir', "/srv/repos"),
            ('sync_packages_local_dir', "./repos"),
        ]
        for attribute, value in expected:
            self.assertEqual(getattr(runner.env, attribute), value)

    def test_not_a_kitchen(self):
        """Should abort when no config file found"""
        with patch.object(SafeConfigParser, 'read') as fake_read:
            fake_read.return_value = []
            self.assertRaises(SystemExit, runner._readconfig)
class TestNode(BaseTest):
    """Tests for the `node` task that selects which hosts to configure."""

    def test_node_one(self):
        """Should configure one node when an existing node name is given"""
        runner.node('testnode1')
        self.assertEqual(['testnode1'], runner.env.hosts)

    def test_node_several(self):
        """Should configure several nodes"""
        runner.node('testnode1', 'testnode2')
        self.assertEqual(['testnode1', 'testnode2'], runner.env.hosts)

    def test_node_all(self):
        """Should configure all nodes when 'all' is given"""
        runner.node('all')
        self.assertEqual(self.nodes, runner.env.hosts)

    def test_node_all_in_env(self):
        """Should configure all nodes of an environment when 'all' is given
        and an environment is set"""
        runner.env.chef_environment = "staging"
        runner.node('all')
        self.assertEqual(['testnode2'], runner.env.hosts)
class TestNodesWithRole(BaseTest):
    """Tests for selecting hosts by assigned role."""

    def test_nodes_with_role(self):
        """Should return a list of nodes with the given role in the run_list"""
        runner.nodes_with_role('base')
        self.assertEqual(['nestedroles1', 'testnode2'], runner.env.hosts)

    def test_nodes_with_role_in_env(self):
        """Should return a filtered list of nodes with role when an env is given
        """
        runner.env.chef_environment = "staging"
        runner.nodes_with_role('base')
        self.assertEqual(['testnode2'], runner.env.hosts)

    @raises(SystemExit)
    def test_nodes_with_role_in_env_not_found(self):
        """Should abort when no nodes with given role found in the environment
        """
        runner.env.chef_environment = "production"
        runner.nodes_with_role('base')
class TestNodesWithRecipe(BaseTest):
    """Tests for selecting hosts by assigned recipe."""

    def test_nodes_with_role(self):
        """Should return a list of nodes with the given recipe in the run_list"""
        runner.nodes_with_recipe('man')
        self.assertEqual(['testnode2', 'testnode4'], runner.env.hosts)

    def test_nodes_with_role_in_env(self):
        """Should return a filtered list of nodes with recipe when an env is
        given"""
        runner.env.chef_environment = "staging"
        runner.nodes_with_recipe('man')
        self.assertEqual(['testnode2'], runner.env.hosts)

    @raises(SystemExit)
    def test_nodes_with_role_in_env_not_found(self):
        """Should abort when no nodes with given recipe found in the environment
        """
        runner.env.chef_environment = "_default"
        runner.nodes_with_recipe('man')
class TestNodesWithTag(BaseTest):
    """Tests for selecting hosts by tag."""

    def test_nodes_with_tag(self):
        """Should return a list of nodes with the given tag"""
        runner.nodes_with_tag('top')
        self.assertEqual(['nestedroles1'], runner.env.hosts)

    def test_nodes_with_tag_in_env(self):
        """Should return a filtered list of nodes with tag when an env is given
        """
        runner.env.chef_environment = "production"
        runner.nodes_with_tag('dummy')
        self.assertEqual(['testnode4'], runner.env.hosts)

    @raises(SystemExit)
    def test_nodes_with_tag_in_env_not_found(self):
        """Should abort when no nodes with given tag found in the environment
        """
        runner.env.chef_environment = "production"
        # NOTE(review): this calls nodes_with_role, not nodes_with_tag --
        # possibly a copy-paste slip in the original suite; confirm.
        runner.nodes_with_role('top')
########NEW FILE########
| [
"dyangUCI@github.com"
] | dyangUCI@github.com |
236b411cb4481023898e177f51e21412dc57c3ff | 48a7b266737b62da330170ca4fe4ac4bf1d8b663 | /molsysmt/form/openmm_Topology/to_openmm_System.py | ec0c582b7538b5b681e118cc0f7c1a1197793d15 | [
"MIT"
] | permissive | uibcdf/MolSysMT | ddab5a89b8ec2377f383884c5169d147cab01322 | c3d713ba63db24eb8a2426115cf8d9cb3665d225 | refs/heads/main | 2023-08-08T15:04:16.217967 | 2023-08-04T05:49:56 | 2023-08-04T05:49:56 | 137,937,243 | 15 | 3 | MIT | 2023-06-04T20:27:06 | 2018-06-19T19:38:44 | Python | UTF-8 | Python | false | false | 1,223 | py | from molsysmt._private.digestion import digest
@digest(form='openmm.Topology')
def to_openmm_System(item, atom_indices='all', forcefield=None, water_model=None, implicit_solvent=None,
                     non_bonded_method=None, constraints=None, switch_distance=None,
                     dispersion_correction=None, ewald_error_tolerance=None):
    """Build an openmm.System from an openmm.Topology.

    Parameters
    ----------
    item : openmm.app.Topology
        Topology to parameterize.
    forcefield, water_model, implicit_solvent :
        MolSysMT forcefield specification, resolved to OpenMM forcefield
        files via `forcefield_to_engine`.
    non_bonded_method :
        Either an OpenMM constant or the string 'no cutoff'
        (mapped to app.NoCutoff).
    constraints :
        Either an OpenMM constant or the string 'hbonds'
        (mapped to app.HBonds).
    dispersion_correction, ewald_error_tolerance :
        When given, applied to the system's NonbondedForce.

    Returns
    -------
    openmm.System

    NOTE(review): `atom_indices` and `switch_distance` are currently
    unused -- confirm whether they are still pending implementation.
    """
    from openmm import app
    from molsysmt.molecular_mechanics import forcefield_to_engine

    forcefield = forcefield_to_engine(forcefield,
                                      water_model=water_model, implicit_solvent=implicit_solvent,
                                      engine='OpenMM')
    forcefield = app.ForceField(*forcefield)

    # Map string options onto OpenMM constants.
    if non_bonded_method == 'no cutoff':
        non_bonded_method = app.NoCutoff
    if constraints == 'hbonds':
        # Bug fix: this was assigned to a misspelled local ("contraints")
        # and then ignored -- createSystem hard-coded app.HBonds, so the
        # `constraints` argument had no effect.
        constraints = app.HBonds

    system = forcefield.createSystem(item, nonbondedMethod=non_bonded_method,
                                     constraints=constraints)

    if dispersion_correction or ewald_error_tolerance:
        forces = {ii.__class__.__name__: ii for ii in system.getForces()}
        if dispersion_correction:
            forces['NonbondedForce'].setUseDispersionCorrection(True)
        if ewald_error_tolerance:
            forces['NonbondedForce'].setEwaldErrorTolerance(ewald_error_tolerance)

    return system
| [
"prada.gracia@gmail.com"
] | prada.gracia@gmail.com |
6a23f800b0642149d86c1b273e549390add95953 | 09fd456a6552f42c124c148978289fae1af2d5c3 | /Greedy/1046.py | 38cf41d0ce78d7ec9ca4fb81ad2f34904cf89bd5 | [] | no_license | hoang-ng/LeetCode | 60b4e68cbcf54cbe763d1f98a70f52e628ab32fb | 5407c6d858bfa43325363503c31134e560522be3 | refs/heads/master | 2021-04-10T11:34:35.310374 | 2020-07-28T10:22:05 | 2020-07-28T10:22:05 | 248,932,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | # 1046. Last Stone Weight
# We have a collection of stones, each stone has a positive integer weight.
# Each turn, we choose the two heaviest stones and smash them together. Suppose the stones have weights x and y with x <= y. The result of this smash is:
# If x == y, both stones are totally destroyed;
# If x != y, the stone of weight x is totally destroyed, and the stone of weight y has new weight y-x.
# At the end, there is at most 1 stone left. Return the weight of this stone (or 0 if there are no stones left.)
# Example 1:
# Input: [2,7,4,1,8,1]
# Output: 1
# Explanation:
# We combine 7 and 8 to get 1 so the array converts to [2,4,1,1,1] then,
# we combine 2 and 4 to get 2 so the array converts to [2,1,1,1] then,
# we combine 2 and 1 to get 1 so the array converts to [1,1,1] then,
# we combine 1 and 1 to get 0 so the array converts to [1] then that's the value of last stone.
# Note:
# 1 <= stones.length <= 30
# 1 <= stones[i] <= 1000
import heapq
class Solution(object):
    def lastStoneWeight(self, stones):
        """Smash the two heaviest stones until at most one remains.

        Equal stones annihilate; unequal ones leave their difference.
        Returns the weight of the surviving stone, or 0 if none survives.
        """
        # Python's heapq is a min-heap, so store negated weights to pop
        # the heaviest stone first.
        heap = [-weight for weight in stones]
        heapq.heapify(heap)
        while len(heap) > 1:
            heaviest = heapq.heappop(heap)
            second = heapq.heappop(heap)
            if heaviest != second:
                # Both values are negative; the difference is the negated
                # remaining weight, which keeps the heap invariant.
                heapq.heappush(heap, heaviest - second)
        return -heap[0] if heap else 0
| [
"hoang2109@gmail.com"
] | hoang2109@gmail.com |
a54e35e05166aadbfab0c2e00094afb31d34ea9e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/38/usersdata/74/15433/submittedfiles/decimal2bin.py | 2762553aa515ae3fd3d920e2479efa3f6bf9e8d9 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | # -*- coding: utf-8 -*-
from __future__ import division
n = input('Digite o numero binario: ')
k = n
cont = 0
d = 0
while n>1:
n = n/10
cont = cont+1
while n>=1:
n = n*10
j = n//1
d = d+j*2**cont
cont = cont-1
n = n-j
print('%d'% d)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f630dbcad916b681dfd873c5ff8309f1c6505c2d | 82ed0cacf82da9a89fb65d9ddda6e603070aa639 | /reaper/__init__.py | 379855b02338d5c05153e42e2ad57c947ba033cc | [
"MIT"
] | permissive | pombredanne/reaper | 243dd08e8f570f9493c007face1cf2d4a7413f27 | 52f64d3e7d9a658df54475973796b48267f5c8a1 | refs/heads/master | 2021-01-24T03:36:30.781446 | 2015-11-12T03:48:46 | 2015-11-12T03:48:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,629 | py | """
Reaper
Deprecation warnings that turns automatically to Exception once your package version is bumped.
"""
# Package version string for the reaper distribution.
__version__ = '0.0.1'

import warnings

import semver
import traitlets
class DeprecationException(DeprecationWarning):pass
class DeprecationReaper:
    """Decorator that deprecates a callable relative to a package version.

    While the installed version of ``package`` does not yet satisfy
    ``versionspec``, calling the decorated function emits a
    DeprecationWarning. Once the installed version satisfies the spec,
    decorating the function raises DeprecationException instead, turning
    the lingering deprecation into a hard error.

    Example::

        @deprecate("IPython", ">=5.0.0")
        def function(value):
            return value
    """

    def __init__(self, package, versionspec):
        # Drop any pre-release tag ('-dev', '-rc', ...) from the spec so a
        # '>=4.1.0' deprecation already fires on 4.1.0-dev builds, not only
        # on the final 4.1.0 release.
        bare_spec = versionspec.split('-')[0]
        installed = traitlets.import_item(package + '.__version__')
        self.match = semver.match(installed, bare_spec)
        self.package = package
        self.spec = bare_spec

    def __call__(self, wrapped):
        details = {
            'name': wrapped.__qualname__,
            'p': self.package,
            's': self.spec,
        }
        if self.match:
            # Deadline reached: fail loudly at decoration time.
            raise DeprecationException(
                "`{name}` is not supported on {p}{s}".format(**details))

        def _wrap(*args, **kwargs):
            warnings.warn(
                "Support of `{name}` will end with {p}{s}".format(**details),
                DeprecationWarning, stacklevel=2)
            return wrapped(*args, **kwargs)
        return _wrap
deprecate = DeprecationReaper
| [
"bussonniermatthias@gmail.com"
] | bussonniermatthias@gmail.com |
dde1a97b3865fb7a6766da26d1bc744f1ce5fca6 | 500047f47a6b372fa7ff1e96b11315ee26acf5ef | /Chapter-07/text_ctrl.py | bbf85947f6e09f04be5a8a4f152c8b65751618ee | [] | no_license | ra2003/Tkinter-In-Action | 9f3a80bb2cab8dccf78621915f234f80cf79c58d | 2a35ae029c2cfabb53adee8dae5fd0a7c6db817f | refs/heads/master | 2022-03-02T16:25:26.146299 | 2019-10-07T06:36:41 | 2019-10-07T06:36:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | #!/usr/bin/python3
import tkinter as tk
class TextFrame(tk.Frame):
    """Demo frame with a plain Entry and a password-masked Entry."""

    def __init__(self, parent):
        super().__init__()
        parent.title('Text Entry Example')
        parent.geometry("300x100")

        panel = tk.Frame()
        # Row 0: plain text entry with a prefilled value.
        tk.Label(panel, text="Basic Control:").grid(row=0, column=0, sticky=tk.W)
        basic_entry = tk.Entry(panel, bg="white")
        basic_entry.grid(row=0, column=1, sticky=tk.W + tk.E)
        basic_entry.insert(0, "I've entered some text!")
        # Row 1: entry that masks its contents with '*'.
        tk.Label(panel, text="Password:").grid(row=1, column=0, sticky=tk.W)
        masked_entry = tk.Entry(panel, bg="white", show="*")
        masked_entry.grid(row=1, column=1, sticky=tk.W + tk.E)
        masked_entry.insert(0, "password")
        panel.pack(fill=tk.BOTH, expand=1)
def main():
    """Create the root window, attach the demo frame, and run the loop."""
    root = tk.Tk()
    TextFrame(root)
    root.mainloop()


if __name__ == '__main__':
    main()
| [
"noreply@github.com"
] | ra2003.noreply@github.com |
21e477ad9b77ff873b0306759123da5fac5fd96d | 3a6d382503e11753dd81b291145847a2eabb8ec6 | /experimental/dsmith/lab/autotest/opencl.py | 7607641290ba1f03b1ffb994ec921ded7b2218a1 | [] | no_license | QuXing9/phd | 7e6f107c20e0b3b1de2b25eb99e0b640a4a0bfcf | 58ba53b6d78515ed555e40527f6923e28941cc19 | refs/heads/master | 2022-02-27T03:29:05.126378 | 2019-10-22T02:46:57 | 2019-10-22T02:46:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,609 | py | #!/usr/bin/env python3
import json
import logging
import sys
from pathlib import Path
from typing import List
import autotest
import cldrive
from dsmith.opencl import clsmith
from labm8 import crypto
class OpenCLTestcase(object):
    """A single OpenCL test case backed by a kernel source file on disk."""

    def __init__(self, path: Path):
        self.path = path

    @property
    def src(self):
        """Read and return the kernel source text from disk."""
        with open(self.path) as srcfile:
            contents = srcfile.read()
        return contents

    def __repr__(self):
        return self.src
class CLSmithGenerator(autotest.Generator):
    """Generates OpenCL test cases by invoking the CLSmith binary."""

    def __init__(self, exec: Path):
        self.exec = exec
        exec_checksum = crypto.sha1_file(self.exec)
        logging.debug(f"CLSmith binary '{self.exec}' {exec_checksum}")

    def _clsmith(self, path: Path, *flags, attempt_num=1) -> Path:
        """Generate a program using CLSmith, retrying on failure.

        Returns `path`, the file the generated kernel was written to.
        Raises autotest.GeneratorError after 1000 failed attempts.
        """
        # Bug fix: the previous recursive retry passed the already-augmented
        # flag list back into itself, so '-o <path>' was prepended again on
        # every failed attempt. A loop with a freshly built invocation per
        # attempt avoids that (and the dropped return value).
        while True:
            if attempt_num >= 1000:
                raise autotest.GeneratorError(
                    f"failed to generate a program using CLSmith after {attempt_num} attempts")
            invocation = ['-o', path, *flags]
            logging.debug(" ".join(str(arg) for arg in [self.exec] + invocation))
            _, returncode, stdout, stderr = clsmith.clsmith(
                *invocation, exec_path=self.exec)
            if not returncode:
                return path
            # A non-zero returncode from clsmith means that no program was
            # generated. Try again.
            logging.debug(f"CLSmith call failed with returncode {returncode}:")
            logging.debug(stdout)
            attempt_num += 1

    def next_batch(self, batch_size: int) -> List[OpenCLTestcase]:
        """Generate `batch_size` fresh CLSmith test cases."""
        outbox = []
        for i in range(batch_size):
            generated_kernel = self._clsmith(f"clsmith-{i}.cl")
            outbox.append(OpenCLTestcase(generated_kernel))
        return outbox
class DeviceUnderTest(object):
    # Wraps one OpenCL platform/device pair that test cases are run on.

    def __init__(self, platform: str, device: str, flags: List[str]):
        self.device = device
        self.platform = platform
        self.flags = flags
        # cldrive environment and the numeric (platform, device) ids used
        # by cl_launcher.
        self.env = cldrive.make_env(self.platform, self.device)
        self.ids = self.env.ids()

    def run(self, testcase: autotest.testcase_t) -> autotest.output_t:
        # NOTE(review): results are only printed and nothing is returned,
        # despite the autotest.output_t return annotation -- confirm
        # whether this is still work in progress.
        runtime, returncode, stdout, stderr = clsmith.cl_launcher(
            testcase.path, *self.ids, *self.flags)
        print(runtime)
        print(returncode)
        print(stdout[:200])
        print(stderr[:200])
class StaticAnalyzer(object):
def __init__(self):
pass
def is_valid(self, testcase: autotest.testcase_t) -> bool:
pass
class DynamicAnalyzer(object):
def __init__(self):
pass
def is_valid(self, testcase: autotest.testcase_t,
duts: List[autotest.DeviceUnderTest],
outputs: List[autotest.output_t]) -> bool:
pass
class Reducer(object):
def __init__(self):
pass
def reduce(self, testcase: autotest.testcase_t,
dut: autotest.DeviceUnderTest) -> autotest.output_t:
pass
def main(args):
assert len(args) == 2
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
level=logging.DEBUG)
with open(args[0]) as infile:
json_config = json.loads(infile.read())
logging.debug(f"parsed config file '{args[0]}'")
num_batches = int(args[1])
generator = CLSmithGenerator(clsmith.exec_path)
preflight_checks = [
StaticAnalyzer(**x) for x in json_config["preflight_checks"]]
duts = [DeviceUnderTest(**x) for x in json_config["duts"]]
comparator = autotest.Comparator(**json_config["comparator"])
postflight_checks = [
DynamicAnalyzer(**x) for x in json_config["postflight_checks"]]
reducer = Reducer(**json_config["reducer"])
autotest.autotest(num_batches, generator, preflight_checks, duts,
comparator, postflight_checks, reducer)
if __name__ == "__main__":
main(sys.argv[1:])
| [
"chrisc.101@gmail.com"
] | chrisc.101@gmail.com |
e82d51daf8991cd5a31f5ce012cfc827ab74503f | a829617f9ad158df80a569dd02a99c53639fa2c6 | /test/hep/cut2.py | 889f316c9b7b192c2057c2917c3f6a0e06e5ca90 | [] | no_license | alexhsamuel/pyhep | 6db5edd03522553c54c8745a0e7fe98d96d2b7ae | c685756e9065a230e2e84c311a1c89239c5d94de | refs/heads/master | 2021-01-10T14:24:08.648081 | 2015-10-22T13:18:50 | 2015-10-22T13:18:50 | 44,745,881 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,409 | py | #-----------------------------------------------------------------------
# imports
#-----------------------------------------------------------------------
import cPickle
import hep.cuts
from hep.draw import Line
import hep.hist
from hep.hist import ezplot
from numarray import array, Float32
from random import normalvariate, random
import os
#-----------------------------------------------------------------------
# test
#-----------------------------------------------------------------------
if not os.path.isfile("cut2.pickle"):
sig_values = array(shape=(1000, 4), type=Float32)
for i in range(sig_values.shape[0]):
sig_values[i, 0] = normalvariate(0, 1)
sig_values[i, 1] = normalvariate(0, 1)
sig_values[i, 2] = normalvariate(0, 1)
sig_values[i, 3] = normalvariate(0, 1)
bkg_values = array(shape=(5000, 4), type=Float32)
for i in range(bkg_values.shape[0]):
bkg_values[i, 0] = normalvariate( 0, 2)
bkg_values[i, 1] = normalvariate( 1, 1)
bkg_values[i, 2] = normalvariate(-1, 1)
bkg_values[i, 3] = normalvariate(-1, 1)
cPickle.dump((sig_values, bkg_values), file("cut2.pickle", "w"), 1)
else:
sig_values, bkg_values = cPickle.load(file("cut2.pickle"))
cuts = [
(0, "<", random()),
(0, ">", random()),
(1, "<", random()),
(2, ">", random()),
(3, ">", random()),
]
fom_fn = hep.cuts.s_squared_over_s_plus_b
cuts, fom = hep.cuts.iterativeOptimize(sig_values, bkg_values, cuts, fom_fn)
fom_curves = hep.cuts.makeFOMCurves(sig_values, bkg_values, cuts, fom_fn)
gallery = ezplot.Gallery(3 * (1, ), border=0.03)
print "optimal cuts:"
for (var_index, cut_sense, cut_value), fom_curve in zip(cuts, fom_curves):
print " variable #%d %s %f" % (var_index, cut_sense, cut_value)
sig_hist = hep.hist.Histogram1D(120, (-5.0, 5.0), name="signal")
map(sig_hist.accumulate, sig_values[:, var_index])
bkg_hist = hep.hist.Histogram1D(120, (-5.0, 5.0), name="background")
map(bkg_hist.accumulate, bkg_values[:, var_index])
fom_curve.name = "cut FoM after other cuts"
plot = ezplot.curves1D(sig_hist, bkg_hist, fom_curve)
range = hep.hist.function.getRange(fom_curve, sig_hist.axis.range)
plot.annotations.append(Line(
((cut_value, 0), (cut_value, range[1]))))
gallery << plot
print "figure of merit =", fom
gallery.toPSFile("cut2.ps")
| [
"alex@alexsamuel.net"
] | alex@alexsamuel.net |
34ca8c73e475540d5a16ca14550bb83107603dd5 | 7eb67443c603719458f67f7ea369b55c6854bccb | /assignment7/mapper.py | 1d63425582c2c6ca752f28a5d700538581efaf32 | [
"MIT"
] | permissive | IITDU-BSSE06/ads-demystifying-the-logs-Arafat123-iit | bf85971cb35543734118d744a419baf8b722f886 | 2c7b9132eab2161162fc7ac0e9761990ffbaea8f | refs/heads/master | 2021-08-07T12:51:11.703193 | 2017-11-08T07:04:34 | 2017-11-08T07:04:34 | 109,237,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | #!/usr/bin/python
import urlparse
import sys
for line in sys.stdin:
data = line.strip().split(" ")
if len(data) == 10:
a0, a1 ,a2 ,a3 ,a4 ,a5 ,a6 ,a7 ,a8 ,a9= data
# path = urlparse.urlparse(a6).path
print "{0}\t{1}".format(a5,a5)
| [
"noreply@github.com"
] | IITDU-BSSE06.noreply@github.com |
4343873f84f6aaa10eedaa76382224012ae6ba54 | 94de2c5845e70f20f48851595a48e8d3bdb1c3db | /stock/xbrl/xbrl_code_list.py | 77bb77e9dc6d9b5cbb05f7946aa04019d8dbd9f1 | [] | no_license | noeul1114/skyrocket | a0b06821ab56f067d45f2e3edfee03320f795cdf | 9da978e6839d47cab10816eddc1a3112c79e8f72 | refs/heads/master | 2022-12-11T03:40:06.590405 | 2021-04-09T00:47:24 | 2021-04-09T00:47:24 | 242,674,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,574 | py | revenue_word_list_gaap = ['매출액', '영업수익', '수익(매출액)', '매출 및 지분법 손익', '매출', '매출액(매출액)', 'I. 매출액', '수익(순매출액)', '수익', '영업수익(매출액)', 'Ⅰ. 매출액', 'I.매출액', '매출및지분법손익', '매출 및 지분법손익', 'I. 매출액', '매출및 지분법손익']
operating_income_word_list_gaap = ['영업이익', '영업이익(손실)', '영업이익 (손실)', '영업손익', 'V. 영업이익', 'Ⅳ. 영업이익(손실)', 'V.영업이익', '영업손실', 'Ⅴ. 영업이익(손실)']
profit_loss_word_list_gaap = ['반기순이익', '당기순이익(손실)', '반기순이익(손실)', '당기순이익', '분(반)기순이익(손실)', '당기순손익', '분기순손익', '분기순이익', '-당기순이익(손실)', '반기순손실', '분기순이익(손실)', '총포괄순이익(손실)', '3분기순이익(손실)', '분기순손실', '연결분기순이익(손실)', '3분기순이익', '1분기순이익(손실)', '당기순손실', '순이익(손실)', '연결당기순이익(손실)', '당(분)기순이익(손실)', '법인세 차감 후 반기순이익(손실)', '3분기 순이익', '당1분기순이익', '연결 당기순이익', '당(반)기순이익(손실)', '당(반)기순이익', '당반기순이익', 'IX.당기순이익(손실)', 'Ⅰ.당기순이익(손실)', 'VIII.반기순이익', '당3분기순이익', '당(분)기순이익', 'VIII.당기순이익', 'Ⅸ. 당기순이익', '당기순손익(손실)', 'XII. 당기순이익', 'XI. 당기순이익(손실)', 'VIII.당기순이익(손실)']
asset_word_list_gaap = [' 자산총계', ' 자 산 총 계', ' 자산 총계', ' 자 산 총 계', ' 자 산 총 계', ' 자산합계', ' 자 산 계']
capital_word_list_gaap = [' 자본총계', ' 기말금액', ' 당기말', ' 반기말자본', ' 기말', ' 전기말', ' 기말 자본', ' 자 본 총 계', ' 반기말', ' IV. 기말자본', ' 당반기말', ' 자본 총계', ' 당반기말자본', ' 반기말 자본', ' 자 본 총 계', ' 분기말자본', ' 자기주식의 취득', ' 당기말자본', ' 반기자본', ' 분기말', ' 당분기말', ' 당분기말자본', ' 분기말 자본', ' 당분기말(2019년9월30일)잔액', ' 전분기말자본', ' 2019.03.31(당분기말)', ' 기말잔액', ' 기말자본', ' 총계', ' (당반기말)', ' 당분기말 자본', ' 반기순손실', ' 반기말잔액', ' 3분기말자본', ' 2018.9.30(당분기말)', ' 분기순손실', ' 분기말잔액', ' 1분기말자본', ' (당분기말)', ' 자본계', ' (당기말)', ' Ⅲ.기말자본', ' IV.당(전)기말', ' 자 본 총 계', ' 자본합계', ' 자 본 계', ' IV.기말자본', ' 2017년 9월 30일 (분기말)', ' 자본에 직접 반영된 소유주와의 거래', ' 2015.12.31(당기말)', ' III.기말자본', ' 2015.12.31 기말자본']
liabilities_word_list_gaap = [' 부채총계', ' 부 채 총 계', ' 부채 총계', ' 부 채 총 계', ' 부 채 총 계', ' 부 채 계']
revenue_code_list = ['ifrs_GrossProfit', 'ifrs-full_GrossProfit']
operating_income_code_list = ['dart_OperatingIncomeLoss',]
profit_loss_code_list = ['ifrs_ProfitLoss', 'ifrs-full_ProfitLoss']
asset_code_list = ['ifrs_Assets', 'ifrs-full_Assets']
capital_code_list = ['ifrs_Equity', 'ifrs-full_Equity']
liabilities_code_list = ['ifrs_Liabilities', 'ifrs-full_Liabilities']
| [
"parkhyongsok@naver.com"
] | parkhyongsok@naver.com |
887284504418d7fb5752e642ca499b54970ebb22 | 82352d4737a87f62328aa061853d544820f417f7 | /utils/sample_statistics.py | 4605586f1da692068bb8f9b2fa3487316847c40c | [] | no_license | zuiyueyin/python-learning-notes | 054162c25db881f0f2133088099b5ca217143952 | f45f0879cc70eb59de67a270a6ec8dbb2cf8e742 | refs/heads/master | 2023-05-29T21:50:04.702395 | 2020-06-09T04:00:19 | 2020-06-09T04:00:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,434 | py | # -*-coding: utf-8 -*-
"""
@Project: python-learning-notes
@File : sample_statistics.py
@Author : panjq
@E-mail : pan_jinquan@163.com
@Date : 2019-07-13 15:09:33
"""
from utils import file_processing, plot_utils
import numpy as np
import pandas as pd
from modules.pandas_json import pandas_tools
def count_data_info(data_list, _print=True, plot=True, title="data count info", line_names="data"):
'''
statis sample nums
print(pd.value_counts(label_list))
:param data_list:
:return: label_set : label set
label_count: label nums
'''
data_set = list(set(data_list))
data_set.sort()
count_list = []
for s in data_set:
nums = data_list.count(s)
count_list.append(nums)
print("mean count :{}/{}={}".format(len(data_list), len(data_set), len(data_list) / len(data_set)))
if plot:
plot_utils.plot_bar(x_data=data_set, y_data=count_list, title=title, xlabel="ID", ylabel="COUNT")
# plot_utils.plot_multi_line([data_set], [count_list], [line_names], title=title, xlabel="ID", ylabel="COUNT")
return count_list, data_set
def count_data_dict(data_list):
'''
statis sample nums
print(pd.value_counts(label_list))
:param data_list:
:return: label_set : label set
label_count: label nums
'''
data_set = list(set(data_list))
data_set.sort()
count_dict = []
for s in data_set:
nums = data_list.count(s)
count_dict[s] = nums
return count_dict
def count_data_info_pd(data_list, _print=True, plot=True, title="data count info", line_names="data"):
p = pd.value_counts(data_list, sort=False)
if _print:
print(p)
data_set = []
count_list = []
for key, count in p.items():
# count=p[key]
data_set.append(key)
count_list.append(count)
print("mean count :{}/{}={}".format(len(data_list), len(data_set), len(data_list) / len(data_set)))
if plot:
data_range = list(range(0, len(data_set)))
# data_range=data_set
plot_utils.plot_bar(x_data=data_range, y_data=count_list, title=title, xlabel="ID", ylabel="COUNT")
# plot_utils.plot_multi_line([data_set], [count_list], [line_names], title=title, xlabel="ID", ylabel="COUNT")
# return count_list, data_set
return p
if __name__ == "__main__":
# image_dir = "/media/dm/dm2/project/dataset/face_recognition/NVR/facebank/NVR_3_20190605_1005_VAL"
# dataset="/media/dm/dm2/project/dataset/face_recognition/CASIA-FaceV5/"
# image_dir = dataset+"CASIA-Faces"
# dataset="/media/dm/dm2/project/dataset/face_recognition/celebs_add_movies/"
# image_dir = dataset+"Asian_Faces"
image_dir = '/media/dm/dm1/project/dataset/face_recognition/X2T/X2T_Face233/val'
# image_dir = '/media/dm/dm1/project/dataset/face_recognition/NVR/face/NVR1/trainval'
image_list, label_list = file_processing.get_files_labels(image_dir)
name_table = list(set(label_list))
label_list = file_processing.encode_label(name_list=label_list, name_table=name_table)
label_list = [int(l) for l in label_list]
label_list.sort()
# count = Counter(label_list)
# count = label_list.count()
# print(count)
pd_data = count_data_info_pd(label_list)
filename = "my_test2.csv"
pd = pandas_tools.construct_pd(index=None, columns_name=["A"], content=pd_data, filename=filename)
print(pd)
| [
"panjinquan@dm-ai.cn"
] | panjinquan@dm-ai.cn |
feaa2e232a12eb04ca4a8f06a89234c1a1fb0dbc | e34ba843cf682892462aec8b477d4a708968286d | /dlgo/reinforce/experience_test.py | 1d6a67e69a72733b4a649f9e599cc802f65cb0df | [] | no_license | mecha2k/mygo | e088e4abff292aa225dd22655ef9032cd89ddabc | db77aeade0ef25b9cd8d0097aff7dd7cc7d78ef6 | refs/heads/master | 2023-01-21T21:37:57.930762 | 2020-11-26T14:02:33 | 2020-11-26T14:02:33 | 303,343,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | import unittest
import numpy as np
from dlgo.reinforce import experience
class ExperienceTest(unittest.TestCase):
def test_combine_experience(self):
collector1 = experience.ExperienceCollector()
collector1.begin_episode()
collector1.record_decision(
state=np.array(
[
[1, 1],
[1, 1],
]
),
action=1,
)
collector1.record_decision(
state=np.array(
[
[2, 2],
[2, 2],
]
),
action=2,
)
collector1.complete_episode(reward=1)
collector1.begin_episode()
collector1.record_decision(
state=np.array(
[
[3, 3],
[3, 3],
]
),
action=3,
)
collector1.complete_episode(reward=2)
collector2 = experience.ExperienceCollector()
collector2.begin_episode()
collector2.record_decision(
state=np.array(
[
[4, 4],
[4, 4],
]
),
action=4,
)
collector2.complete_episode(reward=3)
combined = experience.combine_experience([collector1, collector2])
# 4 decisions. Each state is a 2x2 matrix
self.assertEqual((4, 2, 2), combined.states.shape)
self.assertEqual((4,), combined.actions.shape)
self.assertEqual([1, 2, 3, 4], list(combined.actions))
self.assertEqual((4,), combined.rewards.shape)
self.assertEqual([1, 1, 2, 3], list(combined.rewards))
| [
"mecha2k@naver.com"
] | mecha2k@naver.com |
37e9ec85ea551c5a0f77ba61a24f955da77d0426 | 6b3b61d2c5ba4998e7390c76be87be569c713f7a | /Exercicio_022/desafio_022.py | 7091a3a066c9a597d33f70c27ebb69bd37711884 | [] | no_license | loc-dev/CursoEmVideo-Python-Exercicios | 22a8b4621eb4bd95ddfca2553693eccca4a0786e | 9bceec567e653c1fbaa01b9668cd0e7a828e53a9 | refs/heads/master | 2022-12-02T18:46:18.506684 | 2020-08-22T20:50:15 | 2020-08-22T20:50:15 | 256,840,996 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | # Desafio 022 - Referente aula Fase09
# Crie um programa que leia o nome completo de uma pessoa
# e mostre:
# - O nome com todas as letras maiúsculas e minúsculas.
# - Quantas letras no total (sem considerar espaços).
# - Quantas letras tem o primeiro nome.
nome = input("Digite o seu nome completo: ")
print('')
print("Analisando o seu nome...")
print("Seu nome em letras maiúsculas é: {}".format(nome.upper()))
print("Seu nome em letras minúsculas é: {}".format(nome.lower()))
print("Seu nome tem ao todo {} letras".format(len(nome.replace(" ", ""))))
print("Seu primeiro nome é {} e ele tem {} letras".format(nome.split()[0], len(nome.split()[0])))
| [
"leonardoc.developer@gmail.com"
] | leonardoc.developer@gmail.com |
28342a28b1ed2ee0b8a34ce31c0655baa1e7665a | b2625b1a1ef4a3a255ae88b6d77c425727187eeb | /mmpose/datasets/datasets/animal/animal_ap10k_dataset.py | 01651f22eddc1e2648627334b2c43b59ee582e75 | [
"Apache-2.0"
] | permissive | wojiazaiyugang/mmpose | acd4083d142c5c4c2dd87e6be94a5891a42d2797 | 8947b39294b037e8272c6cf2f53ae4aa7d22193b | refs/heads/master | 2023-09-01T23:45:43.857657 | 2021-11-23T03:03:02 | 2021-11-23T03:03:02 | 356,105,054 | 0 | 0 | Apache-2.0 | 2021-09-16T06:36:44 | 2021-04-09T02:08:50 | Python | UTF-8 | Python | false | false | 12,368 | py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import warnings
from collections import OrderedDict, defaultdict
import json_tricks as json
import numpy as np
from mmcv import Config
from xtcocotools.cocoeval import COCOeval
from ....core.post_processing import oks_nms, soft_oks_nms
from ...builder import DATASETS
from ..base import Kpt2dSviewRgbImgTopDownDataset
@DATASETS.register_module()
class AnimalAP10KDataset(Kpt2dSviewRgbImgTopDownDataset):
"""AP-10K dataset for animal pose estimation.
`AP-10K: A Benchmark for Animal Pose Estimation in the Wild’
Neurips Dataset Track'2021
More details can be found in the `paper
<https://arxiv.org/abs/2108.12617>`__ .
The dataset loads raw features and apply specified transforms
to return a dict containing the image tensors and other information.
AP-10K keypoint indexes::
0: 'L_Eye',
1: 'R_Eye',
2: 'Nose',
3: 'Neck',
4: 'root of tail',
5: 'L_Shoulder',
6: 'L_Elbow',
7: 'L_F_Paw',
8: 'R_Shoulder',
9: 'R_Elbow',
10: 'R_F_Paw,
11: 'L_Hip',
12: 'L_Knee',
13: 'L_B_Paw',
14: 'R_Hip',
15: 'R_Knee',
16: 'R_B_Paw'
Args:
ann_file (str): Path to the annotation file.
img_prefix (str): Path to a directory where images are held.
Default: None.
data_cfg (dict): config
pipeline (list[dict | callable]): A sequence of data transforms.
dataset_info (DatasetInfo): A class containing all dataset info.
test_mode (bool): Store True when building test or
validation dataset. Default: False.
"""
def __init__(self,
ann_file,
img_prefix,
data_cfg,
pipeline,
dataset_info=None,
test_mode=False):
if dataset_info is None:
warnings.warn(
'dataset_info is missing. '
'Check https://github.com/open-mmlab/mmpose/pull/663 '
'for details.', DeprecationWarning)
cfg = Config.fromfile('configs/_base_/datasets/ap10k.py')
dataset_info = cfg._cfg_dict['dataset_info']
super().__init__(
ann_file,
img_prefix,
data_cfg,
pipeline,
dataset_info=dataset_info,
test_mode=test_mode)
self.use_gt_bbox = data_cfg['use_gt_bbox']
self.bbox_file = data_cfg['bbox_file']
self.det_bbox_thr = data_cfg.get('det_bbox_thr', 0.0)
self.use_nms = data_cfg.get('use_nms', True)
self.soft_nms = data_cfg['soft_nms']
self.nms_thr = data_cfg['nms_thr']
self.oks_thr = data_cfg['oks_thr']
self.vis_thr = data_cfg['vis_thr']
self.ann_info['use_different_joint_weights'] = False
self.db, self.id2Cat = self._get_db()
print(f'=> num_images: {self.num_images}')
print(f'=> load {len(self.db)} samples')
def _get_db(self):
"""Load dataset."""
assert self.use_gt_bbox
gt_db, id2Cat = self._load_coco_keypoint_annotations()
return gt_db, id2Cat
def _load_coco_keypoint_annotations(self):
"""Ground truth bbox and keypoints."""
gt_db, id2Cat = [], dict()
for img_id in self.img_ids:
db_tmp, id2Cat_tmp = self._load_coco_keypoint_annotation_kernel(
img_id)
gt_db.extend(db_tmp)
id2Cat.update({img_id: id2Cat_tmp})
return gt_db, id2Cat
def _load_coco_keypoint_annotation_kernel(self, img_id):
"""load annotation from COCOAPI.
Note:
bbox:[x1, y1, w, h]
Args:
img_id: coco image id
Returns:
dict: db entry
"""
img_ann = self.coco.loadImgs(img_id)[0]
width = img_ann['width']
height = img_ann['height']
num_joints = self.ann_info['num_joints']
ann_ids = self.coco.getAnnIds(imgIds=img_id, iscrowd=False)
objs = self.coco.loadAnns(ann_ids)
# sanitize bboxes
valid_objs = []
for obj in objs:
if 'bbox' not in obj:
continue
x, y, w, h = obj['bbox']
x1 = max(0, x)
y1 = max(0, y)
x2 = min(width - 1, x1 + max(0, w - 1))
y2 = min(height - 1, y1 + max(0, h - 1))
if ('area' not in obj or obj['area'] > 0) and x2 > x1 and y2 > y1:
obj['clean_bbox'] = [x1, y1, x2 - x1, y2 - y1]
valid_objs.append(obj)
objs = valid_objs
bbox_id = 0
rec = []
id2Cat = []
for obj in objs:
if 'keypoints' not in obj:
continue
if max(obj['keypoints']) == 0:
continue
if 'num_keypoints' in obj and obj['num_keypoints'] == 0:
continue
joints_3d = np.zeros((num_joints, 3), dtype=np.float32)
joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)
keypoints = np.array(obj['keypoints']).reshape(-1, 3)
joints_3d[:, :2] = keypoints[:, :2]
joints_3d_visible[:, :2] = np.minimum(1, keypoints[:, 2:3])
center, scale = self._xywh2cs(*obj['clean_bbox'][:4])
image_file = os.path.join(self.img_prefix, self.id2name[img_id])
rec.append({
'image_file': image_file,
'center': center,
'scale': scale,
'bbox': obj['clean_bbox'][:4],
'rotation': 0,
'joints_3d': joints_3d,
'joints_3d_visible': joints_3d_visible,
'dataset': self.dataset_name,
'bbox_score': 1,
'bbox_id': bbox_id
})
category = obj['category_id']
id2Cat.append({
'image_file': image_file,
'bbox_id': bbox_id,
'category': category,
})
bbox_id = bbox_id + 1
return rec, id2Cat
def evaluate(self, outputs, res_folder, metric='mAP', **kwargs):
"""Evaluate coco keypoint results. The pose prediction results will be
saved in `${res_folder}/result_keypoints.json`.
Note:
batch_size: N
num_keypoints: K
heatmap height: H
heatmap width: W
Args:
outputs (list(dict))
:preds (np.ndarray[N,K,3]): The first two dimensions are
coordinates, score is the third dimension of the array.
:boxes (np.ndarray[N,6]): [center[0], center[1], scale[0]
, scale[1],area, score]
:image_paths (list[str]): For example, ['data/coco/val2017
/000000393226.jpg']
:heatmap (np.ndarray[N, K, H, W]): model output heatmap
:bbox_id (list(int)).
res_folder (str): Path of directory to save the results.
metric (str | list[str]): Metric to be performed. Defaults: 'mAP'.
Returns:
dict: Evaluation results for evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['mAP']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
res_file = os.path.join(res_folder, 'result_keypoints.json')
kpts = defaultdict(list)
for output in outputs:
preds = output['preds']
boxes = output['boxes']
image_paths = output['image_paths']
bbox_ids = output['bbox_ids']
batch_size = len(image_paths)
for i in range(batch_size):
image_id = self.name2id[image_paths[i][len(self.img_prefix):]]
cat = self.id2Cat[image_id][bbox_ids[i]]['category']
kpts[image_id].append({
'keypoints': preds[i],
'center': boxes[i][0:2],
'scale': boxes[i][2:4],
'area': boxes[i][4],
'score': boxes[i][5],
'image_id': image_id,
'bbox_id': bbox_ids[i],
'category': cat
})
kpts = self._sort_and_unique_bboxes(kpts)
# rescoring and oks nms
num_joints = self.ann_info['num_joints']
vis_thr = self.vis_thr
oks_thr = self.oks_thr
valid_kpts = []
for image_id in kpts.keys():
img_kpts = kpts[image_id]
for n_p in img_kpts:
box_score = n_p['score']
kpt_score = 0
valid_num = 0
for n_jt in range(0, num_joints):
t_s = n_p['keypoints'][n_jt][2]
if t_s > vis_thr:
kpt_score = kpt_score + t_s
valid_num = valid_num + 1
if valid_num != 0:
kpt_score = kpt_score / valid_num
# rescoring
n_p['score'] = kpt_score * box_score
if self.use_nms:
nms = soft_oks_nms if self.soft_nms else oks_nms
keep = nms(list(img_kpts), oks_thr, sigmas=self.sigmas)
valid_kpts.append([img_kpts[_keep] for _keep in keep])
else:
valid_kpts.append(img_kpts)
self._write_coco_keypoint_results(valid_kpts, res_file)
info_str = self._do_python_keypoint_eval(res_file)
name_value = OrderedDict(info_str)
return name_value
def _write_coco_keypoint_results(self, keypoints, res_file):
"""Write results into a json file."""
data_pack = [{
'cat_id': self._class_to_coco_ind[cls],
'cls_ind': cls_ind,
'cls': cls,
'ann_type': 'keypoints',
'keypoints': keypoints
} for cls_ind, cls in enumerate(self.classes)
if not cls == '__background__']
results = self._coco_keypoint_results_one_category_kernel(data_pack[0])
with open(res_file, 'w') as f:
json.dump(results, f, sort_keys=True, indent=4)
def _coco_keypoint_results_one_category_kernel(self, data_pack):
"""Get coco keypoint results."""
keypoints = data_pack['keypoints']
cat_results = []
for img_kpts in keypoints:
if len(img_kpts) == 0:
continue
_key_points = np.array(
[img_kpt['keypoints'] for img_kpt in img_kpts])
key_points = _key_points.reshape(-1,
self.ann_info['num_joints'] * 3)
result = [{
'image_id': img_kpt['image_id'],
'category_id': img_kpt['category'],
'keypoints': key_point.tolist(),
'score': float(img_kpt['score']),
'center': img_kpt['center'].tolist(),
'scale': img_kpt['scale'].tolist()
} for img_kpt, key_point in zip(img_kpts, key_points)]
cat_results.extend(result)
return cat_results
def _do_python_keypoint_eval(self, res_file):
"""Keypoint evaluation using COCOAPI."""
coco_det = self.coco.loadRes(res_file)
coco_eval = COCOeval(self.coco, coco_det, 'keypoints', self.sigmas)
coco_eval.params.useSegm = None
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
stats_names = [
'AP', 'AP .5', 'AP .75', 'AP (M)', 'AP (L)', 'AR', 'AR .5',
'AR .75', 'AR (M)', 'AR (L)'
]
info_str = list(zip(stats_names, coco_eval.stats))
return info_str
def _sort_and_unique_bboxes(self, kpts, key='bbox_id'):
"""sort kpts and remove the repeated ones."""
for img_id, persons in kpts.items():
num = len(persons)
kpts[img_id] = sorted(kpts[img_id], key=lambda x: x[key])
for i in range(num - 1, 0, -1):
if kpts[img_id][i][key] == kpts[img_id][i - 1][key]:
del kpts[img_id][i]
return kpts
| [
"noreply@github.com"
] | wojiazaiyugang.noreply@github.com |
60994e422d1fdc9199f7d90ae6cd7856ef8e3102 | c1f60f28cbd74a639dc89b22518ae33765267af4 | /ravem/indico_ravem/__init__.py | cbc2ba2885adce70b301cb3a688236fbcc23aee4 | [
"MIT"
] | permissive | DalavanCloud/indico-plugins-cern | b0a97dbbd7fb4dc272977b121ec92931ee316ad7 | bb67d2fb9e3d24faeeff2b78a5e9bcff52ac5f26 | refs/heads/master | 2020-04-22T08:58:17.025951 | 2019-01-21T14:25:08 | 2019-01-21T14:25:54 | 170,255,750 | 1 | 0 | null | 2019-02-12T05:08:40 | 2019-02-12T05:08:40 | null | UTF-8 | Python | false | false | 376 | py | # This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2018 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from __future__ import unicode_literals
from indico.util.i18n import make_bound_gettext
_ = make_bound_gettext('ravem')
| [
"adrian.moennich@cern.ch"
] | adrian.moennich@cern.ch |
d37b2518c4bf1cc6aac3deab5e80bfaa2e4e17a7 | f897f0e594a9157b9e56cee8b24f68bb9c9221f8 | /aml_workspace/src/AML/aml_io/src/aml_io/tf_io.py | 7edd96ff4f34235b45528fe63e5db611b68b9503 | [] | no_license | HDClark94/Honeycomb | b1056a0f8b41312d0a87d5cd80803a2baf613bae | 96899e6b362358d0ce1e3671cbc77f30856c80ac | refs/heads/master | 2020-03-29T19:48:29.330790 | 2018-10-22T07:50:01 | 2018-10-22T07:50:01 | 150,281,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import tensorflow as tf
def load_tf_check_point(session, filename):
saver = tf.train.Saver()
saver.restore(session, filename)
print("Model restored.")
def save_tf_check_point(session, filename):
saver = tf.train.Saver()
save_path = saver.save(session, filename)
print("tf checkpoint saved in file: %s" % save_path) | [
"harrydclark91@gmail.com"
] | harrydclark91@gmail.com |
202679d52735c93032e4fa601cba695539c3a1ba | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /AaSXX4SKNdZ7mgqK7_14.py | 83c2bdb8a06c274f5040c0a84f78e2868910eb94 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,822 | py | """
Check the principles of minimalist code in the [intro to the first
challenge](https://edabit.com/challenge/2XLjgZhmACph76Pkr).
In the **Code** tab you will find a code that is missing a single character in
order to pass the tests. However, your goal is to submit a function as
**minimalist** as possible. Use the tips in the tips section below.
Write a function that returns the **first truthy argument** passed to the
function. If all arguments are falsy, return the string `"not found"`. The
function will be called with a **minimum of one** and a **maximum of four**
arguments: `a`, `b`, `c`, `d`.
### Tips
The operator `or` can be used to assign or return the first truthy value among
two or more elements. If no truthy value is found, the last element will be
returned.
For example, the code:
def one_of_these(a, b, c):
return a if a else b if b else c
Can be simplified to:
def one_of_these(a, b, c):
return a or b or c
### Bonus
Once a truthy value is found, the rest of the elements will not be checked.
This can be used to define a sort of default value that will be returned if
all of the previous elements happen to be false or empty:
txt1 = ""
txt2 = "Edabit"
txt1 or "Empty string" ➞ "Empty string"
txt2 or "Empty string" ➞ "Edabit"
### Notes
* This is an open series: there isn't a definite list of features for the challenges. Please, do not hesitate to leave your **suggestions** in the **Comments**.
* _ **Readability**_ is indeed a subjective concept. **Let's discuss it!** Feel free to leave your opinion in the **Comments**.
* You can find all the exercises in this series [over here](https://edabit.com/collection/8F3LA2Mwrf5bp7kse).
"""
def first_one(a, b=None ,c=None, d=None):
return a or b or c or d or 'not found'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
8ca9448dc3b945debb5c81ca77e931b7ffa80336 | 683d81b0d0ac10e3782b42f1ea6007124d72a663 | /1. Problems/c. Array/a. 1D 2 - Sequence - Split Array into Conscutive Subsequences.py | 02ae577f943539e02833f37b9170ed94defdd809 | [] | no_license | valleyceo/code_journal | 4b5e6fcbd792fedc639f773ca2bbf6725a9b9146 | 0191a6623e7a467c2c0070c4545358301a5e42ba | refs/heads/master | 2022-09-16T17:47:55.343712 | 2022-09-03T23:46:38 | 2022-09-03T23:46:38 | 129,997,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | # LC 659. Split Array into Consecutive Subsequences
'''
You are given an integer array nums that is sorted in non-decreasing order.
Determine if it is possible to split nums into one or more subsequences such that both of the following conditions are true:
Each subsequence is a consecutive increasing sequence (i.e. each integer is exactly one more than the previous integer).
All subsequences have a length of 3 or more.
Return true if you can split nums according to the above conditions, or false otherwise.
A subsequence of an array is a new array that is formed from the original array by deleting some (can be none) of the elements without disturbing the relative positions of the remaining elements. (i.e., [1,3,5] is a subsequence of [1,2,3,4,5] while [1,3,2] is not).
Example 1:
Input: nums = [1,2,3,3,4,5]
Output: true
Explanation: nums can be split into the following subsequences:
[1,2,3,3,4,5] --> 1, 2, 3
[1,2,3,3,4,5] --> 3, 4, 5
Example 2:
Input: nums = [1,2,3,3,4,4,5,5]
Output: true
Explanation: nums can be split into the following subsequences:
[1,2,3,3,4,4,5,5] --> 1, 2, 3, 4, 5
[1,2,3,3,4,4,5,5] --> 3, 4, 5
'''
class Solution:
def isPossible(self, nums: List[int]) -> bool:
return self.optimizedSolution(nums)
# O(n) time | O(n) space
def optimizedSolution(self, nums: List[int]) -> bool:
left = Counter(nums)
end_pointer = Counter()
for n in nums:
if not left[n]:
continue
left[n] -= 1
if end_pointer[n - 1] > 0:
end_pointer[n - 1] -= 1
end_pointer[n] += 1
elif left[n + 1] and left[n + 2]:
left[n + 1] -= 1
left[n + 2] -= 1
end_pointer[n + 2] += 1
else:
return False
return True
"""
Insight:
* You cannot check for more maximum sequence (ex: [1,2,3,3,4,4,5] -> [1,2,3,4,5], [3,4] is wrong)
- You still can solve greedily:
- Create a counter and a last seq poimarkernter.
- For each number sequence, if there is a prior sequence then add it
- If not, then create a new sequence (check if beginning sequence is larger than 2)
- Why do you need a prev seq marker?
- Because seq needs to stop at 3 and see if new array is formed (ex. [1, 2, 3, 3, 4, 5]).
- Checking new sequence comes first, and checking prev marker comes first on next iteration
"""
| [
"ericjkim9@gmail.com"
] | ericjkim9@gmail.com |
7192fd53903767f99c7e0de95b447794230dc45d | 658849ce4adc682e403631df460d886c21c55146 | /pyQt/02_pdfText.py | 19a44069fc908e8973ddfdbe273a462d091b29ad | [] | no_license | gsrr/Python | eec6b6d1189a5a08ab913925cc5428e9cac0e4ce | d412a7a40bc7875d2fce58311f099945698569f5 | refs/heads/master | 2023-02-11T12:36:49.346289 | 2023-02-01T17:28:52 | 2023-02-01T17:28:52 | 40,101,541 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from PyQt4.QtGui import *
import sys
app = QApplication(sys.argv)
text_file_path = open('02_sample.txt').read()
doc = QTextDocument(text_file_path)
printer = QPrinter(QPrinter.HighResolution)
printer.setOutputFormat(QPrinter.PdfFormat)
printer.setOutputFileName('02_sample.pdf')
doc.print_(printer) | [
"jerrycheng1128@gmail.com"
] | jerrycheng1128@gmail.com |
10b44701f84e0e8b55cff95887fbae486fc399ff | a6fae33cdf3d3cb0b0d458c2825a8d8cc010cd25 | /l3/z3/.history/moves_manager_20200522163947.py | 86bb3edd1e5a58526cd512533dea3531b6a94f81 | [] | no_license | Qabrix/optimization_amh | 12aab7c7980b38812ec38b7e494e82452a4176b4 | 6a4f5b897a4bef25f6e2acf535ba20ace7351689 | refs/heads/main | 2022-12-28T10:57:00.064130 | 2020-10-17T22:57:27 | 2020-10-17T22:57:27 | 304,983,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py | from random import choice, randint
class MovesManager():
    """Move generation and validation for a 2D maze grid.

    Grid cells are strings: '1' marks a wall and '8' marks the exit; any
    other value is walkable. Positions are mutable [row, col] lists and
    steps are one of 'U', 'D', 'L', 'R'.
    """

    def __init__(self, grid=[], start_pos=[], n=0, m=0):
        # NOTE(review): the mutable defaults ([]) are shared across every
        # instance that relies on them; kept only for interface
        # compatibility — callers should pass fresh lists.
        self.n = n  # one grid dimension; used with m to bound random runs -- TODO confirm rows vs cols
        self.m = m  # the other grid dimension -- TODO confirm
        self.grid = grid
        self.start_pos = start_pos
        self.possible_moves = ['U', 'D', 'L', 'R']

    def validate_move(self, step, pos=None, grid=None):
        """Return True when taking `step` from `pos` does not enter a wall ('1').

        Bug fix: the original signature used ``pos=start_pos, grid=self.grid``
        as defaults, which raises NameError the moment the class body is
        executed (neither name exists in class scope). None sentinels
        resolved at call time restore the intended "default to this
        instance's start position and grid" behavior.
        """
        if pos is None:
            pos = self.start_pos
        if grid is None:
            grid = self.grid
        if step == 'U' and grid[pos[0] - 1][pos[1]] != '1':
            return True
        elif step == 'D' and grid[pos[0] + 1][pos[1]] != '1':
            return True
        elif step == 'L' and grid[pos[0]][pos[1] - 1] != '1':
            return True
        elif step == 'R' and grid[pos[0]][pos[1] + 1] != '1':
            return True
        else:
            return False

    def move(self, step, grid):
        """Mutate a position one cell in direction `step`.

        NOTE(review): despite its name, `grid` receives a [row, col]
        position list (see every caller in this class); the parameter
        name is kept unchanged so existing callers are unaffected.
        """
        if step == 'U':
            grid[0] -= 1
        elif step == 'D':
            grid[0] += 1
        elif step == 'L':
            grid[1] -= 1
        elif step == 'R':
            grid[1] += 1

    def explore(self, pos, path, grid):
        """Replay `path` from `pos`, silently dropping invalid steps.

        Returns (steps_actually_taken, reached_exit). `pos` is mutated
        in place as the path is walked.
        """
        new_path = []
        for step in path:
            if self.validate_move(step, pos, grid):
                self.move(step, pos)
                new_path += [step]
            # The exit is checked on every iteration — even after a
            # rejected step — matching the original behavior.
            if self.check_for_exit(pos, grid):
                return new_path, True
        return new_path, False

    def random_moves(self, pos, grid, n, m, step_limit):
        """Build a random valid path of at most `step_limit` steps.

        Picks a random direction that is immediately valid, then repeats
        it a random number of times (bounded by min(n, m)), stopping the
        run early at the first wall. Returns the partial path as soon as
        the exit is reached. (The original also had an unused
        ``step = ''`` initializer, removed here.)
        """
        path = []
        while len(path) <= step_limit:
            step = choice(self.possible_moves)
            while not self.validate_move(step, pos, grid):
                step = choice(self.possible_moves)
            for _ in range(randint(1, min(n, m))):
                if not self.validate_move(step, pos, grid):
                    break
                self.move(step, pos)
                path += [step]
                if self.check_for_exit(pos, grid):
                    return path
        return path

    def check_for_exit(self, pos, grid):
        """Return True if `pos` is on, or orthogonally adjacent to, the exit ('8')."""
        return grid[pos[0]][pos[1]] == '8' or (
            grid[pos[0]][pos[1] + 1] == '8' or
            grid[pos[0] + 1][pos[1]] == '8' or
            grid[pos[0]][pos[1] - 1] == '8' or
            grid[pos[0] - 1][pos[1]] == '8'
        )
| [
"kubabalicki@gmail.com"
] | kubabalicki@gmail.com |
49cff15efb1486afccf82fc66324d9b682b7fe42 | 1a2828536c57242cd72e96ed887dfea48f250715 | /divide_dataset.py | ca7d0a9cba3b47d3402e698566ee56132108c56c | [] | no_license | Frostmoune/FaseSR | 2f5f4dd421b2b6e5315d4ee55977015198dff5ff | 137837e2c79957f70f4c2546e27733290506459e | refs/heads/master | 2020-08-10T18:40:07.722703 | 2019-10-11T09:28:30 | 2019-10-11T09:28:30 | 214,397,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,832 | py | from models import networks
from data import create_dataloader, create_dataset
import argparse
import torch
from sklearn.cluster import MiniBatchKMeans
import os
import numpy as np
import shutil
import pickle

# Cluster a paired LR/HR face dataset by identity features: every HR image
# is embedded with a SphereFace network, the embeddings are standardized and
# clustered with MiniBatchKMeans, and each LR/HR pair is copied into a
# per-cluster directory ("<root><cluster-id>/").

if __name__ == '__main__':
    # Option dict in the project's usual format: feature network (network_F)
    # plus dataset settings. The dataroot_* entries are filled in from CLI
    # arguments below.
    opt = {
        'gpu_ids': [0, 1, 2, 3],
        'network_F': {
            'mode': 'Sphere20a',
            'path': '/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/pretrained/sphere20a_20171020.pth'
        },
        'dataset': {
            'name': 'CelebA',
            'mode': 'LRHR',
            'subset_file': None,
            'phase': 'train',
            'data_type': 'img',
            'scale': 4,
            'HR_size': 96,
            'use_shuffle': True,
            'use_flip': False,
            'use_rot': False,
            'batch_size': 40,
            'n_workers': 4,
            'color': False
        }
    }
    parser = argparse.ArgumentParser()
    parser.add_argument('--HR_Root', type = str, default = "/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/FFHQ/HR",
                        help = 'Path to val HR.')
    parser.add_argument('--LR_Root', type = str, default = "/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/FFHQ/LR",
                        help = 'Path to val LR.')
    parser.add_argument('--Clusters', type = int, default = 3, help = 'Number of clusters')
    parser.add_argument('--Train', type = int, default = 0, help = 'Train or not')
    parser.add_argument('--Model_Path', type = str, default = "/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/FFHQ/cluster.model",
                        help = 'Path to Cluster model')
    args = parser.parse_args()

    Root = '/GPUFS/nsccgz_yfdu_16/ouyry/SISRC/FaceSR-ESRGAN/dataset/FFHQ'
    opt['dataset']['dataroot_LR'] = args.LR_Root
    opt['dataset']['dataroot_HR'] = args.HR_Root
    test_set = create_dataset(opt['dataset'])
    test_loader = create_dataloader(test_set, opt['dataset'])
    device = torch.device('cuda' if opt['gpu_ids'] is not None else 'cpu')
    # Feature extractor (SphereFace); networks is imported at module level.
    sphere = networks.define_F(opt).to(device)

    # Pre-create one output directory per cluster; the bare except swallows
    # "already exists" errors (and, NOTE(review), any other OSError too).
    for i in range(args.Clusters):
        try:
            os.makedirs(args.HR_Root + str(i))
            os.makedirs(args.LR_Root + str(i))
        except:
            pass

    # Pass 1: embed every HR image batch and remember the file paths in the
    # same order, so row i of `vectors` corresponds to LR_paths[i]/HR_paths[i].
    vectors = None
    LR_paths = []
    HR_paths = []
    for i, data in enumerate(test_loader):
        HR = data['HR'].to(device)
        HR_vec = sphere(HR).to('cpu').numpy()
        if vectors is None:
            vectors = HR_vec
        else:
            vectors = np.concatenate((vectors, HR_vec), axis = 0)
        LR_paths += data['LR_path']
        HR_paths += data['HR_path']
        print("Sphere %d batch"%i)
    print(vectors.shape)
    print("Sphere Done ...")

    # Standardize features (zero mean, unit variance per dimension).
    mean = np.mean(vectors, axis = 0, keepdims = True)
    std = np.std(vectors, axis = 0, keepdims = True)
    vectors = (vectors - mean) / std
    if args.Train:
        # Fit k-means incrementally in chunks of 2000 rows, then persist it.
        model = MiniBatchKMeans(n_clusters = args.Clusters, batch_size = 2000, random_state = 0, max_iter = 5000)
        for i in range(0, vectors.shape[0], 2000):
            model.partial_fit(vectors[i:i+2000, :])
        with open(args.Model_Path, 'wb') as f:
            pickle.dump(model, f)
    else:
        # Reuse a previously trained clustering model.
        with open(args.Model_Path, 'rb') as f:
            model = pickle.load(f)
    labels = model.predict(vectors)
    print("Cluster Done ...")
    # Pass 2: copy each LR/HR pair into its cluster's directory.
    for i, label in enumerate(labels):
        print(i)
        shutil.copy(LR_paths[i], args.LR_Root + str(label))
        shutil.copy(HR_paths[i], args.HR_Root + str(label))
print("Done") | [
"810343087@qq.com"
] | 810343087@qq.com |
1f0d272239f6f020cfd64030a8469292477a9b34 | 1800155dcdb48bf956fa423858a8cc20ed27e6cb | /game-of-life.py | 97937b7282f8adfcc28d8545dbd7e53f5206f1d9 | [] | no_license | gitprouser/LeetCode-3 | 1cc2d1dbbf439af4b3768da388dafd514cc5432b | 530ea79f0377e1fc3fbfb5c5cfe7768159144e57 | refs/heads/master | 2021-06-06T16:30:14.795093 | 2016-08-22T21:40:01 | 2016-08-22T21:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,380 | py | """
According to the Wikipedia's article: "The Game of Life, also known simply as Life, is a cellular automaton devised by the British mathematician John Horton Conway in 1970."
Given a board with m by n cells, each cell has an initial state live (1) or dead (0). Each cell interacts with its eight neighbors (horizontal, vertical, diagonal) using the following four rules (taken from the above Wikipedia article):
Any live cell with fewer than two live neighbors dies, as if caused by under-population.
Any live cell with two or three live neighbors lives on to the next generation.
Any live cell with more than three live neighbors dies, as if by over-population.
Any dead cell with exactly three live neighbors becomes a live cell, as if by reproduction.
Write a function to compute the next state (after one update) of the board given its current state.
Follow up:
Could you solve it in-place? Remember that the board needs to be updated at the same time: You cannot update some cells first and then use their updated values to update other cells.
In this question, we represent the board using a 2D array. In principle, the board is infinite, which would cause problems when the active area encroaches the border of the array. How would you address these problems?
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases.
"""
| [
"tohaowu@gmail.com"
] | tohaowu@gmail.com |
59ee7d05e8fef6ab8337b80af859711836d8dae0 | fa93e53a9eee6cb476b8998d62067fce2fbcea13 | /build/tiago_multi/catkin_generated/generate_cached_setup.py | 7dee2c4d07d16dce4cd6ca6c5a582b00711a0726 | [] | no_license | oyetripathi/ROS_conclusion_project | 2947ee2f575ddf05480dabc69cf8af3c2df53f73 | 01e71350437d57d8112b6cec298f89fc8291fb5f | refs/heads/master | 2023-06-30T00:38:29.711137 | 2021-08-05T09:17:54 | 2021-08-05T09:17:54 | 392,716,311 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | # -*- coding: utf-8 -*-
from __future__ import print_function

import os
import stat
import sys

# Auto-generated by catkin: renders the workspace environment into a cached
# shell script (setup_cached.sh) so later builds can source it without
# re-running the full environment setup.

# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/sandeepan/tiago_public_ws/devel;/opt/ros/melodic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# Produce the cached environment lines for this package's devel-space env.sh.
code = generate_environment_script('/home/sandeepan/tiago_public_ws/devel/.private/tiago_multi/env.sh')

output_filename = '/home/sandeepan/tiago_public_ws/build/tiago_multi/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Make the generated script executable for the owning user.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"sandeepan.ghosh.ece20@itbhu.ac.in"
] | sandeepan.ghosh.ece20@itbhu.ac.in |
416bc0ae6a2a36ec1c764d24e4f594644b0a7bec | 13800b7827598e76428a335559b7bf11867ec2f0 | /examples/py/binance-fetch-all-trades.py | ac56fca50cf98cd41c5da62c1b4310aafb80abd7 | [
"MIT"
] | permissive | ccxt/ccxt | b40a0466f5c430a3c0c6026552ae697aa80ba6c6 | e4065f6a490e6fc4dd7a72b375428b2faa570668 | refs/heads/master | 2023-09-04T03:41:29.787733 | 2023-09-03T19:25:57 | 2023-09-03T19:25:57 | 91,253,698 | 30,798 | 8,190 | MIT | 2023-09-14T21:59:09 | 2017-05-14T15:41:56 | Python | UTF-8 | Python | false | false | 1,686 | py | # -*- coding: utf-8 -*-
import os
import sys
import csv

# Make the repository's python/ directory importable when running from examples/.
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')

import ccxt  # noqa: E402

# Walk the Binance ETH/BTC public trade history forward from `since`,
# writing every trade to a CSV named "<exchange-id>_<market-id>.csv".
exchange = ccxt.binance()
markets = exchange.load_markets()
symbol = 'ETH/BTC'
market = exchange.market(symbol)
one_hour = 3600 * 1000  # one hour in milliseconds
since = exchange.parse8601('2018-12-12T00:00:00')
now = exchange.milliseconds()
end = exchange.parse8601(exchange.ymd(now) + 'T00:00:00')  # start of the current day
previous_trade_id = None
filename = exchange.id + '_' + market['id'] + '.csv'
with open(filename, mode="w") as csv_f:
    csv_writer = csv.DictWriter(csv_f, delimiter=",", fieldnames=["timestamp", "size", "price", "side"])
    csv_writer.writeheader()
    while since < end:
        try:
            trades = exchange.fetch_trades(symbol, since)
            print(exchange.iso8601(since), len(trades), 'trades')
            if len(trades):
                last_trade = trades[-1]
                if previous_trade_id != last_trade['id']:
                    # New data: advance the cursor to the last trade seen
                    # and write out the whole batch.
                    since = last_trade['timestamp']
                    previous_trade_id = last_trade['id']
                    for trade in trades:
                        csv_writer.writerow({
                            'timestamp': trade['timestamp'],
                            'size': trade['amount'],
                            'price': trade['price'],
                            'side': trade['side'],
                        })
                else:
                    # Same last trade as before — no progress; skip ahead one hour.
                    since += one_hour
            else:
                # Empty window; skip ahead one hour.
                since += one_hour
        except ccxt.NetworkError as e:
            # Transient network failure: log, wait a minute, retry the same window.
            print(type(e).__name__, str(e))
            exchange.sleep(60000)
| [
"igor.kroitor@gmail.com"
] | igor.kroitor@gmail.com |
a7177ae8e232a76fe17c2554369f3bd1be1b0acb | 4951103d1a112fbb90059a977582f0642546c4cb | /mitmproxy/__init__.py | 9697de8780ca420e99c33da5d4d5716bcbe6ce7d | [
"MIT"
] | permissive | takeratta/mitmproxy | 9d114f221e99e7c522fd8bdd51561753c974ae6e | 569d275d763f499cce9673fcf118dcc8d59d2eeb | refs/heads/master | 2022-01-22T05:23:46.978493 | 2017-10-22T16:06:44 | 2017-10-22T16:06:44 | 108,022,306 | 0 | 0 | MIT | 2019-07-29T09:05:59 | 2017-10-23T18:26:23 | Python | UTF-8 | Python | false | false | 137 | py | # https://github.com/mitmproxy/mitmproxy/issues/1809
# import script here so that pyinstaller registers it.
from . import script # noqa
| [
"git@maximilianhils.com"
] | git@maximilianhils.com |
d5f10f1c0e5820a716478d43e50433a89e88cfc7 | 0d9c964fd7644395a3f0763f484e485fcc67f762 | /new/src/22.02.2020/replace.py | e6ba448271b07b2a5d1d9d6fcc4d6b3745d42d8f | [
"Apache-2.0"
] | permissive | VladBaryliuk/my_start_tasks | eaa2e6ff031f2f504be11f0f64f5d99bd1a68a0e | bf387543e6fa3ee303cbef04d2af48d558011ed9 | refs/heads/main | 2023-04-14T14:00:08.415787 | 2021-04-24T13:47:38 | 2021-04-24T13:47:38 | 354,538,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | string = str (input())
# input() already returns a str, so the original str(...) wrappers
# (and the space before the call parentheses) were redundant.
s = input()  # substring to search for
c = input()  # replacement text
print(string.replace(s, c))
| [
"vladmain9@gmail.com"
] | vladmain9@gmail.com |
ded616fccd10707347213e409fa9449335737283 | 3f576989246ddadc3ae9c9d48007a4866245a5f2 | /music_controller/api/urls.py | ae82696637729eec3a595c489018492417018065 | [] | no_license | raghavendra-musubi/django-react-house-party-app | d536fbc92508656685e36974a1cec7dca6d4b07d | b90780f993dbd90a66f6b83564854a8e6dbae3cc | refs/heads/main | 2023-03-05T14:37:59.809471 | 2021-02-17T21:52:10 | 2021-02-17T21:52:10 | 339,756,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from django.urls import path
from .views import RoomView, CreateRoomView

# URL routes for the api app; each route dispatches to a class-based view.
urlpatterns = [
    path('room/',RoomView.as_view()),
    path('create-room/',CreateRoomView.as_view())
]
| [
"raghavendra@techis.io"
] | raghavendra@techis.io |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.