from opcode import opmap
from asm.ops.abc import Opcode
class GET_LEN(Opcode):
def __init__(self):
super().__init__(opmap["GET_LEN"], 0)
class MATCH_MAPPING(Opcode):
def __init__(self):
super().__init__(opmap["MATCH_MAPPING"], 0)
class MATCH_SEQUENCE(Opcode):
def __init__(self):
super().__init__(opmap["MATCH_SEQUENCE"], 0)
class MATCH_KEYS(Opcode):
def __init__(self):
super().__init__(opmap["MATCH_KEYS"], 0)
class COPY_DICT_WITHOUT_KEYS(Opcode):
def __init__(self):
super().__init__(opmap["COPY_DICT_WITHOUT_KEYS"], 0)
class ROT_N(Opcode):
def __init__(self, arg: int):
super().__init__(opmap["ROT_N"], arg)
class RERAISE(Opcode):
def __init__(self, arg: bool = False):
super().__init__(opmap["RERAISE"], arg)
class GEN_START(Opcode):
def __init__(self, arg: int):
super().__init__(opmap["GEN_START"], arg)
class MATCH_CLASS(Opcode):
def __init__(self, arg: int):
super().__init__(opmap["MATCH_CLASS"], arg)
|
"""Storage for change and authentication history."""
from __future__ import annotations
import re
from typing import TYPE_CHECKING
from sqlalchemy import and_, or_
from sqlalchemy.sql import text
from gafaelfawr.models.history import (
HistoryCursor,
PaginatedHistory,
TokenChangeHistoryEntry,
)
from gafaelfawr.schema import AdminHistory, TokenChangeHistory
from gafaelfawr.util import normalize_datetime
if TYPE_CHECKING:
from datetime import datetime
from typing import Optional
from sqlalchemy.orm import Query, Session
from gafaelfawr.models.history import AdminHistoryEntry
from gafaelfawr.models.token import TokenType
__all__ = ["AdminHistoryStore", "TokenChangeHistoryStore"]
class AdminHistoryStore:
"""Stores and retrieves the history of changes to token administrators.
Parameters
----------
session : `sqlalchemy.orm.Session`
The underlying database session.
"""
def __init__(self, session: Session) -> None:
self._session = session
def add(self, entry: AdminHistoryEntry) -> None:
"""Record a change to the token administrators."""
new = AdminHistory(**entry.dict())
self._session.add(new)
class TokenChangeHistoryStore:
"""Stores and retrieves the history of changes to tokens.
Parameters
----------
session : `sqlalchemy.orm.Session`
The underlying database session.
"""
def __init__(self, session: Session) -> None:
self._session = session
def add(self, entry: TokenChangeHistoryEntry) -> None:
"""Record a change to a token."""
entry_dict = entry.dict()
# Convert the lists of scopes to the empty string for an empty list
# and a comma-separated string otherwise.
entry_dict["scopes"] = ",".join(sorted(entry.scopes))
if entry.old_scopes is not None:
entry_dict["old_scopes"] = ",".join(sorted(entry.old_scopes))
new = TokenChangeHistory(**entry_dict)
self._session.add(new)
def list(
self,
*,
cursor: Optional[HistoryCursor] = None,
limit: Optional[int] = None,
since: Optional[datetime] = None,
until: Optional[datetime] = None,
username: Optional[str] = None,
actor: Optional[str] = None,
key: Optional[str] = None,
token: Optional[str] = None,
token_type: Optional[TokenType] = None,
ip_or_cidr: Optional[str] = None,
) -> PaginatedHistory[TokenChangeHistoryEntry]:
"""Return all changes to a specific token.
Parameters
----------
cursor : `gafaelfawr.models.history.HistoryCursor`, optional
A pagination cursor specifying where to start in the results.
limit : `int`, optional
Limit the number of returned results.
since : `datetime.datetime`, optional
Limit the results to events at or after this time.
until : `datetime.datetime`, optional
Limit the results to events before or at this time.
username : `str`, optional
Limit the results to tokens owned by this user.
actor : `str`, optional
Limit the results to actions performed by this user.
key : `str`, optional
Limit the results to this token and any subtokens of this token.
Note that this will currently pick up direct subtokens but not
subtokens of subtokens.
token : `str`, optional
Limit the results to only this token.
token_type : `gafaelfawr.models.token.TokenType`, optional
Limit the results to tokens of this type.
ip_or_cidr : `str`, optional
Limit the results to changes made from this IPv4 or IPv6 address
or CIDR block. Unless the underlying database is PostgreSQL, the
CIDR block must be on an octet boundary.
Returns
-------
history : `gafaelfawr.models.history.PaginatedHistory`
Paginated collection of change history entries, which may be empty.
"""
query = self._session.query(TokenChangeHistory)
if since:
query = query.filter(TokenChangeHistory.event_time >= since)
if until:
query = query.filter(TokenChangeHistory.event_time <= until)
if username:
query = query.filter_by(username=username)
if actor:
query = query.filter_by(actor=actor)
if key:
query = query.filter(
or_(
TokenChangeHistory.token == key,
TokenChangeHistory.parent == key,
)
)
if token:
query = query.filter_by(token=token)
if token_type:
query = query.filter_by(token_type=token_type)
if ip_or_cidr:
query = self._apply_ip_or_cidr_filter(query, ip_or_cidr)
# Shunt the complicated case of a paginated query to a separate
# function to keep the logic more transparent.
if cursor or limit:
return self._paginated_query(query, cursor, limit)
# Perform the query and return the results.
query = query.order_by(
TokenChangeHistory.event_time.desc(), TokenChangeHistory.id.desc()
)
entries = query.all()
return PaginatedHistory[TokenChangeHistoryEntry](
entries=[TokenChangeHistoryEntry.from_orm(e) for e in entries],
count=len(entries),
prev_cursor=None,
next_cursor=None,
)
def _paginated_query(
self,
query: Query,
cursor: Optional[HistoryCursor],
limit: Optional[int],
) -> PaginatedHistory[TokenChangeHistoryEntry]:
"""Run a paginated query (one with a limit or a cursor)."""
limited_query = query
# Apply the cursor, if there is one.
if cursor:
limited_query = self._apply_cursor(limited_query, cursor)
# When retrieving a previous set of results using a previous
# cursor, we have to reverse the sort algorithm so that the cursor
# boundary can be applied correctly. We'll then later reverse the
# result set to return it in proper forward-sorted order.
if cursor and cursor.previous:
limited_query = limited_query.order_by(
TokenChangeHistory.event_time, TokenChangeHistory.id
)
else:
limited_query = limited_query.order_by(
TokenChangeHistory.event_time.desc(),
TokenChangeHistory.id.desc(),
)
# Grab one more element than the query limit so that we know whether
# to create a cursor (because there are more elements) and what the
# cursor value should be (for forward cursors).
if limit:
limited_query = limited_query.limit(limit + 1)
# Execute the query twice, once to get the next batch of results and
# once to get the count of all entries without pagination.
entries = limited_query.all()
count = query.count()
# Calculate the cursors, remove the extra element we asked for, and
# reverse the results again if we did a reverse sort because we were
# using a previous cursor.
prev_cursor = None
next_cursor = None
if cursor and cursor.previous:
if limit:
next_cursor = HistoryCursor.invert(cursor)
if len(entries) > limit:
prev_cursor = self._build_prev_cursor(entries[limit - 1])
entries = entries[:limit]
entries.reverse()
elif limit:
if cursor:
prev_cursor = HistoryCursor.invert(cursor)
if len(entries) > limit:
next_cursor = self._build_next_cursor(entries[limit])
entries = entries[:limit]
# Return the results.
return PaginatedHistory[TokenChangeHistoryEntry](
entries=[TokenChangeHistoryEntry.from_orm(e) for e in entries],
count=count,
prev_cursor=prev_cursor,
next_cursor=next_cursor,
)
@staticmethod
def _apply_cursor(query: Query, cursor: HistoryCursor) -> Query:
"""Apply a cursor to a query."""
if cursor.previous:
return query.filter(
or_(
TokenChangeHistory.event_time > cursor.time,
and_(
TokenChangeHistory.event_time == cursor.time,
TokenChangeHistory.id > cursor.id,
),
)
)
else:
return query.filter(
or_(
TokenChangeHistory.event_time < cursor.time,
and_(
TokenChangeHistory.event_time == cursor.time,
TokenChangeHistory.id <= cursor.id,
),
)
)
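# Hedged illustration of the keyset predicate built above, for a forward
# (non-previous) cursor positioned at (time=T, id=I); the actual SQL is
# generated by SQLAlchemy from the expressions in _apply_cursor:
#
#   WHERE event_time < :T
#      OR (event_time = :T AND id <= :I)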
@staticmethod
def _build_next_cursor(entry: TokenChangeHistory) -> HistoryCursor:
"""Construct a next cursor for entries >= the given entry."""
next_time = normalize_datetime(entry.event_time)
assert next_time
return HistoryCursor(time=next_time, id=entry.id)
@staticmethod
def _build_prev_cursor(entry: TokenChangeHistory) -> HistoryCursor:
"""Construct a prev cursor for entries before the given entry."""
prev_time = normalize_datetime(entry.event_time)
assert prev_time
return HistoryCursor(time=prev_time, id=entry.id, previous=True)
def _apply_ip_or_cidr_filter(self, query: Query, ip_or_cidr: str) -> Query:
"""Apply an appropriate filter for an IP or CIDR block.
If the underlying database is not PostgreSQL, which supports native
CIDR membership queries, cheat and turn the CIDR block into a string
wildcard. This will only work for CIDR blocks on class boundaries,
but the intended supported database is PostgreSQL anyway.
"""
if "/" in ip_or_cidr:
if self._session.get_bind().name == "postgresql":
return query.filter(text(":c >> ip_address")).params(
c=ip_or_cidr
)
else:
if ":" in str(ip_or_cidr):
net = re.sub("::/[0-9]+$", ":%", ip_or_cidr)
else:
net = re.sub(r"(\.0)+/[0-9]+$", ".%", ip_or_cidr)
return query.filter(TokenChangeHistory.ip_address.like(net))
else:
return query.filter_by(ip_address=str(ip_or_cidr))
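# Hedged usage sketch; "session" and "entry" are assumed to be supplied by the
# caller and are not defined in this module:
#
#   store = TokenChangeHistoryStore(session)
#   store.add(entry)
#   page = store.list(username="someuser", limit=100)
#   if page.next_cursor:
#       page = store.list(username="someuser", limit=100, cursor=page.next_cursor)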
|
# Generated by Django 2.2.5 on 2020-06-30 11:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('subscriptions', '0002_auto_20200630_0617'),
]
operations = [
migrations.AddField(
model_name='subscriptionplan',
name='Account_type',
field=models.CharField(choices=[('Business Account', 'Business Account'), ('Personal Account', 'Personal Account')], default=1, max_length=50),
preserve_default=False,
),
migrations.AddField(
model_name='subscriptionplan',
name='cost',
field=models.DecimalField(blank=True, decimal_places=4, help_text='the cost per recurrence of the plan', max_digits=19, null=True),
),
]
|
__version__ = "1.0.0"
import scrapy
import MyUtilities.common
from scrapy.crawler import CrawlerProcess
#Required Modules
##py -m pip install
#See: https://benbernardblog.com/web-scraping-and-crawling-are-perfectly-legal-right/#generaladviceforyourscrapingorcrawlingprojects
#See: http://doc.scrapy.org/en/1.0/topics/practices.html#bans
#Controllers
def build(*args, **kwargs):
"""Starts the GUI making process."""
return Spider(*args, **kwargs)
def runSpider(spiderClass):
"""Runs the scrapy spider.
See: https://doc.scrapy.org/en/latest/topics/commands.html
Use: http://doc.scrapy.org/en/1.0/topics/practices.html#run-scrapy-from-a-script
spiderClass (scrapy.Spider) - The spider to run
Example Input: runSpider(__file__)
"""
process = CrawlerProcess({
# 'USER_AGENT': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
})
process.crawl(spiderClass)
process.start() # the script will block here until the crawling is finished
class Spider():
def __init__(self, name = None, mySpider = None):
self.mySpider = mySpider or self.BaseSpider
self.mySpider.parent = self
self.name = name
parser = None
_url_login = None
def run(self):
if (self.parser):
if (self.parser.parent is None):
self.parser.parent = self
if (not self.url_start):
print("ERROR: No starting url given")
return False
if (self.url_login):
if (None in (self.parser.login_userLabel, self.parser.login_userValue, self.parser.login_passwordLabel, self.parser.login_passwordValue)):
print("ERROR: Missing login information")
return False
self.mySpider.run()
@MyUtilities.common.makeProperty()
class delay():
"""The delay of the spider."""
def getter(self):
return self.mySpider.download_delay
def setter(self, value):
self.mySpider.download_delay = value
def remover(self):
self.mySpider.download_delay = None
@MyUtilities.common.makeProperty()
class name():
"""The name of the spider."""
def getter(self):
return self.mySpider.name
def setter(self, value):
self.mySpider.name = value
def remover(self):
self.mySpider.name = None
@MyUtilities.common.makeProperty()
class url_start():
"""The starting url of the spider.
If there is no starting url, the login url will be used.
"""
def getter(self):
return self.mySpider.start_urls
def setter(self, value):
if (value):
self.mySpider.start_urls = [value]
else:
self.remover()
def remover(self):
self.mySpider.start_urls.clear()
if (self.url_login):
self.url_start = self.url_login
@MyUtilities.common.makeProperty()
class url_login():
"""The starting url of the spider."""
def getter(self):
return self._url_login
def setter(self, value):
self._url_login = value
if (not self.url_start):
self.url_start = value
def remover(self):
self._url_login = None
class BaseParser():
def __init__(self, parent):
self.parent = parent
spider = None
login_userLabel = None
login_userValue = None
login_passwordLabel = None
login_passwordValue = None
login_submitLabel = None
login_submitValue = None
def set_loginUser(self, label, value):
self.login_userLabel = label
self.login_userValue = value
def set_loginPassword(self, label, value):
self.login_passwordLabel = label
self.login_passwordValue = value
def set_loginSubmit(self, label, value):
self.login_submitLabel = label
self.login_submitValue = value
def login_check(self, response):
return self.parent.url_login == response.url
def loginParse(self, response):
"""Logs in the user if the username and password are given.
Can be overridden.
"""
if (self.login_submitLabel is None):
clickdata = None
elif (isinstance(self.login_submitLabel, int)):
clickdata = {"nr": self.login_submitLabel}
else:
clickdata = {self.login_submitLabel: self.login_submitValue}
return scrapy.FormRequest.from_response(
response,
formdata = {
self.login_userLabel: self.login_userValue,
self.login_passwordLabel: self.login_passwordValue,
},
clickdata = clickdata,
callback = self.after_loginParse,
)
def after_loginParse(self, response):
return self.startParse(response)
def startParse(self, response):
if (self.parent.url_login == response.url):
return self.loginParse(response)
return self.scrape(response)
# # form = response.xpath("//form")[0]
# # print(form.xpath("//input"))
# return scrapy.FormRequest.from_response(
# response,
# formdata = {"usr": "admin", "pwd": "12345"},
# callback = self.after_login,
# )
class BaseSpider(scrapy.Spider):
parent = None
name = None
start_urls = []
download_delay = 0
custom_settings = {
"ROBOTSTXT_OBEY": True,
"AUTOTHROTTLE_ENABLED": True,
}
@classmethod
def run(cls):
runSpider(cls)
def parse(self, response):
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(self.settings.attributes.items())
print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
# self.parent.parser.spider = self
# return self.parent.parser.startParse(response)
if (__name__ == "__main__"):
class ExampleSpider_1(scrapy.Spider):
"""An example spider created using this tutorial: https://www.digitalocean.com/community/tutorials/how-to-crawl-a-web-page-with-scrapy-and-python-3 """
#Use: https://www.digitalocean.com/community/tutorials/how-to-crawl-a-web-page-with-scrapy-and-python-3#step-1-%E2%80%94-creating-a-basic-scraper
name = "brickset_spider"
start_urls = ['http://brickset.com/sets/year-2016']
def parse(self, response):
"""Parses the data.
See: http://doc.scrapy.org/en/1.0/topics/spiders.html#scrapy.spiders.Spider.parse
Use: https://www.digitalocean.com/community/tutorials/how-to-crawl-a-web-page-with-scrapy-and-python-3#step-2-%E2%80%94-extracting-data-from-a-page
"""
#<article class='set'>
SET_SELECTOR = ".set" #If you look at the HTML for the page, you'll see that each set is specified with the class *set*
for brickset in response.css(SET_SELECTOR):
#<h1>Brick Bank</h1>
NAME_SELECTOR = "h1 ::text" #Another look at the source of the page we're parsing tells us that the name of each set is stored within an h1 tag for each set. We append ::text to our selector for the name. That's a CSS pseudo-selector that fetches the text inside of the a tag rather than the tag itself.
#<img src="http://images.brickset.com/sets/small/10251-1.jpg?201510121127" title="10251-1: Brick Bank"></a>
IMAGE_SELECTOR = "img ::attr(src)" #The image for the set is stored in the src attribute of an img tag inside an a tag at the start of the set. We can use another CSS selector to fetch this value just like we did when we grabbed the name of each set.
# <dl>
# <dt>Pieces</dt>
# <dd><a class="plain" href="/inventories/10251-1">2380</a></dd>
# <dt>Minifigs</dt>
# <dd><a class="plain" href="/minifigs/inset-10251-1">5</a></dd>
# ...
# </dl>
PIECES_SELECTOR = ".//dl[dt/text() = 'Pieces']/dd/a/text()" #Getting the number of pieces is a little trickier. There's a dt tag that contains the text Pieces, and then a dd tag that follows it which contains the actual number of pieces. We'll use XPath, a query language for traversing XML, to grab this, because it's too complex to be represented using CSS selectors.
MINIFIGS_SELECTOR = ".//dl[dt/text() = 'Minifigs']/dd[2]/a/text()" #Getting the number of minifigs in a set is similar to getting the number of pieces. There's a dt tag that contains the text Minifigs, followed by a dd tag right after that with the number.
yield {
"name": brickset.css(NAME_SELECTOR).extract_first(), #We call extract_first() on the object returned by brickset.css(NAME_SELECTOR) because we just want the first element that matches the selector. This gives us a string, rather than a list of elements.
"pieces": brickset.xpath(PIECES_SELECTOR).extract_first(),
"minifigs": brickset.xpath(MINIFIGS_SELECTOR).extract_first(),
"image": brickset.css(IMAGE_SELECTOR).extract_first(),
}
# <li class="next">
# <a href="http://brickset.com/sets/year-2017/page-2">›</a>
# </li>
NEXT_PAGE_SELECTOR = ".next a ::attr(href)" #There's an li tag with the class of next, and inside that tag, there's an a tag with a link to the next page.
next_page = response.css(NEXT_PAGE_SELECTOR).extract_first()
if (next_page): #All we have to do is tell the scraper to follow that link if it exists
yield scrapy.Request( #The scrapy.Request is a value that we return saying "Hey, crawl this page"
response.urljoin(next_page),
callback = self.parse, #once you've gotten the HTML from this page, pass it back to this method so we can parse it, extract the data, and find the next page.
) #This is the key piece of web scraping: finding and following links. In this example, it's very linear; one page has a link to the next page until we've hit the last page, but you could follow links to tags, or other search results, or any other URL you'd like.
class ExampleSpider_2(scrapy.Spider):
"""An example spider that logs in, then starts scraping.
See: http://scrapingauthority.com/2016/11/22/scrapy-login/
See: http://doc.scrapy.org/en/1.0/topics/request-response.html#topics-request-response-ref-request-userlogin
See: https://stackoverflow.com/questions/20039643/how-to-scrape-a-website-that-requires-login-first-with-python/31585574#31585574
See: https://web.archive.org/web/20110517140553/http://dev.scrapy.org/wiki/CommunitySpiders#SilverStripeCMSdemospiderwithloginhandling
"""
name = "LoginSpider"
start_urls = ["http://testing-ground.scraping.pro/login"]
def parse(self, response):
# #See: http://doc.scrapy.org/en/1.0/intro/tutorial.html#introduction-to-selectors
form = response.xpath("//form")[0]
print(form.xpath("//input"))
return scrapy.FormRequest.from_response(
response,
formdata = {"usr": "admin", "pwd": "12345"},
clickdata = {"type": "submit"},
callback = self.after_login,
)
def after_login(self, response):
# check login succeed before going on
#<h3 class=\'success\'>WELCOME :)</h3>
if (not response.css(".success")):
self.logger.error("\nLogin failed\n")
return
self.logger.error("\nLogin successful\n")
def exampleSpider_3():
spider = build(name = "Automatic_Spider")
class Parser(spider.BaseParser):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.set_loginUser("usr", "admin")
self.set_loginPassword("pwd", "12345")
self.set_loginSubmit("type", "submit")
def after_loginParse(self, response):
if (not response.css(".success")):
self.spider.logger.error("\nLogin failed\n")
return
self.spider.logger.error("\nLogin successful\n")
spider.url_login = "http://testing-ground.scraping.pro/login"
spider.parser = Parser(spider)
spider.run()
runSpider(ExampleSpider_1)
# runSpider(ExampleSpider_2)
# exampleSpider_3()
|
# Copyright 2021 ncdhz
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from PyQt5.QtWidgets import QWidget, QSplitter, QHBoxLayout
from PyQt5.QtCore import Qt
from sentenceLabelLib.LeftRightLabel import Label
from sentenceLabelLib.ScrollPanel import ScrollPanel
class MainPanel(QWidget):
def __init__(self, parent, icon_path_left, icon_path_right, move_icon_path_left, move_icon_path_right):
super(MainPanel, self).__init__()
self.left_right = QSplitter(Qt.Orientation.Horizontal, self)
self.top_bottom = QSplitter(Qt.Orientation.Vertical, self)
self.left = Label(parent, icon_path_left, move_icon_path_left, True)
self.right = Label(parent, icon_path_right, move_icon_path_right)
self.sp_left = ScrollPanel(parent, ScrollPanel.Left)
self.sp_top = ScrollPanel(parent, ScrollPanel.Top)
self.sp_bottom = ScrollPanel(parent, ScrollPanel.Bottom)
self.main_layout = QHBoxLayout(self)
self.init_layout()
def init_layout(self):
self.sp_left.setMinimumSize(400, 450)
self.sp_top.setMinimumSize(500, 300)
self.sp_bottom.setMinimumSize(500, 150)
self.left_right.addWidget(self.sp_left)
self.top_bottom.addWidget(self.sp_top)
self.top_bottom.addWidget(self.sp_bottom)
self.left_right.addWidget(self.top_bottom)
self.main_layout.addWidget(self.left)
self.main_layout.addWidget(self.left_right)
self.main_layout.addWidget(self.right)
self.setLayout(self.main_layout)
def refresh(self):
self.sp_left.refresh()
self.sp_top.refresh()
self.sp_bottom.refresh()
|
# Generated by Django 3.1.3 on 2020-11-28 05:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0012_request_status'),
]
operations = [
migrations.AlterModelOptions(
name='request',
options={'ordering': ['-created_at']},
),
migrations.AddField(
model_name='request',
name='is_draft',
field=models.BooleanField(default=True, verbose_name='Черновик?'),
),
]
|
S = input()
print(S.count("+")-S.count("-"))
|
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2012 Jens Lindström, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
def find_file(db, path):
path = path.lstrip("/")
assert not path.endswith("/")
cursor = db.cursor()
cursor.execute("SELECT id, path FROM files WHERE MD5(path)=MD5(%s)", (path,))
row = cursor.fetchone()
if row:
file_id, found_path = row
assert path == found_path, "MD5 collision in files table: %r != %r" % (path, found_path)
return file_id
cursor.execute("INSERT INTO files (path) VALUES (%s) RETURNING id", (path,))
return cursor.fetchone()[0]
def find_files(db, files):
for file in files:
file.id = find_file(db, file.path)
def describe_file(db, file_id):
cursor = db.cursor()
cursor.execute("SELECT path FROM files WHERE id=%s", (file_id,))
return cursor.fetchone()[0]
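# Hedged usage sketch; "db" is assumed to be a psycopg2-style connection and
# the path below is made up:
#
#   file_id = find_file(db, "/src/main.py")   # strips the leading "/" and
#                                              # inserts the path on first use
#   assert describe_file(db, file_id) == "src/main.py"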
|
##
## Dictionary based bibliography manager
##
## @author Daniel J. Finnegan
## @date February 2019
import importlib
# import difflib
import re
from mendproc.parsers.bibtexparser import BibTexParser
from mendproc.parsers.csvparser import CSVParser
from mendproc.parsers.mswordxmlparser import MSWordXMLParser
from mendproc import Parsers
## Internal manager over the bib dictionary
class BibManager ():
def __init__ (self):
self.entries = []
self.lines = []
def add_entry (self, entry):
self.entries.append (entry)
def dump_keywords (self, lowercase=True):
keywords = []
for entry in self.entries:
if entry['keywords'] != '':
keyword_list = entry['keywords'].split (',')
for keyword in keyword_list:
if lowercase:
keywords.append (keyword.lower ().strip ())
else:
keywords.append (keyword.strip ())
return (keywords)
def cutoff_year (self, year, inclusive=True):
entries = []
for entry in self.entries:
if entry['year'] != '':
int_year = int (entry['year'])
if inclusive:
if int_year >= year:
entries.append (entry)
else:
if int_year > year:
entries.append (entry)
self.entries = []
self.entries = entries
return (entries)
## This method will remove all entries that don't match
## against pattern in their keywords
def cutoff_keywords_regex (self, pattern):
compiled_pattern = re.compile (pattern)
entries = []
for entry in self.entries:
if entry['keywords'] != '':
match_obj = compiled_pattern.search (entry['keywords'])
if match_obj:
entries.append (entry)
self.entries = []
self.entries = entries
return (entries)
def dump_authors (self):
authors = []
for entry in self.entries:
author_list = entry['author'].split (';')
for author in author_list:
authors.append (author)
return (authors)
def lines2entries (self, data_lines, data_type='bibtex'):
if data_type in Parsers:
modulepath, classname = Parsers[data_type].rsplit ('.', maxsplit=1)
module = importlib.import_module(modulepath)
class_ = getattr(module, classname)
parser = class_(data_type)
else:
parser = None
if parser == None:
raise ValueError ('Unknown type ' + data_type + '. Cannot parse')
else:
self.entries = parser.parse_lines (data_lines) ## Otherwise parse the lines into bib entries
def entries2lines (self, data_type='bibtex'):
if data_type in Parsers:
modulepath, classname = Parsers[data_type].rsplit ('.', maxsplit=1)
module = importlib.import_module(modulepath)
class_ = getattr(module, classname)
parser = class_(data_type)
else:
parser = None
if parser == None:
raise ValueError ('Unknown type ' + data_type + '. Cannot parse')
else:
self.lines = parser.parse_entries (self.entries)
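## Hedged usage sketch (not called anywhere; the file name is a placeholder)
def _example_usage(path='library.bib'):
    manager = BibManager()
    with open(path) as bib_file:
        manager.lines2entries(bib_file.readlines(), data_type='bibtex')
    manager.cutoff_year(2015)
    return sorted(set(manager.dump_keywords()))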
|
from pygame import *
font.init()
font2 = font.SysFont('Arial', 36)
mixer.init()
game = True
clock = time.Clock()
FPS = 120
win = display.set_mode((1000, 700))
display.set_caption("game")
place = transform.scale(image.load('place.png'),(1000,700))
class GameSprite(sprite.Sprite):
def __init__(self, gamer_image,gamer_x, gamer_y, gamer_speed, size_x, size_y):
super().__init__()
self.image = transform.scale(image.load(gamer_image),(size_x,size_y))
self.speed = gamer_speed
self.rect = self.image.get_rect()
self.rect.x = gamer_x
self.rect.y = gamer_y
def reset(self):
win.blit(self.image,(self.rect.x, self.rect.y))
class Player(GameSprite):
def update(self):
key_pressed = key.get_pressed()
if key_pressed[K_UP] and self.rect.y >= 108:
self.rect.y -= self.speed
if key_pressed[K_DOWN] and self.rect.y <= 592:
self.rect.y += self.speed
if key_pressed[K_SPACE]:
shoot()
class Bullet(GameSprite):
def update(self):
self.rect.x = self.rect.x - 10
if self.rect.x <= 0:
self.kill()
def shoot():
    # fire one bullet from the turret and spend a round of ammo
    bullet = Bullet('bullet.png', turrel.rect.x, turrel.rect.y, 10, 30, 15)
    bullets.add(bullet)
    global shoots
    shoots = shoots - 1
shoots = 10  # ammo counter, defined before the loop so the HUD can render before the first shot
bullets = sprite.Group()
turrel = Player('turrel.png', 860, 400,1, 138, 108)
while game:
clock.tick(FPS)
win.blit(place,(0,0))
for e in event.get():
if e.type == QUIT:
game = False
text = font2.render("ะะฐััะดั" + str(shoots) , 1, (254, 195, 2))
win.blit(text, (300, 250))
turrel.reset()
turrel.update()
bullets.update()
bullets.draw(win)
display.update()
|
"""Shuffle your team for virtual stand-ups!"""
import argparse
import enum
import json
import random
from typing import Dict, Generator, List, Optional
def _assert_members_xor_subteams(members, subteams):
"""Assert that only one of members or subteams is not None.
Args:
members: The contents of the 'members' field.
subteams: The contents of the 'subteams' field.
Raises:
ValueError: If both arguments are None, or if neither is None.
"""
no_members = members is None
no_subteams = subteams is None
if no_members == no_subteams:
if no_members:
raise ValueError('Either members or subteams are required.')
raise ValueError(
'Team cannot contain members and subteams at the same level.')
class ShuffleMethod(enum.Enum):
NONE = 'none'
GROUPED = 'grouped'
UNGROUPED = 'ungrouped'
class Team:
"""A team that will present in some order at a meeting.
The team may contain a list of members or a list of subteams, not both.
Subteams are Team objects subject to the same constraints.
Attributes:
name: The name of this team.
members: A list of individuals comprising this team.
subteams: A list of subteams comprising this team.
"""
def __init__(self,
name: str,
members: Optional[List[str]] = None,
subteams: Optional[List['Team']] = None) -> None:
_assert_members_xor_subteams(members, subteams)
self.name = name
self.members = members
self.subteams = subteams
def get_members(self,
shuffle_method: ShuffleMethod = ShuffleMethod.GROUPED
) -> List[str]:
"""Get an ordered list of the members of the team.
This is intended to be used as the order in which each member will
present at a meeting (e.g. a standup). Each member of the team or any
subteam will appear exactly once.
This method returns the members in an order determined by the
shuffle_method parameter. The shuffle_method can be:
none: Return team members in the order declared by the team
object. Do not randomize the list.
grouped: Shuffle the members of each subteam, but keep the
subteams in the order declared by the team object.
ungrouped: Shuffle all members, disregarding subteam, returning
any permutation of the members of the team.
Args:
shuffle_method: The method by which to shuffle the team members.
Returns:
A list of all team members, ordered as specified above.
"""
teams = [self]
members = []
while teams:
team = teams.pop()
if team.subteams:
teams.extend(reversed(team.subteams))
else:
if shuffle_method == ShuffleMethod.GROUPED:
members.extend(random.sample(team.members,
len(team.members)))
else:
members.extend(team.members)
if shuffle_method == ShuffleMethod.UNGROUPED:
random.shuffle(members)
return members
def to_dict(self) -> Dict:
"""Write the team out as a dictionary.
Returns:
A dictionary with the contents of this team.
"""
team_dict = {'name': self.name}
if self.members is None:
team_dict['subteams'] = [
subteam.to_dict() for subteam in self.subteams]
else:
team_dict['members'] = self.members
return team_dict
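def _example_shuffles() -> None:
    """Hedged usage sketch of the shuffle methods documented in get_members.

    Not called anywhere; the team below is made up for illustration only.
    """
    team = Team('platform', subteams=[
        Team('backend', members=['ana', 'bo']),
        Team('frontend', members=['cy', 'di']),
    ])
    print(team.get_members(ShuffleMethod.NONE))        # declared order
    print(team.get_members(ShuffleMethod.GROUPED))     # shuffled within each subteam
    print(team.get_members(ShuffleMethod.UNGROUPED))   # fully shuffled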
def build_team_from_dict(team_dict: Dict) -> Team:
"""Build a team object based on the contents of a dictionary.
In the case of subteams, this function will recurse down the dictionary.
The same constraints on team contents apply here as in the Team
constructor.
Args:
team_dict: A dictionary containing the team structure.
Returns:
A Team object.
"""
_assert_members_xor_subteams(team_dict.get('members'),
team_dict.get('subteams'))
if 'members' in team_dict:
return Team(team_dict['name'], members=team_dict['members'])
return Team(team_dict['name'],
subteams=[build_team_from_dict(subteam)
for subteam in team_dict['subteams']])
def build_team_from_file(filename: str) -> Team:
"""Build a team object based on the contents of a JSON file.
Args:
filename: A JSON file containing the team structure.
Returns:
A Team object.
"""
with open(filename) as json_file:
team_dict = json.load(json_file)
return build_team_from_dict(team_dict)
def get_answers(limit: int = -1,
prompt: str = '') -> Generator[str, None, None]:
"""Get answers from input, stopping when an empty input is given.
Args:
limit: The maximum number of answers to accept. The default value of
-1 indicates an indefinite number of answers will be accepted.
prompt: The prompt when accepting an answer. The default value is the
empty string.
Returns:
A generator which yields the answers until exhausted.
"""
count = 0
answer = 'init'
while (limit < 0 or count < limit) and answer:
answer = input(prompt)
count += 1
if answer:
yield answer
def prompt_for_team(name: Optional[str] = None) -> Dict:
"""Prompt user to construct a team.
Args:
name: The name of the team or subteam. Will prompt for a name if none
is provided.
Returns:
A dictionary representing the team.
"""
team_name = name or input('Team name? ')
team_dict = {'name': team_name}
has_subteams = input('Will {} have subteams? Y/N: '.format(team_name))
if has_subteams.lower().startswith('y'):
team_dict['subteams'] = [prompt_for_team(name) for name in get_answers(
prompt='{} subteam name? '.format(team_name))]
else:
team_dict['members'] = [name for name in get_answers(
prompt='{} member name: '.format(team_name))]
return team_dict
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--create', action='store_true',
help='Create a team interactively via the command '
'line')
parser.add_argument('--filename',
help='Path to JSON file containing team members.')
parser.add_argument('--shuffle',
choices=[c.value for c in ShuffleMethod],
default='ungrouped',
help='Whether to shuffle subteams by group, shuffle '
'the whole list, or return list in order.')
args = parser.parse_args()
if not args.create and not args.filename:
raise RuntimeError('No team file provided and create not specified.')
if args.create:
team_dict = prompt_for_team()
if args.filename:
with open(args.filename, 'w') as json_file:
json.dump(team_dict, json_file)
team = build_team_from_dict(team_dict)
else:
team = build_team_from_file(args.filename)
print('\n'.join(team.get_members(ShuffleMethod(args.shuffle))))
if __name__ == '__main__':
main()
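# Hedged invocation examples (script and file names are placeholders):
#
#   python shuffle_standup.py --filename team.json --shuffle grouped
#   python shuffle_standup.py --create --filename team.json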
|
import pandas as pd
import plotly.graph_objs as go
import plotly.offline as offline
offline.init_notebook_mode(connected=True)
# Colors for plotting
EDFGreen = '#509E2F'
EDFLightGreen = '#C4D600'
EDFOrange = '#FE5815'
EDFBlue = '#001A70'
EDFColors = [EDFGreen, EDFBlue, EDFOrange]
def plotly_data_by_column(df):
if df.columns.nlevels > 1:
new_cols = []
for level in df.columns.names:
new_cols.append(df.columns.get_level_values(level=level).astype(str))
new_cols = pd.MultiIndex.from_arrays(new_cols).map('_'.join)
df.columns = new_cols
plotting_data = [{'x': df.index, 'y': df[col], 'name': col, 'mode':'lines'} for col in df.columns]
return plotting_data
def plotly_data_by_column_line(df, kind='line'):
if df.columns.nlevels > 1:
new_cols = []
for level in df.columns.names:
new_cols.append(df.columns.get_level_values(level=level).astype(str))
new_cols = pd.MultiIndex.from_arrays(new_cols).map('_'.join)
df.columns = new_cols
if kind=='line':
plotting_data = [{'x': df.index, 'y': df[col], 'name': col, 'mode':'lines'} for col in df.columns]
elif kind=='bar':
plotting_data = [go.Bar(x=df.index, y=df[col], name=col) for col in df.columns]
return plotting_data
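def _example_plot(df):
    # Hedged usage sketch (not called anywhere): build traces from a DataFrame
    # with the helpers above and render them; assumes "df" has one or more
    # value columns and an index suitable for the x axis.
    data = plotly_data_by_column_line(df, kind='bar')
    layout = go.Layout(colorway=EDFColors)
    offline.iplot(go.Figure(data=data, layout=layout))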
|
# coding=utf-8
# !/usr/bin/env python
"""
:mod:"Vacuubrand_CVC_3000" -- API for Vacuubrand CVC3000 remote controllable vacuum controller
===================================
.. module:: Vacuubrand_CVC_3000
:platform: Windows
:synopsis: Control CVC3000 vacuum controller.
.. moduleauthor:: Sebastian Steiner <s.steiner.1@research.gla.ac.uk>
(c) 2017 The Cronin Group, University of Glasgow
This provides a python class for Vacuubrand vacuum pumps with
CVC 3000 vacuum controller.
The command implementation is based on the english manual:
English manual version: 999222 / 11/03/2016 Pages 40 - 45
For style guide used see http://xkcd.com/1513/
"""
# system imports
import re
import serial
import os
import inspect
import sys
from time import sleep
HERE = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(os.path.join(HERE, '..'))
# additional module imports
from SerialDevice.serial_labware import SerialDevice, command
class CVC3000(SerialDevice):
"""
This provides a python class for Vacuubrand vacuum pumps with
CVC 3000 vacuum controller.
The command implementation is based on the english manual:
English manual version: 999222 / 11/03/2016 Pages 40 - 45
"""
def __init__(self, port=None, device_name=None, connect_on_instantiation=False, soft_fail_for_testing=False):
"""
Initializer of the CVC3000 class
:param str port: The port name/number of the vacuum pump
:param str device_name: A descriptive name for the device, used mainly in debug prints.
:param bool connect_on_instantiation: (optional) determines if the connection is established on instantiation of
the class. Default: Off
"""
# implement class logger
super().__init__(port, device_name, soft_fail_for_testing)
# serial settings
self.baudrate = 19200
self.bytesize = serial.EIGHTBITS
self.parity = serial.PARITY_NONE
self.rtscts = True
# answer patterns
self.getanswer = re.compile(r"([0-9.]+) ([0-9A-z%]+)\r\n")
self.setanswer = re.compile(r"([0-9A-z%]+)\r\n")
self.timepattern = re.compile(r"\d{2}:\d{2}")
self.timeanswer = re.compile(r"(\d{2}:\d{2}) ([hms]:[hms])\r\n")
self.timesecondsanswer = re.compile(r"(\d{2}:\d{2}:\d{2}) ([hms]:[hms]:[hms])\r\n")
# dictionary for modes
self.MODES_3 = {
"VACUULAN": 0,
"pump down": 1,
"vac control": 2,
"auto": 3,
"auto low": 30,
"auto normal": 31,
"auto high": 32,
"program": 4
}
# DOCUMENTED COMMANDS for easier maintenance
# general commands
self.SET_CONTROLLER_VERSION = "CVC" # params: 2: CVC 2000; 3: CVC 3000
self.ECHO = "ECHO" # params: 0: echo off, returns nothing; 1: echo on, returns 1
self.STORE = "STORE" # no params, store settings permanently
# CVC 2000 command set
# read commands
self.GET_CURRENT_PRESSURE_2 = "IN_PV_1" # unit mbar/hPa/Torr (according to preselection)
self.GET_CURRENT_FREQUENCY_2 = "IN_PV_2" # unit Hz
self.GET_DEVICE_CONFIG_2 = "IN_CFG" # unit none, for decoding see manual p. 41
self.GET_ERROR_2 = "IN_ERR" # unit none, for decoding see manual p. 41
self.GET_STATUS_2 = "IN_STAT" # unit none, for decoding see manual p. 41
# write commands
self.SET_MODE_2 = "OUT_MODE" # unit none, for parameters see manual p. 41
self.SET_VACUUM_2 = "OUT_SP_1" # unit mbar/hPa/Torr (according to preselection)
self.SET_VACUUM_WITH_VENTING_2 = "OUT_SP_V" # unit mbar/hPa/Torr (according to preselection)
self.SET_FREQUENCY_2 = "OUT_SP_2" # unit Hz; format XX.X; 99.9 for "HI"
self.SET_VACUUM_FOR_SWITCH_ON_2 = "OUT_SP_3" # unit mbar/hPa/Torr (according to preselection), VACUU·LAN only
self.SET_DELAY_2 = "OUT_SP_4" # unit hh:mm; format XX:XX; VACUU·LAN only; max. 05:00
self.SET_SWITCHING_OFF_VAC_2 = "OUT_SP_5" # unit mbar/hPa/Torr (according to preselection)
self.SET_SWITCHING_OFF_TIME_2 = "OUT_SP_6" # unit hh:mm; format XX:XX
self.START_2 = "START" # always returns 1
self.STOP_2 = "STOP" # params: None: stops operation, returns 0; 1: stops operation,
# returns 1; 2: stops operation and stores current pressure as
# new setpoint, returns 2
self.REMOTE_2 = "REMOTE" # params: 0: local operation, returns 0;
# 1: remote operation, returns 1
self.VENT_VALVE_2 = "OUT_VENT" # params: 0: closed, returns 0;
# 1: open and stop operation, returns 1
# CVC 3000 command set
# read commands
self.GET_CURRENT_PRESSURE_3 = "IN_PV_1" # unit mbar/hPa/Torr (according to preselection)
self.GET_TRANSDUCER_X_PRESSURE_3 = "IN_PV_S" # command is followed by transducer Nr without space;
# unit mbar/hPa/Torr (according to preselection)
self.GET_CURRENT_SPEED_3 = "IN_PV_2" # unit 1-100% or "HI"
self.GET_PROCESS_TIME_3 = "IN_PV_3" # unit hh:mm; format XX:XX h:m
self.GET_ALL_PRESSURES_3 = "IN_PV_X" # unit mbar/hPa/Torr (according to preselection);
# returns pressures of all connected sensors separated by spaces
self.GET_CONTROLLER_RUN_TIME_3 = "IN_PV_T" # unit days and hours; format XXXXdXXh
self.GET_DEVICE_CONFIG_3 = "IN_CFG" # unit none, for decoding see manual p. 42
self.GET_ERROR_3 = "IN_ERR" # unit none, for decoding see manual p. 42
self.GET_STATUS_3 = "IN_STAT" # unit none, for decoding see manual p. 42
self.GET_VACUUM_SETPOINT_3 = "IN_SP_1" # unit mbar/hPa/Torr (according to preselection)
self.GET_MAX_SPEED_SETPOINT_3 = "IN_SP_2" # unit 1-100% or "HI"
self.GET_SWITCHING_PRESSURE_3 = "IN_SP_3" # switching pressure for VACUU·LAN or two point control,
# unit mbar/hPa/Torr (according to preselection)
self.GET_DELAY_3 = "IN_SP_3" # unit hh:mm; format XX:XX h:m; 00:00 = off
self.GET_SWITCHING_OFF_VAC_3 = "IN_SP_5" # "maximum" for "vac control"; "minimum" for "pump down";
# unit mbar/hPa/Torr (according to preselection)
self.GET_RUNTIME_3 = "IN_SP_6" # process runtime; unit hh:mm; format XX:XX h:m
self.GET_STEP_TIME_3 = "IN_SP_P1" # followed by a number y without space; time in program step y
# unit hh:mm:ss; format XX:XX:XX h:m:s
self.GET_STEP_PRESSURE_3 = "IN_SP_P2" # followed by a number y without space;
# pressure in program step y;
# unit mbar/hPa/Torr (according to preselection)
self.GET_STEP_VALVE_3 = "IN_SP_P3" # followed by a number y without space; venting valve in
# program step y; unit none; 0: no vent; 1: vent
self.GET_STEP_STEP_3 = "IN_SP_P4" # followed by a number y without space; step mark in
# program step y; unit none; 0: no step; 1: step
self.GET_STEP_AUTO_3 = "IN_SP_P5" # followed by a number y without space; automatic boiling point
# finding in program step y; 0: no auto; 1: auto
self.GET_VERSION_3 = "IN_VER" # software version; format CVC 3000 VX.XX
# write commands
self.SET_MODE_3 = "OUT_MODE" # unit none, for parameters see manual p. 44
self.SET_CONFIG_3 = "OUT_CFG" # unit none, for parameters see manual p. 44
self.SET_VACUUM_3 = "OUT_SP_1" # unit mbar/hPa/Torr (according to preselection)
self.SET_VACUUM_WITH_VENTING_3 = "OUT_SP_V" # unit mbar/hPa/Torr (according to preselection)
self.SET_SPEED_3 = "OUT_SP_2" # unit 1-100% or "HI"
self.SET_START_UP_PRESSURE_3 = "OUT_SP_3" # unit mbar/hPa/Torr (according to preselection)
self.SET_DELAY_3 = "OUT_SP_4" # unit hh:mm; format XX:XX
self.SET_SWITCHING_OFF_VAC_3 = "OUT_SP_5" # "maximum" for "vac control"; "minimum" for "pump down";
# unit mbar/hPa/Torr (according to preselection)
self.SET_RUNTIME_3 = "OUT_SP_6" # process runtime; unit hh:mm
self.OPEN_PROGRAM_3 = "OUT_SP_PL" # unit none, program 0...9
self.STORE_PROGRAM_3 = "OUT_SP_PS" # unit none, program 0...9
self.SET_STEP_TIME_3 = "OUT_SP_P1" # followed by a number y without space; time in program step y
# unit hh:mm:ss; format XX:XX:XX or +XX:XX:XX for additive time
self.SET_STEP_PRESSURE_3 = "OUT_SP_P2" # followed by a number y without space;
# pressure in program step y;
# unit mbar/hPa/Torr (according to preselection)
self.SET_STEP_VALVE_3 = "OUT_SP_P3" # followed by a number y without space; venting valve in
# program step y; unit none; 0: no vent; 1: vent
self.SET_STEP_STEP_3 = "OUT_SP_P4" # followed by a number y without space; step mark in
# program step y; unit none; 0: no step; 1: step
self.SET_STEP_AUTO_3 = "OUT_SP_P5" # followed by a number y without space; automatic boiling point
# finding in program step y; 0: no auto; 1: automatic
# determination of boiling point,
# 2: automatic adaption to changes
self.START_3 = "START" # always returns 1
self.STOP_3 = "STOP" # params: None: stops operation, returns 0; 1: stops operation,
# returns 1; 2: stops operation and stores current pressure as
# new setpoint, returns 2
self.REMOTE_3 = "REMOTE" # params: 0: local operation, returns 0;
# 1: remote operation, returns 1
self.VENT_VALVE_3 = "OUT_VENT" # params: 0: closed, returns 0;
# 1: open and stop operation, returns 1
self.SET_SENSOR_3 = "OUT_SENSOR" # params: 1: internal sensor; 2...9: external sensors
self.launch_command_handler()
if connect_on_instantiation:
self.open_connection()
def initialise(self, controller_version="CVC 3000"):
"""
Does the necessary setup work. Turns ECHO on, sets the desired controller version
and sets the mode to Vac control (in order to always start from the same spot).
:param controller_version: desired controller version ("CVC 2000" or "CVC 3000")
"""
self.logger.debug("Initialising CVC3000...")
if controller_version == "CVC 2000":
# switch to remote control
self.send_message("{0} {1}".format(self.REMOTE_2, "1"), get_return=True)
# turn echo on
self.send_message("{0} {1}".format(self.ECHO, "1"), get_return=True, return_pattern=self.setanswer)
# switch to CVC 2000 command set
self.send_message("{0} {1}".format(self.SET_CONTROLLER_VERSION, "2"), get_return=True, return_pattern=self.setanswer)
# switch to Vac control mode
self.send_message("{0} {1}".format(self.SET_MODE_2, "2"), get_return=True, return_pattern=self.setanswer)
elif controller_version == "CVC 3000":
# switch to remote control
self.send_message("{0} {1}".format(self.REMOTE_3, "1"), get_return=True)
# turn echo on
self.send_message("{0} {1}".format(self.ECHO, "1"), get_return=True, return_pattern=self.setanswer)
# switch to CVC 3000 command set
self.send_message("{0} {1}".format(self.SET_CONTROLLER_VERSION, "3"), get_return=True, return_pattern=self.setanswer)
# switch to Vac control mode
self.send_message("{0} {1}".format(self.SET_MODE_3, "2"), get_return=True)
self.logger.debug("Done.")
# CVC 2000 methods
# TODO: I can't be asked to write that just now, if someone is especially bored please have a go at it...
# CVC 3000 methods
@command
def set_mode(self, mode):
"""
Sets the mode of the vacuum controller.
:param mode: (string) the mode to be set. Must be a valid key in the MODES_3 dict.
:return: call back to __send_message with an order to set the mode
"""
try:
mode_number = self.MODES_3[mode]
except KeyError:
raise (KeyError("Error setting mode. Input is not a valid mode name \"{0}\"".format(mode)))
self.logger.debug("Setting mode to {0}...".format(mode))
reply = self.send_message("{0} {1}".format(self.SET_MODE_3, mode_number), True, self.setanswer)
if int(reply[0]) != mode_number:
raise ValueError(
"Value Error. Vacuum pump did not return correct return code. Send: \"{0}\". "
"Received: \"{1}\".".format(mode_number, reply[0])
)
@property
@command
def vacuum_sp(self):
"""
Reads the set point (target) for the vacuum in mode Vac control
:return: call back to send_message with a request to return a value
"""
return self.send_message(self.GET_VACUUM_SETPOINT_3, True, self.getanswer)
@vacuum_sp.setter
@command
def vacuum_sp(self, vacuum=None):
"""
Sets the vacuum and returns the set point from the pump so the user can verify that it was successful
:param vacuum: (integer) the target vacuum
"""
try:
# type checking of the vacuum that the user provided
vacuum = int(vacuum)
except ValueError:
raise(ValueError("Error setting vacuum. Vacuum was not a valid integer \"{0}\"".format(vacuum)))
self.logger.debug("Setting vacuum setpoint to {0}...".format(vacuum))
# actually sending the command
reply = self.send_message("{0} {1}".format(self.SET_VACUUM_3, vacuum), True, self.setanswer)
try:
reply_value = int(reply[0])
except ValueError:
raise (ValueError("Error setting vacuum. Reply was not a valid integer \"{0}\"".format(reply[0])))
if reply_value != vacuum:
raise ValueError(
"Value Error. Vacuum pump did not return correct return code. Send: \"{0}\". "
"Received: \"{1}\".".format(vacuum, reply_value)
)
@property
@command
def speed_sp(self):
"""
Reads the set point (target) for the speed.
Returns:
call back to send_message with a request to return a value
"""
return self.send_message(self.GET_MAX_SPEED_SETPOINT_3, True, self.getanswer)
@speed_sp.setter
@command
def speed_sp(self, speed=None):
"""
Sets the maximum pumping speed (1-100%).
Args:
speed (int): Maximum speed setpoint. Has to be between 1 and 100
"""
try:
# type checking of the speed that the user provided
speed = int(speed)
except ValueError:
raise (ValueError("Error setting speed. Speed was not a valid integer \"{0}\"".format(speed)))
if not 1 <= speed <= 100:
raise (ValueError("Error setting speed. Speed was not between 1 and 100% \"{0}\"".format(speed)))
self.logger.debug("Setting speed setpoint to {0}...".format(speed))
if speed == 100:
speed = "HI"
# actually sending the command
reply = self.send_message("{0} {1}".format(self.SET_SPEED_3, speed), True, self.setanswer)
if speed == "HI" and reply[0] != "HI":
try:
reply_value = int(reply[0])
except ValueError:
raise (ValueError("Error setting speed. Reply was not a valid integer \"{0}\"".format(reply[0])))
if reply_value != speed:
raise ValueError(
"Value Error. Vacuum pump did not return correct return code. Send: \"{0}\". "
"Received: \"{1}\".".format(speed, reply_value)
)
@command
def vacuum_pv(self):
"""
Reads the process value (actual pressure) for the vacuum in any mode
:return: call back to send_message with a request to return a value
"""
return self.send_message(self.GET_CURRENT_PRESSURE_3, True, self.getanswer)
@command
def start(self):
"""
Starts the current function.
:return: True if pump responds properly, else a ValueError is raised.
"""
self.logger.debug("Starting operation...")
reply = self.send_message(self.START_3, True, self.setanswer)
try:
reply_value = int(reply[0])
except ValueError:
raise (ValueError("Error starting operation. Reply was not a valid integer \"{0}\"".format(reply[0])))
if reply_value == 1:
self.logger.debug("Done.")
return True
elif reply_value == "":
raise ValueError("Value Error. Vacuum pump appears to be already running!")
else:
raise ValueError(
"Value Error. Vacuum pump did not return correct return code. Expected: 1. "
"Received: \"{0}\".".format(reply_value)
)
@command
def stop(self, stop_mode=None):
"""
Stops the current function. This does not utilise the different stop parameters, but they seem to serve
no apparent purpose anyway.
:return: True if pump responds properly, else a ValueError is raised.
"""
self.logger.debug("Stopping operation...")
reply = self.send_message(self.STOP_3, True, self.setanswer)
try:
reply_value = int(reply[0])
except ValueError:
raise (ValueError(
"Error stopping operation. Reply was not a valid integer \"{0}\"".format(reply[0])))
if reply_value != 0:
raise ValueError(
"Value Error. Vacuum pump did not return correct return code. Expected: {0}. "
"Received: \"{1}\".".format(stop_mode, reply_value)
)
self.logger.debug("Done.")
@command
def vent(self, vent_status=1):
"""
Controls the vent valve.
:param vent_status: 0: valve closed; 1: valve open; 2: vent to atmospheric pressure
:return: True if pump responds properly, else a ValueError is raised.
"""
try:
# type checking of the vent status that the user provided
vent_status = int(vent_status)
except ValueError:
raise (ValueError("Error while venting. Vent status was not a valid integer \"{0}\"".format(vent_status)))
self.logger.debug("Actuating vent valve...")
# actually sending the command
reply = self.send_message("{0} {1}".format(self.VENT_VALVE_3, vent_status), True, self.setanswer)
try:
reply_value = int(reply[0])
except ValueError:
raise (ValueError("Error while venting. Reply was not a valid integer \"{0}\"".format(reply[0])))
if reply_value == vent_status:
self.logger.debug("Done.")
return True
else:
raise ValueError(
"Value Error. Vacuum pump did not return correct return code. Send: \"{0}\". "
"Received: \"{1}\".".format(vent_status, reply_value)
)
@command
def query_status(self):
"""
Queries the status of the pump. For individual parameters see manual p. 43
:return: dictionary of status items
"""
status = {} # initialise empty dict
reply = self.send_message(self.GET_STATUS_3, True) # get status as series of digits
# deconstruct reply and stuff into dict
try:
status["Pump state"] = reply[0]
status["In-line valve state"] = reply[1]
status["Coolant valve state"] = reply[2]
status["Vent valve state"] = reply[3]
status["Mode"] = reply[4]
status["Controller state"] = reply[5]
return status
except IndexError:
raise ValueError(
"Value Error. Reply does not conform to format. Received: \"{0}\".".format(reply)
)
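# Hedged note: IN_STAT returns one digit per field mapped above (pump state,
# in-line valve, coolant valve, vent valve, mode, controller state); what each
# digit value means is defined in the manual (p. 43) and is not reproduced here.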
@property
@command
def end_vacuum_sp(self):
"""
Reads the set point (target) for the switch off vacuum in mode Auto
:return: call back to send_message with a request to return a value
"""
return self.send_message(self.GET_SWITCHING_OFF_VAC_3, True, self.getanswer)
@end_vacuum_sp.setter
@command
def end_vacuum_sp(self, vacuum=None):
"""
Sets the switch off vacuum and returns the set point from the pump so the user can verify that it was successful
:param vacuum: (integer) the target vacuum
"""
try:
# type checking of the vacuum the user provided
vacuum = int(vacuum)
except ValueError:
raise (ValueError("Error setting vacuum. Vacuum was not a valid integer \"{0}\"".format(vacuum)))
# actually sending the command
reply = self.send_message("{0} {1}".format(self.SET_SWITCHING_OFF_VAC_3, vacuum), True, self.setanswer)
try:
reply_value = int(reply[0])
except ValueError:
raise (ValueError("Error setting vacuum. Reply was not a valid integer \"{0}\"".format(reply[0])))
if reply_value != vacuum:
raise ValueError(
"Value Error. Vacuum pump did not return correct return code. Send: \"{0}\". "
"Received: \"{1}\".".format(vacuum, reply_value)
)
@property
@command
def runtime_sp(self):
"""
Reads the set point (target) for the run time in mode Auto
:return: call back to send_message with a request to return a value
"""
return self.send_message(self.GET_RUNTIME_3, True, self.timeanswer)
@runtime_sp.setter
@command
def runtime_sp(self, time=None):
"""
Sets the runtime and returns the set point from the pump so the user can verify that it was successful
:param time: (integer) the desired runtime
"""
# type checking the input
if not self.timepattern.fullmatch(time): # this is actually too conservative since the pump can deal with integers, but better confine an idiot to a smaller space than let him roam free
raise (ValueError("Error setting runtime. Runtime did not match the pattern: \"{0}\"".format(time)))
# actually sending the command
reply = self.send_message("{0} {1}".format(self.SET_RUNTIME_3, time), True, self.timeanswer)
if reply[0] != time:
raise ValueError(
"Value Error. Vacuum pump did not return correct return code. Send: \"{0}\". "
"Received: \"{1}\".".format(time, reply[0])
)
if __name__ == '__main__':
p = CVC3000(port="COM3")
p.open_connection()
p.initialise()
print("Vacuum sp {}".format(p.vacuum_sp))
p.vacuum_sp = 200
print(p.vacuum_sp)
p.start()
sleep(2)
p.stop()
sleep(2)
p.vent()
print(p.query_status())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/9/12 18:17
# @Author : JackyLUO
# @E-mail : lingluo@stumail.neu.edu.cn
# @Site :
# @File : eval.py
# @Software: PyCharm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
# files = ["Drishti-GS-results.csv", "RIM-ONE_r3-results.csv"]
files = ['refuge-test-results.csv']
FPRs = []
TPRs = []
AUCs = []
for i, file in enumerate(files):
df_cdr = pd.read_csv(file, usecols=['CDR'])
df_glau = pd.read_csv(file, usecols=['Glaucoma'])
df_cdr = df_cdr.values.tolist()
df_glau = df_glau.values.tolist()
fpr, tpr, _ = roc_curve(df_glau, df_cdr)
roc_auc = auc(fpr, tpr)
FPRs.append(fpr)
TPRs.append(tpr)
AUCs.append(roc_auc)
# df = pd.DataFrame({'FPR': fpr, 'TPR': tpr})
# df.to_csv(str(i) + "res.csv")
for fpr, tpr, roc_auc in zip(FPRs, TPRs, AUCs):
plt.plot(fpr, tpr, color='darkorange',
lw=2, label='ROC curve (area = %0.4f)' % roc_auc)
plt.xlim([0.0, 0.5])
plt.ylim([0.5, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
|
"""
The following classes are defined:
Register4
Register8
Register16
"""
from .. import wire
from .. import signal
from . import FLOP
Wire = wire.Wire
Bus4 = wire.Bus4
Bus8 = wire.Bus8
Bus16 = wire.Bus16
class Register4:
"""Construct a new 4-bit storage register.
Args:
data_bus: An object of type Bus4. The data input to the register.
enable: An object of type Wire. Enables the register.
clock: An object of type Wire or Clock. The clock input to the
register.
output_bus: An object of type Bus4. The output of the register. Takes
on the value of data_bus on the positive edges of clock if the
value of enable is 1.
Raises:
TypeError: If either data_bus or output_bus is not a bus of width 4.
"""
def __init__(self, data_bus, enable, clock, output_bus):
if len(data_bus) != 4:
raise TypeError(
"Expected bus of width 4, received bus of width {0}.".format(
len(data_bus)
)
)
if len(output_bus) != 4:
raise TypeError(
"Expected bus of width 4, received bus of width {0}.".format(
len(output_bus)
)
)
not_1 = Wire()
not_2 = Wire()
not_3 = Wire()
not_4 = Wire()
mux_bus = Bus4()
_Multiplexer2To1_4(enable, data_bus, output_bus, mux_bus)
FLOP.DFlipFlop(mux_bus[0], clock, output_bus[0], not_1)
FLOP.DFlipFlop(mux_bus[1], clock, output_bus[1], not_2)
FLOP.DFlipFlop(mux_bus[2], clock, output_bus[2], not_3)
FLOP.DFlipFlop(mux_bus[3], clock, output_bus[3], not_4)
self.data_bus = data_bus
self.enable = enable
self.clock = clock
self.output_bus = output_bus
def __str__(self):
str_ = ""
str_ += "data_bus: " + self.data_bus.__str__() + "\n"
str_ += "enable: " + str(self.enable.value) + "\n"
str_ += "clock: " + str(self.clock.value) + "\n"
str_ += "output_bus: " + self.output_bus.__str__()
return str_
def __call__(
self, *,
data_bus=None,
enable=None,
clock=None,
output_bus=None
):
if data_bus is not None:
self.data_bus.wire_values = data_bus
if enable is not None:
self.enable.value = enable
if clock is not None:
self.clock.value = clock
if output_bus is not None:
self.output_bus.wire_values = output_bus
class Register8:
"""Construct a new 8-bit storage register.
Args:
data_bus: An object of type Bus8. The data input to the register.
enable: An object of type Wire. Enables the register.
clock: An object of type Wire or Clock. The clock input to the
register.
output_bus: An object of type Bus8. The output of the register. Takes
on the value of data_bus on the positive edges of clock if the
value of enable is 1.
Raises:
TypeError: If either data_bus or output_bus is not a bus of width 8.
"""
def __init__(self, data_bus, enable, clock, output_bus):
if len(data_bus) != 8:
raise TypeError(
"Expected bus of width 8, received bus of width {0}.".format(
len(data_bus)
)
)
if len(output_bus) != 8:
raise TypeError(
"Expected bus of width 8, received bus of width {0}.".format(
len(output_bus)
)
)
not_1 = Wire()
not_2 = Wire()
not_3 = Wire()
not_4 = Wire()
not_5 = Wire()
not_6 = Wire()
not_7 = Wire()
not_8 = Wire()
mux_bus = Bus8()
_Multiplexer2To1_8(enable, data_bus, output_bus, mux_bus)
FLOP.DFlipFlop(mux_bus[0], clock, output_bus[0], not_1)
FLOP.DFlipFlop(mux_bus[1], clock, output_bus[1], not_2)
FLOP.DFlipFlop(mux_bus[2], clock, output_bus[2], not_3)
FLOP.DFlipFlop(mux_bus[3], clock, output_bus[3], not_4)
FLOP.DFlipFlop(mux_bus[4], clock, output_bus[4], not_5)
FLOP.DFlipFlop(mux_bus[5], clock, output_bus[5], not_6)
FLOP.DFlipFlop(mux_bus[6], clock, output_bus[6], not_7)
FLOP.DFlipFlop(mux_bus[7], clock, output_bus[7], not_8)
self.data_bus = data_bus
self.enable = enable
self.clock = clock
self.output_bus = output_bus
def __str__(self):
str_ = ""
str_ += "data_bus: " + self.data_bus.__str__() + "\n"
str_ += "enable: " + str(self.enable.value) + "\n"
str_ += "clock: " + str(self.clock.value) + "\n"
str_ += "output_bus: " + self.output_bus.__str__()
return str_
def __call__(
self, *,
data_bus=None,
enable=None,
clock=None,
output_bus=None
):
if data_bus is not None:
self.data_bus.wire_values = data_bus
if enable is not None:
self.enable.value = enable
if clock is not None:
self.clock.value = clock
if output_bus is not None:
self.output_bus.wire_values = output_bus
class Register16:
"""Construct a new 16-bit storage register.
Args:
data_bus: An object of type Bus16. The data input to the register.
enable: An object of type Wire. Enables the register.
clock: An object of type Wire or Clock. The clock input to the
register.
output_bus: An object of type Bus16. The output of the register. Takes
on the value of data_bus on the positive edges of clock if the
value of enable is 1.
Raises:
TypeError: If either data_bus or output_bus is not a bus of width 16.
"""
def __init__(self, data_bus, enable, clock, output_bus):
if len(data_bus) != 16:
raise TypeError(
"Expected bus of width 16, received bus of width {0}.".format(
len(data_bus)
)
)
if len(output_bus) != 16:
raise TypeError(
"Expected bus of width 16, received bus of width {0}.".format(
len(output_bus)
)
)
not_1 = Wire()
not_2 = Wire()
not_3 = Wire()
not_4 = Wire()
not_5 = Wire()
not_6 = Wire()
not_7 = Wire()
not_8 = Wire()
not_9 = Wire()
not_10 = Wire()
not_11 = Wire()
not_12 = Wire()
not_13 = Wire()
not_14 = Wire()
not_15 = Wire()
not_16 = Wire()
mux_bus = Bus16()
_Multiplexer2To1_16(enable, data_bus, output_bus, mux_bus)
FLOP.DFlipFlop(mux_bus[0], clock, output_bus[0], not_1)
FLOP.DFlipFlop(mux_bus[1], clock, output_bus[1], not_2)
FLOP.DFlipFlop(mux_bus[2], clock, output_bus[2], not_3)
FLOP.DFlipFlop(mux_bus[3], clock, output_bus[3], not_4)
FLOP.DFlipFlop(mux_bus[4], clock, output_bus[4], not_5)
FLOP.DFlipFlop(mux_bus[5], clock, output_bus[5], not_6)
FLOP.DFlipFlop(mux_bus[6], clock, output_bus[6], not_7)
FLOP.DFlipFlop(mux_bus[7], clock, output_bus[7], not_8)
FLOP.DFlipFlop(mux_bus[8], clock, output_bus[8], not_9)
FLOP.DFlipFlop(mux_bus[9], clock, output_bus[9], not_10)
FLOP.DFlipFlop(mux_bus[10], clock, output_bus[10], not_11)
FLOP.DFlipFlop(mux_bus[11], clock, output_bus[11], not_12)
FLOP.DFlipFlop(mux_bus[12], clock, output_bus[12], not_13)
FLOP.DFlipFlop(mux_bus[13], clock, output_bus[13], not_14)
FLOP.DFlipFlop(mux_bus[14], clock, output_bus[14], not_15)
FLOP.DFlipFlop(mux_bus[15], clock, output_bus[15], not_16)
self.data_bus = data_bus
self.enable = enable
self.clock = clock
self.output_bus = output_bus
def __str__(self):
str_ = ""
str_ += "data_bus: " + self.data_bus.__str__() + "\n"
str_ += "enable: " + str(self.enable.value) + "\n"
str_ += "clock: " + str(self.clock.value) + "\n"
str_ += "output_bus: " + self.output_bus.__str__()
return str_
def __call__(
self, *,
data_bus=None,
enable=None,
clock=None,
output_bus=None
):
if data_bus is not None:
self.data_bus.wire_values = data_bus
if enable is not None:
self.enable.value = enable
if clock is not None:
self.clock.value = clock
if output_bus is not None:
self.output_bus.wire_values = output_bus
class _Multiplexer2To1_4:
"""
This is an internal module for Register4. It multiplexes two 4-bit inputs
to a single 4-bit output.
"""
def __init__(
self,
select,
input_1_bus,
input_2_bus,
output_bus
):
vcc = Wire()
vcc.value = 1
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[0],
input_2_bus[0],
output_bus[0]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[1],
input_2_bus[1],
output_bus[1]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[2],
input_2_bus[2],
output_bus[2]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[3],
input_2_bus[3],
output_bus[3]
)
class _Multiplexer2To1_8:
"""
This is an internal module for Register8. It multiplexes two 8-bit inputs
to a single 8-bit output.
"""
def __init__(
self,
select,
input_1_bus,
input_2_bus,
output_bus
):
vcc = Wire()
vcc.value = 1
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[0],
input_2_bus[0],
output_bus[0]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[1],
input_2_bus[1],
output_bus[1]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[2],
input_2_bus[2],
output_bus[2]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[3],
input_2_bus[3],
output_bus[3]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[4],
input_2_bus[4],
output_bus[4]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[5],
input_2_bus[5],
output_bus[5]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[6],
input_2_bus[6],
output_bus[6]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[7],
input_2_bus[7],
output_bus[7]
)
class _Multiplexer2To1_16:
"""
This is an internal module for Register16. It multiplexes two 16-bit inputs
to a single 16-bit output.
"""
def __init__(
self,
select,
input_1_bus,
input_2_bus,
output_bus
):
vcc = Wire()
vcc.value = 1
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[0],
input_2_bus[0],
output_bus[0]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[1],
input_2_bus[1],
output_bus[1]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[2],
input_2_bus[2],
output_bus[2]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[3],
input_2_bus[3],
output_bus[3]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[4],
input_2_bus[4],
output_bus[4]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[5],
input_2_bus[5],
output_bus[5]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[6],
input_2_bus[6],
output_bus[6]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[7],
input_2_bus[7],
output_bus[7]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[8],
input_2_bus[8],
output_bus[8]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[9],
input_2_bus[9],
output_bus[9]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[10],
input_2_bus[10],
output_bus[10]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[11],
input_2_bus[11],
output_bus[11]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[12],
input_2_bus[12],
output_bus[12]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[13],
input_2_bus[13],
output_bus[13]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[14],
input_2_bus[14],
output_bus[14]
)
signal.Multiplexer2To1(
vcc,
select,
input_1_bus[15],
input_2_bus[15],
output_bus[15]
)
|
import unittest
from hamcrest import *
from src.students import Students
from key_generator.key_generator import generate
from src.exampleData import Data
class StudentsHamcrestTest(unittest.TestCase):
def setUp(self):
self.temp = Students(Data().example)
def test_add_student_positive(self):
result = ('Add student', 'Adam', 'Nowak', {'subjects': {}, 'remarks': {}})
assert_that(self.temp.addStudent(generate().get_key(), "Adam", "Nowak"), equal_to(result))
def test_add_the_same_students(self):
assert_that(calling(self.temp.addStudent)
.with_args(2, "Beata", "Jankowska"),
raises(Exception))
def test_add_student_surname_bad_type(self):
assert_that(calling(self.temp.addStudent)
.with_args(generate().get_key(), "Jan", 2),
raises(TypeError))
def test_add_student_name_bad_type(self):
assert_that(calling(self.temp.addStudent)
.with_args(generate().get_key(), True, "Kowalski"),
raises(TypeError))
def tearDown(self):
self.temp = None
|
from collections import Counter
def solveDay(myFile):
data = parseData(myFile)
print('Part 1: ', part1(data))
print('Part 2: ', part2(data))
def parseData(myFile):
return [[x.split() for x in line.split('|')]
for line in open(myFile).readlines()]
def part1(data):
return sum(
sum(len(seg) not in (5, 6) for seg in signal[1]) for signal in data)
def part2(data):
return sum(decoder(signal) for signal in data)
def decoder(signal):
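    # Digits 1, 7, 4 and 8 use a unique number of segments (2, 3, 4 and 7), so they
    # are identified directly; the remaining digits are deduced below from subset
    # relations with these known patterns.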
start = {2:'1', 3:'7', 4:'4', 7:'8'}
decoded = {start[x]:set(sig) for sig in signal[0] if (x:=len(sig)) in start}
sixes = [set(sig) for sig in signal[0] if len(sig)==6]
    decoded['6'] = [sig for sig in sixes if not (decoded['1'] < sig)][0]
sixes.remove(decoded['6'])
decoded['9'] = [sig for sig in sixes if decoded['4'] < sig][0]
sixes.remove(decoded['9'])
decoded['0'] = sixes[0]
fives = [set(sig) for sig in signal[0] if len(sig)==5]
decoded['3'] = [sig for sig in fives if decoded['1'] < sig][0]
fives.remove(decoded['3'])
decoded['5'] = [sig for sig in fives if decoded['6'] > sig][0]
fives.remove(decoded['5'])
decoded['2'] = fives[0]
decoded = {''.join(sorted(v)):k for k,v in decoded.items()}
    return int(''.join(decoded[''.join(sorted(sig))] for sig in signal[1]))
solveDay("./day1.txt")
|
# -*-coding:utf-8-*-
import re
from knlp.seq_labeling.crf.crf import CRFModel
from knlp.common.constant import KNLP_PATH
class Inference:
def __init__(self):
        self.label_prediction = []  # predicted label sequence
        self.out_sentence = []  # predicted segmentation result
def spilt_predict(self, in_put, file_path):
"""
ๅฐ่พๅ
ฅๅบๅๅๅฒไธบๅไธชๆฑๅญๅข๏ผไพๆฌก้ๅ
ฅ่พๅ
ฅ็้ข่ฎญ็ปๆจกๅไธญ๏ผ่ฟๅๅไธชๆฑๅญๅข็้ขๆต็ปๆใ
"""
crf = CRFModel()
        re_zh, re_no_zh = re.compile("([\u4E00-\u9FA5]+)"), re.compile("[^a-zA-Z0-9+#\n]")  # only Chinese characters are segmented
        processed_sentence = re_zh.split(in_put)  # split the input into blocks of Chinese characters
crf_model = crf.load_model(file_path)
for block in processed_sentence:
            if re_zh.match(block):  # segment the Chinese characters
blocking = list(block)
pred = [blocking]
                crf_pred = crf_model.test(pred)  # predict
self.out_sentence.extend(self.cut(pred, crf_pred))
crf_pred = sum(crf_pred, [])
self.label_prediction.append(crf_pred)
else:
                for char in re_no_zh.split(block):  # split off the remaining non-Chinese characters
if char:
self.label_prediction.append(char)
self.out_sentence.append(char)
break
def cut(self, sentence1, sentence2):
"""
ๆ็
งBEMSๆ ็ญพๅไธญๆๅ่ฏ๏ผๅๅฒๅฅๅญใ
"""
out_sent = []
sen1 = sum(sentence1, [])
sen2 = sum(sentence2, [])
begin = 0
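        # B starts a word, E ends it, S marks a single-character word;
        # M (middle-of-word) positions need no explicit handling here.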
for idx in range(len(sen1)):
if sen2[idx] == 'B':
begin = idx
elif sen2[idx] == 'S':
str = "".join(sen1[idx])
out_sent.append(str)
elif sen2[idx] == 'E':
next = idx + 1
str = "".join(sen1[begin:next])
out_sent.append(str)
begin = 0
return out_sent
if __name__ == "__main__":
test = Inference()
CRF_MODEL_PATH = KNLP_PATH + "/knlp/model/crf/hanzi_segment.pkl"
print("่ฏปๅๆฐๆฎ...")
to_be_pred = "ๅฌๅคฉๅฐไบ๏ผๆฅๅคฉ่ฟไผ่ฟๅ๏ผ"
test.spilt_predict(to_be_pred, CRF_MODEL_PATH)
print("POS็ปๆ๏ผ" + str(test.label_prediction))
print("ๆจกๅ้ขๆต็ปๆ๏ผ" + str(test.out_sentence))
|
import math
import torch
import numpy as np
from ..models import *
from ..data.two_dim import two_dim_ds
from ..data.mnist import MNIST
#from ..data.emnist import EMNIST
from ..data.celeba import CelebA
from .plotters import TwoDPlotter, ImPlotter
from torch.utils.data import TensorDataset
import torchvision.transforms as transforms
import os
from bridge.data import repeater
from torch.utils.data import DataLoader
import torch.distributed as dist
from torchvision.datasets import CIFAR10
cmp = lambda x: transforms.Compose([*x])
def get_plotter(runner, args):
dataset_tag = getattr(args, DATASET)
if dataset_tag == DATASET_2D:
return TwoDPlotter(nit=runner.nit, gammas=runner.langevin.gammas)
else:
return ImPlotter(plot_level = args.plot_level)
# Model
#--------------------------------------------------------------------------------
MODEL = 'Model'
BASIC_MODEL = 'Basic'
UNET_MODEL = 'UNET'
def get_model(args):
model_tag = getattr(args, MODEL)
if model_tag == BASIC_MODEL:
net = ScoreNetwork()
if model_tag == UNET_MODEL:
image_size=args.data.image_size
if image_size == 256:
channel_mult = (1, 1, 2, 2, 4, 4)
elif image_size == 64:
channel_mult = (1, 2, 3, 4)
elif image_size == 32:
channel_mult = (1, 2, 2, 2)
elif image_size == 28:
channel_mult = (1, 2, 2)
else:
raise ValueError(f"unsupported image size: {image_size}")
attention_ds = []
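        # Convert the configured attention resolutions into downsampling factors relative to image_size.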
for res in args.model.attention_resolutions.split(","):
attention_ds.append(image_size // int(res))
kwargs = {
"in_channels": args.data.channels,
"model_channels": args.model.num_channels,
"out_channels": args.data.channels,
"num_res_blocks": args.model.num_res_blocks,
"attention_resolutions": tuple(attention_ds),
"dropout": args.model.dropout,
"channel_mult": channel_mult,
"num_classes": args.num_data_classes,
"use_checkpoint": args.model.use_checkpoint,
"num_heads": args.model.num_heads,
"num_heads_upsample": args.model.num_heads_upsample,
"use_scale_shift_norm": args.model.use_scale_shift_norm
}
net = UNetModel(**kwargs)
return net
# Optimizer
#--------------------------------------------------------------------------------
def get_optimizers(net_f, net_b, lr):
return torch.optim.Adam(net_f.parameters(), lr=lr), torch.optim.Adam(net_b.parameters(), lr=lr)
# Dataset
#--------------------------------------------------------------------------------
DATASET = 'Dataset'
DATASET_TRANSFER = 'Dataset_transfer'
DATASET_2D = '2d'
DATASET_CELEBA = 'celeba'
DATASET_STACKEDMNIST = 'stackedmnist'
DATASET_CIFAR10 = 'cifar10'
DATASET_EMNIST = 'emnist'
def get_datasets(args):
dataset_tag = getattr(args, DATASET)
if args.transfer:
dataset_transfer_tag = getattr(args, DATASET_TRANSFER)
else:
dataset_transfer_tag = None
# INITIAL (DATA) DATASET
# 2D DATASET
if dataset_tag == DATASET_2D:
data_tag = args.data
npar = max(args.npar, args.cache_npar)
init_ds = two_dim_ds(npar, data_tag)
if dataset_transfer_tag == DATASET_2D:
data_tag = args.data_transfer
npar = max(args.npar, args.cache_npar)
final_ds = two_dim_ds(npar, data_tag)
mean_final = torch.tensor(0.)
var_final = torch.tensor(1.*10**3) #infty like
# CELEBA DATASET
if dataset_tag == DATASET_CELEBA:
train_transform = [transforms.CenterCrop(140), transforms.Resize(args.data.image_size), transforms.ToTensor()]
test_transform = [transforms.CenterCrop(140), transforms.Resize(args.data.image_size), transforms.ToTensor()]
if args.data.random_flip:
train_transform.insert(2, transforms.RandomHorizontalFlip())
root = os.path.join(args.data_dir, 'celeba')
init_ds = CelebA(root, split='train', transform=cmp(train_transform), download=False)
# MNIST DATASET
if dataset_tag == DATASET_STACKEDMNIST:
root = os.path.join(args.data_dir, 'mnist')
saved_file = os.path.join(root, "data.pt")
load = os.path.exists(saved_file)
load = args.load
init_ds = MNIST(root, load=load, source_root=root,
train=True, num_channels = args.data.channels,
imageSize=args.data.image_size,
device=args.device)
if dataset_transfer_tag == DATASET_STACKEDMNIST:
root = os.path.join(args.data_dir, 'mnist')
saved_file = os.path.join(root, "data.pt")
load = os.path.exists(saved_file)
load = args.load
final_ds = MNIST(root, load=load, source_root=root,
train=True, num_channels = args.data.channels,
imageSize=args.data.image_size,
device=args.device)
mean_final = torch.tensor(0.)
var_final = torch.tensor(1.*10**3)
# EMNIST DATASET
if dataset_tag == DATASET_EMNIST:
root = os.path.join(args.data_dir, 'EMNIST')
saved_file = os.path.join(root, "data.pt")
load = os.path.exists(saved_file)
load = args.load
init_ds = EMNIST(root, load=load, source_root=root,
train=True, num_channels = args.data.channels,
imageSize=args.data.image_size,
device=args.device)
if dataset_transfer_tag == DATASET_EMNIST:
root = os.path.join(args.data_dir, 'EMNIST')
saved_file = os.path.join(root, "data.pt")
load = os.path.exists(saved_file)
load = args.load
final_ds = EMNIST(root, load=load, source_root=root,
train=True, num_channels = args.data.channels,
imageSize=args.data.image_size,
device=args.device)
mean_final = torch.tensor(0.)
var_final = torch.tensor(1.*10**3)
# CIFAR 10 DATASET
if dataset_tag == DATASET_CIFAR10:
train_transform = [transforms.Resize(args.data.image_size), transforms.ToTensor()]
test_transform = [transforms.Resize(args.data.image_size), transforms.ToTensor()]
if args.data.random_flip:
train_transform.insert(1, transforms.RandomHorizontalFlip())
path = os.path.join(args.data_dir, 'CIFAR10')
init_ds = CIFAR10(path, train=True, download=True, transform=cmp(train_transform))
#test_dataset = CIFAR10(path, train=False, download=True, transform=cmp(test_transform))
# FINAL (GAUSSIAN) DATASET (if no transfer)
if not(args.transfer):
if args.adaptive_mean:
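            # Estimate mean_final from a small batch of NAPPROX samples drawn from the initial dataset.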
NAPPROX = 100
vec = next(iter(DataLoader(init_ds, batch_size=NAPPROX)))[0]
mean_final = vec.mean()
mean_final = vec[0] * 0 + mean_final
var_final = eval(args.var_final)
final_ds = None
elif args.final_adaptive:
NAPPROX = 100
vec = next(iter(DataLoader(init_ds, batch_size=NAPPROX)))[0]
mean_final = vec.mean(axis=0)
var_final = vec.var()
final_ds = None
else:
mean_final = eval(args.mean_final)
var_final = eval(args.var_final)
final_ds = None
return init_ds, final_ds, mean_final, var_final
def get_schedule(args):
num_diffusion_timesteps = args.nit
n = num_diffusion_timesteps//2
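    # For the linspace/geomspace schedules below, gammas ramp up over the first half of the steps and mirror back down over the second half.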
if args.gamma_space == 'cosine':
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
max_beta=args.gamma_max
)
elif args.gamma_space == 'linspace':
gamma_half = np.linspace(args.gamma_min,args.gamma_max, n)
return np.concatenate([gamma_half, np.flip(gamma_half)])
elif args.gamma_space == 'geomspace':
gamma_half = np.geomspace(args.gamma_min, args.gamma_max, n)
return np.concatenate([gamma_half, np.flip(gamma_half)])
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
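    # Each beta_i is chosen so that the running product of (1 - beta) tracks alpha_bar at the discretised times.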
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
def get_dataloader(args, batch_size = None):
if batch_size is None:
batch_size = args.batch_size
def worker_init_fn(worker_id):
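        # Give every dataloader worker on every distributed rank its own numpy seed.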
np.random.seed(np.random.get_state()[1][0] + worker_id + dist.get_rank())
kwargs = {"num_workers": args.num_workers,
"pin_memory": args.pin_memory,
"worker_init_fn": worker_init_fn,
"drop_last": True}
init_ds, final_ds, mean_final, var_final = get_datasets(args)
    data_loader = repeater(DataLoader(init_ds, batch_size=batch_size, shuffle=True, **kwargs))
return data_loader, mean_final, var_final
|
import os, sys, subprocess
sys.path.append(os.path.dirname(__file__) + "/../lib")
from test_helper import create_virtenv, run_test
ENV_NAME = "formencode_test_env_" + os.path.basename(sys.executable)
SRC_DIR = os.path.abspath(os.path.join(ENV_NAME, "src"))
PYTHON_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "python"))
NOSETESTS_EXE = os.path.abspath(os.path.join(ENV_NAME, "bin", "nosetests"))
FORMENCODE_DIR = os.path.abspath(os.path.join(ENV_NAME, "src", "formencode"))
packages = ["nose==1.3.7", "pycountry==1.6", "pyDNS==2.3.6"]
packages += ["-e", "git+https://github.com/formencode/formencode.git@1.2.5#egg=formencode"]
create_virtenv(ENV_NAME, packages, force_create = True)
subprocess.check_call(["patch", "-p1"], stdin=open(os.path.join(os.path.dirname(__file__), "formencode.patch")), cwd=SRC_DIR)
expected = [{'ran': 201}]
expected_log_hash = '''
gAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAgAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAgAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAA=
'''
run_test([NOSETESTS_EXE], cwd=FORMENCODE_DIR, expected=expected, expected_log_hash=expected_log_hash)
|
import asyncio
from nats.aio.client import Client as NATS
from nats.errors import ConnectionClosedError, TimeoutError
async def main():
nc = NATS()
try:
# It is very likely that the demo server will see traffic from clients other than yours.
# To avoid this, start your own locally and modify the example to use it.
# await nc.connect(servers=["nats://127.0.0.1:4222"])
await nc.connect(servers=["nats://demo.nats.io:4222"])
except:
pass
async def message_handler(msg):
print(f"[Received on '{msg.subject}']: {msg.data.decode()}")
try:
# Interested in receiving 2 messages from the 'discover' subject.
sub = await nc.subscribe("discover", "", message_handler)
await sub.unsubscribe(2)
await nc.publish("discover", b'hello')
await nc.publish("discover", b'world')
# Following 2 messages won't be received.
await nc.publish("discover", b'again')
await nc.publish("discover", b'!!!!!')
except ConnectionClosedError:
print("Connection closed prematurely")
async def request_handler(msg):
print("[Request on '{} {}']: {}".format(msg.subject, msg.reply,
msg.data.decode()))
await nc.publish(msg.reply, b'OK')
if nc.is_connected:
# Subscription using a 'workers' queue so that only a single subscriber
# gets a request at a time.
await nc.subscribe("help", "workers", cb=request_handler)
try:
# Make a request expecting a single response within 500 ms,
# otherwise raising a timeout error.
msg = await nc.request("help", b'help please', 0.500)
print(f"[Response]: {msg.data}")
            # Make a roundtrip to the server to ensure the messages
            # we sent have been processed already.
await nc.flush(0.500)
        except TimeoutError:
print("[Error] Timeout!")
# Wait a bit for message to be dispatched...
await asyncio.sleep(1)
# Detach from the server.
await nc.close()
if nc.last_error is not None:
print(f"Last Error: {nc.last_error}")
if nc.is_closed:
print("Disconnected.")
if __name__ == '__main__':
asyncio.run(main())
|
from __future__ import unicode_literals
import json
from django import template
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse, HttpResponseForbidden
from django.shortcuts import get_object_or_404
from django.template import RequestContext, Template
from django.views.debug import ExceptionReporter
from django.views.decorators.http import require_POST
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import render
from .forms import ScribbleForm, PreviewForm, FieldScribbleForm
from .models import Scribble
from .utils import get_variables
def build_scribble_context(scribble):
"Create context for rendering a scribble or scribble preview."
context = {
'scribble': scribble,
}
return context
@require_POST
def preview_scribble(request, ct_pk):
"Render scribble content or return error information."
if not request.user.is_authenticated():
return HttpResponseForbidden()
content_type = get_object_or_404(ContentType, pk=ct_pk)
change_scribble = '{0}.change_{1}'.format(
content_type.app_label, content_type.model)
add_scribble = '{0}.add_{1}'.format(
content_type.app_label, content_type.model)
can_edit = request.user.has_perm(change_scribble)
can_create = request.user.has_perm(add_scribble)
if not (can_edit or can_create):
return HttpResponseForbidden()
results = {
'valid': False,
'html': '',
}
form = PreviewForm(request.POST)
if form.is_valid():
results['valid'] = True
if hasattr(template, 'engines'):
scribbler_template = template.engines['django'].from_string(form.cleaned_data.get('content', ''))
else:
scribbler_template = template.Template(form.cleaned_data.get('content', ''))
context = build_scribble_context(form.instance)
results['html'] = scribbler_template.render(context, request)
results['variables'] = get_variables(RequestContext(request, context))
else:
if hasattr(form, 'exc_info'):
# Pre Django 1.9
try:
exc_type, exc_value, tb = form.exc_info
reporter = ExceptionReporter(request, exc_type, exc_value, tb)
reporter.get_template_exception_info()
results['error'] = reporter.template_info
# Django >= 1.9: get_template_info() is moved from ExceptionReporter
# onto Template. We pass the data it returns from scribbler/forms.py
# to here.
except (ValueError, AttributeError):
                    # ValueError is raised when we pass in all 12 arguments
                    # in form.exc_info, and AttributeError is raised when
# ExceptionReporter.get_template_exception_info() is called.
results['error'] = form.exc_info
else:
# Not sure what to do here
results['error'] = {
'message': 'Content is not valid',
'line': '',
}
content = json.dumps(results, cls=DjangoJSONEncoder, ensure_ascii=False)
return HttpResponse(content, content_type='application/json')
@require_POST
def create_edit_scribble(request, scribble_id=None):
"Create a new Scribble or edit an existing one."
if not request.user.is_authenticated():
return HttpResponseForbidden()
if scribble_id is not None:
scribble = get_object_or_404(Scribble, pk=scribble_id)
if not request.user.has_perm('scribbler.change_scribble'):
return HttpResponseForbidden()
else:
scribble = Scribble()
if not request.user.has_perm('scribbler.add_scribble'):
return HttpResponseForbidden()
form = ScribbleForm(request.POST, instance=scribble)
results = {
'valid': False,
}
if form.is_valid():
results['valid'] = True
scribble = form.save()
results['url'] = scribble.get_save_url()
content = json.dumps(results, cls=DjangoJSONEncoder, ensure_ascii=False)
return HttpResponse(content, content_type='application/json')
@require_POST
def edit_scribble_field(request, ct_pk, instance_pk, field_name):
if not request.user.is_authenticated():
return HttpResponseForbidden()
content_type = get_object_or_404(ContentType, pk=ct_pk)
perm_name = '{0}.change_{1}'.format(content_type.app_label, content_type.model)
if not request.user.has_perm(perm_name):
return HttpResponseForbidden()
form = FieldScribbleForm(content_type, instance_pk, field_name, data=request.POST)
results = {
'valid': False,
}
if form.is_valid():
results['valid'] = True
form.save()
else:
results['error'] = {
'message': ','.join('%s' % e for e in form.errors.values()),
'line': '',
}
results['url'] = form.get_save_url()
content = json.dumps(results, cls=DjangoJSONEncoder, ensure_ascii=False)
return HttpResponse(content, content_type='application/json')
@require_POST
def delete_scribble(request, scribble_id):
"Delete an existing scribble."
if not request.user.is_authenticated():
return HttpResponseForbidden()
scribble = get_object_or_404(Scribble, pk=scribble_id)
if not request.user.has_perm('scribbler.delete_scribble'):
return HttpResponseForbidden()
scribble.delete()
results = {
'valid': True,
'url': scribble.get_save_url()
}
content = json.dumps(results, cls=DjangoJSONEncoder, ensure_ascii=False)
return HttpResponse(content, content_type='application/json')
|
#!/usr/bin/env python
# -*- encoding=utf8 -*-
#
# File: cmd.py
#
# Copyright (C) 2013 Hsin-Yi Chen (hychen)
# Author(s): Hsin-Yi Chen (hychen) <ossug.hychen@gmail.com>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import sys
from boliau import cmdlib
from boliau.plugins.tpl import actionlib
def do_sub():
cmd = cmdlib.as_command(actionlib.Sub())
cmd.add_argument('tplpath')
cmd.add_argument('--output', nargs='?')
cmd.add_argument('--var', nargs=2, action='append')
cmd.add_argument('--mvar', nargs=2, action='append')
args = cmd.parse_argv()
    print(cmd.call(args))
|
def Sum(num):
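    # Sum of the integers 1..num; equivalent to the closed form num * (num + 1) // 2.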
return sum(range(1,num+1))
def main():
num = input("Please enter your number to sum: ")
print(Sum(int(num)))
if __name__ == "__main__":
main()
|
from django.db.models.signals import post_delete
from django.dispatch import Signal, receiver
from baserow.contrib.database.table.cache import (
invalidate_table_model_cache_and_related_models,
)
from baserow.contrib.database.table.models import Table
table_created = Signal()
table_updated = Signal()
table_deleted = Signal()
tables_reordered = Signal()
@receiver(post_delete, sender=Table)
def invalidate_model_cache_when_table_deleted(sender, instance, **kwargs):
invalidate_table_model_cache_and_related_models(instance.id)
|
import numpy as np
from wavespectra.construct.helpers import (spread, check_coordinates,
arrange_inputs, make_dataset)
def jonswap(tp, dp, alpha, gamma=3.3, dspr=20, freqs=np.arange(0.04,1.0,0.02),
dirs=np.arange(0,360,10), coordinates=[], sumpart=True):
"""Constructs JONSWAP spectra from peak period and peak direction."""
check_coordinates(tp,coordinates)
#Arrange inputs
tp_m, dp_m, alpha_m, gamma_m, dspr_m = arrange_inputs(tp, dp, alpha, gamma, dspr)
f = freqs.reshape((-1,1))
#Calculate JONSWAP
fp = 1.0 / np.array(tp_m)
sig = np.where(f<=fp, 0.07, 0.09)
r = np.exp(-(f-fp)**2. / (2 * sig**2 * fp**2))
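    # The 0.0617 prefactor approximates g**2 / (2*pi)**4 with g = 9.81 m/s**2, as in the standard JONSWAP form.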
S = 0.0617 * alpha_m * f**(-5) * np.exp(-1.25*(f/fp)**(-4)) * gamma_m**r
#Apply spreading
G1 = spread(dp_m, dspr_m, dirs)
spec = S * G1
# sum partitions
if sumpart:
idimpart = [i for i, t in enumerate(coordinates) if t[0]=='part']
if idimpart:
spec = np.sum(spec, axis=idimpart[0])
coordinates.pop(idimpart[0])
return make_dataset(spec, freqs, dirs, coordinates)
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import re
import sys
import isharp.datahub.yaml_support as iYaml
import nameko.cli.main
import isharp.datahub.web.webconsole as web
from multiprocessing import Process
def main():
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
iYaml.set_up_unsafe_loader()
sys.exit(nameko.cli.main.main())
if __name__== "__main__" :
main()
|
# ABBA reversed is ABBA, print all such palindromes
user_input = input().split()
result = [word for word in user_input if word == word[::-1]]
result = list(set(result))
#result.sort(key = str.lower)
result.sort(key = str.upper)
print(*result, sep = ', ')
|
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import os
from PIL import Image
from torchvision import transforms
from towhee.trainer.models.vit.vit import ViT
from tests.unittests.mock_operators import PYTORCH_TRANSFORMER_OPERATOR_PATH, load_local_operator
from tests.unittests import VIT_DIR
class TransformerOperatorTest(unittest.TestCase):
name = 'B_16_imagenet1k'
test_dir = VIT_DIR
#weights_path = test_dir + 'B_16_imagenet1k.pth'
weights_path = None
model = ViT(name, weights_path=weights_path, pretrained=True)
img = Image.open(os.path.join(test_dir, 'img.jpg'))
tfms = transforms.Compose([transforms.Resize(model.image_size), transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), ])
img = tfms(img).unsqueeze(0)
with open(os.path.join(test_dir, 'labels_map.txt'), encoding='utf-8') as handler:
labels_map = json.load(handler)
_labels_map = []
for i in range(1000):
_labels_map.append(labels_map[str(i)])
labels_map = _labels_map
args = {'topk': 1, 'labels_map': labels_map}
def test_transformer_operator(self):
trans = load_local_operator(
'pytorch_transformer_operator', PYTORCH_TRANSFORMER_OPERATOR_PATH)
op = trans.PytorchTransformerOperator(self.model, self.args)
outputs = op(self.img)
self.assertEqual(
'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca', outputs.predict)
if __name__ == '__main__':
unittest.main()
|
from typing import List, Dict, Set
from nuscenes.map_expansion.map_api import NuScenesMap
from nuscenes.nuscenes import NuScenes
def get_egoposes_on_drivable_ratio(nusc: NuScenes, nusc_map: NuScenesMap, scene_token: str) -> float:
"""
Get the ratio of ego poses on the drivable area.
:param nusc: A NuScenes instance.
:param nusc_map: The NuScenesMap instance of a particular map location.
:param scene_token: The token of the current scene.
:return: The ratio of poses that fall on the driveable area.
"""
# Go through each sample in the scene.
sample_tokens = nusc.field2token('sample', 'scene_token', scene_token)
poses_all = 0
poses_valid = 0
for sample_token in sample_tokens:
# Poses are associated with the sample_data. Here we use the lidar sample_data.
sample_record = nusc.get('sample', sample_token)
sample_data_record = nusc.get('sample_data', sample_record['data']['LIDAR_TOP'])
pose_record = nusc.get('ego_pose', sample_data_record['ego_pose_token'])
# Check if the ego pose is on the driveable area.
ego_pose = pose_record['translation'][:2]
record = nusc_map.record_on_point(ego_pose[0], ego_pose[1], 'drivable_area')
if len(record) > 0:
poses_valid += 1
poses_all += 1
ratio_valid = poses_valid / poses_all
return ratio_valid
def get_disconnected_subtrees(connectivity: Dict[str, dict]) -> Set[str]:
"""
Compute lanes or lane_connectors that are part of disconnected subtrees.
:param connectivity: The connectivity of the current NuScenesMap.
:return: The lane_tokens for lanes that are part of a disconnected subtree.
"""
# Init.
connected = set()
pending = set()
# Add first lane.
all_keys = list(connectivity.keys())
first_key = all_keys[0]
all_keys = set(all_keys)
pending.add(first_key)
while len(pending) > 0:
# Get next lane.
lane_token = pending.pop()
connected.add(lane_token)
# Add lanes connected to this lane.
if lane_token in connectivity:
incoming = connectivity[lane_token]['incoming']
outgoing = connectivity[lane_token]['outgoing']
inout_lanes = set(incoming + outgoing)
for other_lane_token in inout_lanes:
if other_lane_token not in connected:
pending.add(other_lane_token)
disconnected = all_keys - connected
assert len(disconnected) < len(connected), 'Error: Bad initialization chosen!'
return disconnected
def drop_disconnected_lanes(nusc_map: NuScenesMap) -> NuScenesMap:
"""
Remove any disconnected lanes.
Note: This function is currently not used and we do not recommend using it. Some lanes that we do not drive on are
disconnected from the other lanes. Removing them would create a single connected graph. It also removes
meaningful information, which is why we do not drop these.
:param nusc_map: The NuScenesMap instance of a particular map location.
:return: The cleaned NuScenesMap instance.
"""
# Get disconnected lanes.
disconnected = get_disconnected_lanes(nusc_map)
# Remove lane.
nusc_map.lane = [lane for lane in nusc_map.lane if lane['token'] not in disconnected]
# Remove lane_connector.
nusc_map.lane_connector = [lane for lane in nusc_map.lane_connector if lane['token'] not in disconnected]
# Remove connectivity entries.
for lane_token in disconnected:
if lane_token in nusc_map.connectivity:
del nusc_map.connectivity[lane_token]
# Remove arcline_path_3.
for lane_token in disconnected:
if lane_token in nusc_map.arcline_path_3:
del nusc_map.arcline_path_3[lane_token]
# Remove connectivity references.
empty_connectivity = []
for lane_token, connectivity in nusc_map.connectivity.items():
connectivity['incoming'] = [i for i in connectivity['incoming'] if i not in disconnected]
connectivity['outgoing'] = [o for o in connectivity['outgoing'] if o not in disconnected]
if len(connectivity['incoming']) + len(connectivity['outgoing']) == 0:
empty_connectivity.append(lane_token)
for lane_token in empty_connectivity:
del nusc_map.connectivity[lane_token]
# To fix the map class, we need to update some indices.
nusc_map._make_token2ind()
return nusc_map
def get_disconnected_lanes(nusc_map: NuScenesMap) -> List[str]:
"""
Get a list of all disconnected lanes and lane_connectors.
:param nusc_map: The NuScenesMap instance of a particular map location.
:return: A list of lane or lane_connector tokens.
"""
disconnected = set()
for lane_token, connectivity in nusc_map.connectivity.items():
# Lanes which are disconnected.
inout_lanes = connectivity['incoming'] + connectivity['outgoing']
if len(inout_lanes) == 0:
disconnected.add(lane_token)
continue
# Lanes that only exist in connectivity (not currently an issue).
for inout_lane_token in inout_lanes:
if inout_lane_token not in nusc_map._token2ind['lane'] and \
inout_lane_token not in nusc_map._token2ind['lane_connector']:
disconnected.add(inout_lane_token)
# Lanes that are part of disconnected subtrees.
subtrees = get_disconnected_subtrees(nusc_map.connectivity)
disconnected = disconnected.union(subtrees)
return sorted(list(disconnected))
|
from PIL import Image, ImageDraw, ImageColor, ImageFont
import cv2
import skimage
import matplotlib.pyplot as plt
import numpy as np
import string
import random
import json
import pprint
import generate_code
image_width = 1920 # in pixel
image_height = 1080 # in pixel
border_padding = 100
text_color = "white"
font_type = "Arial.ttf"
px_pt_ratio = 20/29 # according to our image dimensions, 29 point = 20 px
font_size = 30 # in pixel - described in paper
num_sentinel_images = 44
num_buckets = 11 # last 3 buckets only used for tutorial
def pixel_to_point(num):
return int(num*(1/px_pt_ratio))
correct_code = {}
img_count = 1
for i in range(num_sentinel_images):
# generate random code chart
filename, valid_codes, coordinates = generate_code.create_codechart('/sentinel/sentinel_code_chart_' + str(img_count))
# pick random code
r = list(range(0, len(valid_codes)))
index = random.choice(r)
triplet = valid_codes[index]
triplet_coordinate = coordinates[triplet]
# to make sure that the cross is visible
while (triplet_coordinate[0] <= border_padding or triplet_coordinate[0] >= image_width-border_padding) or (triplet_coordinate[1] <= border_padding or triplet_coordinate[1] >=image_height-border_padding):
index = random.choice(r)
triplet = valid_codes[index]
triplet_coordinate = coordinates[triplet]
coordinate = (triplet_coordinate[0]+20, triplet_coordinate[1])
# create sentinel image
img = Image.new('RGB', (image_width, image_height), (126, 126, 126))
d = ImageDraw.Draw(img)
txt_color = ImageColor.getrgb(text_color)
font = ImageFont.truetype(font_type, pixel_to_point(font_size)) # takes in point value
d.text(coordinate, '+', txt_color, font)
# Stores sentinel images into its correct bucket
bucket_count = 1
bucket_dict = {}
for num in range(0, num_sentinel_images, int(num_sentinel_images/num_buckets)):
val = 'bucket' + str(bucket_count)
for x in range(int(num_sentinel_images/num_buckets)):
bucket_dict[num+x+1] = val
bucket_count+=1
bucket = bucket_dict[img_count]
filename = './sentinel_images/' + bucket + '/sentinel_image_' + str(img_count) + '.jpg'
img.save(filename)
correct_code[filename] = (triplet, coordinate, valid_codes)
img_count+=1
with open('./sentinel_images/sentinel_codes.json', 'w') as outfile:
json.dump(correct_code, outfile)
## FIXATION CROSS CODE ##
# img = Image.new('RGB', (image_width, image_height), (126, 126, 126))
# d = ImageDraw.Draw(img)
# txt_color = ImageColor.getrgb(text_color)
# font = ImageFont.truetype(font_type, pixel_to_point(font_size)) # takes in point value
# d.text((940, 520), '+', txt_color, font)
# filename = 'sentinel_images/test.jpg'
# img.save(filename)
|
#!/usr/bin/python3
import pytest
from brownie.exceptions import IncompatibleEVMVersion, VirtualMachineError
from brownie.network.contract import ProjectContract
from brownie.network.transaction import TransactionReceipt
def test_returns_contract_on_success(BrownieTester, accounts):
"""returns a Contract instance on successful deployment"""
c = accounts[0].deploy(BrownieTester, True)
assert type(c) == ProjectContract
def test_raises_on_revert(BrownieTester, accounts):
"""raises on revert if not in console"""
with pytest.raises(VirtualMachineError):
accounts[0].deploy(BrownieTester, False)
def test_returns_tx_on_revert_in_console(BrownieTester, accounts, console_mode):
"""returns a TransactionReceipt instance on revert in the console"""
tx = accounts[0].deploy(BrownieTester, False)
assert type(tx) == TransactionReceipt
assert tx.status == 0
def test_nonce(BrownieTester, accounts, rpc):
"""nonces increment properly"""
assert accounts[0].nonce == 0
accounts[0].deploy(BrownieTester, True)
assert accounts[0].nonce == 1
rpc.reset()
assert accounts[0].nonce == 0
def test_gas_price_manual(BrownieTester, accounts):
"""gas price is set correctly when specified in the call"""
balance = accounts[0].balance()
tx = accounts[0].deploy(BrownieTester, True, gas_price=100).tx
assert tx.gas_price == 100
assert accounts[0].balance() == balance - (tx.gas_used * 100)
def test_gas_price_automatic(BrownieTester, accounts, config, web3):
"""gas price is set correctly using web3.eth.gasPrice"""
config["active_network"]["gas_price"] = False
balance = accounts[0].balance()
tx = accounts[0].deploy(BrownieTester, True).tx
assert tx.gas_price == web3.eth.gasPrice
assert accounts[0].balance() == balance - (tx.gas_price * tx.gas_used)
def test_gas_price_config(BrownieTester, accounts, config, web3):
"""gas price is set correctly from the config"""
config["active_network"]["gas_price"] = 50
balance = accounts[0].balance()
tx = accounts[0].deploy(BrownieTester, True).tx
assert tx.gas_price == 50
assert accounts[0].balance() == balance - (50 * tx.gas_used)
def test_gas_limit_manual(BrownieTester, accounts):
"""gas limit is set correctly when specified in the call"""
tx = accounts[0].deploy(BrownieTester, True, gas_limit=3000000).tx
assert tx.gas_limit == 3000000
def test_gas_limit_automatic(BrownieTester, accounts, config):
"""gas limit is set correctly using web3.eth.estimateGas"""
config["active_network"]["gas_limit"] = False
tx = accounts[0].deploy(BrownieTester, True).tx
assert tx.gas_limit == tx.gas_used
def test_gas_limit_config(BrownieTester, accounts, config):
"""gas limit is set correctly from the config"""
config["active_network"]["gas_limit"] = 5000000
tx = accounts[0].deploy(BrownieTester, True).tx
assert tx.gas_limit == 5000000
config["active_network"]["gas_limit"] = False
def test_evm_version(BrownieTester, accounts, monkeypatch):
monkeypatch.setattr("psutil.Popen.cmdline", lambda s: ["-k", "byzantium"])
with pytest.raises(IncompatibleEVMVersion):
accounts[0].deploy(BrownieTester, True)
|
'''
Generate a random number of length, N and all digits
are unique
'''
from __future__ import print_function
import random
from collections import OrderedDict
# keep generating till all are unique
# This is a brute force approach where I store the digits generated
# so far in a OrderedDict and if the next random number is already
# there, i ignore it.
def randN(n):
assert n <= 10
digits = OrderedDict()
while len(digits) < n:
d = random.randint(0, 9)
if d == 0 and not digits.keys():
continue
else:
if not digits.get(str(d), None):
digits[str(d)] = 1
return int(''.join(digits.keys()))
# and couple of simpler approaches
# http://codereview.stackexchange.com/a/69799/58086
def randN1(n):
assert n<=10
digits = list(range(10))
while digits[0] == 0:
random.shuffle(digits)
return int(''.join(str(d) for d in digits[:n]))
def randN2(n):
assert n<=10
digits = [0]
while digits[0] == 0:
digits = random.sample(range(10), n)
return int(''.join(str(d) for d in digits))
def _assert(randi, n):
assert len(str(randi)) == n
assert len(set(str(randi))) == n
for _ in range(100000):
_assert(randN(10), 10)
_assert(randN(1), 1)
_assert(randN(5), 5)
_assert(randN1(10), 10)
_assert(randN1(1), 1)
_assert(randN1(5), 5)
_assert(randN2(10), 10)
_assert(randN2(1), 1)
_assert(randN2(5), 5)
|
#!/usr/bin/env python3
import glob
import os
import platform
import sys
import shutil
import subprocess
import re
import multiprocessing
import itertools
from contextlib import contextmanager
# Overall script settings
this_project_package = f'{os.getcwd()}/bdsg'
this_project_source = f'{this_project_package}/src'
this_project_include = f'{this_project_package}/include'
this_project_deps = f'{this_project_package}/deps' # Now deps come from submodules.
bindings_dir = f'{this_project_package}/cmake_bindings'
this_project_namespace_to_bind = 'bdsg'
python_module_name = 'bdsg'
# We have one global notion of what an include looks like
INCLUDE_REGEX = re.compile(r'^\s*#include\s+(["<])(.*)([">])')
# We have one master list of source code extensions
SOURCE_EXTENSIONS = ['hpp', 'cpp', 'h', 'cc', 'c']
def clone_repos():
''' download the most recent copy of binder from git '''
if not glob.glob("binder"):
print("Binder not found, cloning repo...")
# TODO: Change back to https://github.com/RosettaCommons/binder.git
# master when https://github.com/RosettaCommons/binder/pull/99 is
# fixed.
subprocess.check_call(['git', 'clone', 'https://github.com/RosettaCommons/binder.git', 'binder'])
parent = os.getcwd()
os.chdir('binder')
subprocess.check_call(['git', 'checkout', '788ab422f9e919478944d79d5890441a964dd1db'])
os.chdir(parent)
def build_binder():
'''
Check for binder executable in the location we expect it.
If it's not there, build binder with the included script.
Expects to run in the binder directory.
:return: location of executable, relative to project directory
'''
if not glob.glob("./build/*/*/bin/*"):
print("Binder not compiled, using packaged build.py...")
# TODO: Use CPU counting that accounts for container quotas?
subprocess.check_call([sys.executable, 'build.py', '--jobs', str(multiprocessing.cpu_count())])
return "binder/" + glob.glob('./build/*/*/bin/')[0] + "binder"
def all_sources_and_headers(include_deps=False):
'''
Find all source or include files relevant to the project.
Yields their paths.
Note that we count the libhandlegraph sources as part of this project's
sources. We include them even if include_deps is false and we aren't
including the other dependencies.
'''
# And the paths we want to look in.
# Always include libhandlegraph.
paths = [f'{this_project_source}/**/*', f'{this_project_include}/**/*', f'{this_project_deps}/libhandlegraph/src/**/*']
if include_deps:
# Include all dependencies if asked
paths.append(f'{this_project_deps}/**/*')
# Get an iterable of glob iterables that search all combinations
all_globs = (glob.glob(f'{f}.{e}', recursive=True) for f, e in itertools.product(paths, SOURCE_EXTENSIONS))
# Deduplicate overlapping globs
seen = set()
for filename in itertools.chain.from_iterable(all_globs):
if filename not in seen:
yield filename
seen.add(filename)
# files = list()
# searchroot = os.path.abspath(f'{this_project_source}/../')
# for (root,dirs,fils) in os.walk(searchroot):
# for fl in fils:
# if(fl.endswith(("hpp","cpp","h","cc","c")) and ("src" in root or "include" in root)):
# files.append(root+"/"+fl)
# print(f'found source files {files}')
# for filename in files:
@contextmanager
def clean_includes():
'''
Goes through source code and replaces all quote-format includes with carrot-style includes on entry.
Reverts changes on exit.
'''
changes_made = dict()
# find instances of includes we need to change
for filename in all_sources_and_headers():
changes_made[filename] = list()
with open(filename, 'r') as fh:
for line in fh:
match = INCLUDE_REGEX.match(line)
if match:
replacement = f'#include <{match.group(2)}>\n'
changes_made[filename].append((line, replacement))
if not changes_made[filename]:
del changes_made[filename]
# edit files we need to alter and then resave them
for filename in changes_made.keys():
filedata = ""
listInd = 0
with open(filename, 'r') as fh:
for line in fh:
if listInd < len(changes_made[filename]) and line == changes_made[filename][listInd][0]:
filedata += changes_made[filename][listInd][1]
listInd += 1
else:
filedata += line
with open(filename, 'w') as fh:
fh.write(filedata)
try:
yield
finally:
for filename in changes_made.keys():
filedata = ""
listInd = 0
with open(filename, 'r') as fh:
for line in fh:
if listInd < len(changes_made[filename]) and line == changes_made[filename][listInd][1]:
filedata += changes_made[filename][listInd][0]
listInd += 1
else:
filedata += line
with open(filename, 'w') as fh:
fh.write(filedata)
def make_all_includes():
'''
Generates an .hpp file with all includes in this project that need to be bound.
We collect all the include directives from this project's sources.
'''
# Start by always including the binding-generation-time hook file, with
# things Binder needs to see to generate good bindings.
all_includes = ['#include <bdsg/internal/binder_hook_bind.hpp>']
all_include_filename = 'all_cmake_includes.hpp'
for filename in all_sources_and_headers(include_deps=False):
# Then for each file found by any search
with open(filename, 'r') as fh:
for line in fh:
# Look at each line
match = INCLUDE_REGEX.match(line)
if match:
# This is an include directive. Parse it
is_relative = match.group(1) == '"'
included_path = match.group(2)
assert (match.group(1) == '"') == (match.group(3) == '"'), "Mismatched include delimiters in " + filename + " for " + included_path
                    # Relative includes aren't really relative paths, so we can't resolve them.
# Just collect all the includes as <>
all_includes.append(f'#include <{included_path}>')
all_includes = list(set(all_includes))
# This is to ensure that the list is always the same and doesn't
# depend on the filesystem state. Not technically necessary, but
# will cause inconsistent errors without it.
all_includes.sort()
with open(all_include_filename, 'w') as fh:
for include in all_includes:
fh.write(f'{include}\n')
return all_include_filename
def postprocess_bindings():
'''
Modify generated bindings files to correct Binder's STL-version-dependent code to portable code.
'''
# We apply each of these to all source files with sed.
transformations = ['s/class std::__cxx11::basic_string<char>/std::string/g', # We can't leave "class" in front of a non-template
's/std::__cxx11::basic_string<char>/std::string/g']
# TODO: Add transformations to catch problems from libc++ STL
for (directory, subdirectories, files) in os.walk(bindings_dir):
for filename in files:
if os.path.splitext(filename)[1].lstrip('.') in SOURCE_EXTENSIONS:
# For each source file, get its full path from where our process is
full_path = os.path.join(directory, filename)
for transformation in transformations:
# Apply all the transformations
subprocess.check_call(['sed', "-i.bak", transformation, full_path])
os.unlink(full_path + '.bak')
def make_bindings_code(all_includes_fn, binder_executable):
''' runs the binder executable with required parameters '''
# Find all the include directories for dependencies.
# Some dependency repos have an include and some have an src/include.
# BBHash and sparsepp have weird project structures and needs to be handled specially.
proj_include = (glob.glob(f'{this_project_deps}/*/include') +
glob.glob(f'{this_project_deps}/*/src/include') +
[f'{this_project_deps}/sparsepp',
f'{this_project_deps}/BBHash'])
# proj_include = " -I".join(proj_include)
proj_include = [f'-I{i}' for i in proj_include]
command = [binder_executable,
"--root-module", python_module_name,
"--prefix", f'{bindings_dir}/',
'--bind', this_project_namespace_to_bind,
"--config", "config.cfg",
all_includes_fn,
"--",
"-std=c++14",
f'-I{this_project_include}']
if platform.system() == 'Darwin':
# On (newer) Macs, Binder can't find the C++ STL because it is not in
# /usr/include but under a weird path returned by xcode-select -p and
# then /usr/include. See
# https://github.com/RosettaCommons/binder/issues/26#issuecomment-322538385
# and
# https://developer.apple.com/documentation/xcode_release_notes/xcode_10_release_notes#3035624
stl_path = os.path.join(subprocess.check_output(['xcode-select', '-p']).decode('utf8').strip(), 'usr', 'include', 'c++', 'v1')
command.append('-isystem' + stl_path)
# But we also need the MacOS SDK, which provides e.g. the "real" string.h that this STL depends on
sdk_path=subprocess.check_output(['xcrun', '-sdk', 'macosx', '--show-sdk-path']).decode('utf8').strip()
command.append('-isysroot' + sdk_path)
# Also make sure to look for libomp from macports or homebrew, like CMakeLists.txt does
command.append('-I/opt/local/include/libomp')
command.append('-I/usr/local/include')
command = command + proj_include
command.append("-DNDEBUG")
command.append("-v")
print('BINDER COMMAND:', ' '.join(command))
shutil.rmtree(bindings_dir, ignore_errors=True)
os.mkdir(bindings_dir)
subprocess.check_call(command)
# Do some post-processing on the bindings
postprocess_bindings()
def main():
clone_repos()
parent = os.getcwd()
os.chdir("binder")
binder_executable = build_binder()
os.chdir(parent)
with clean_includes():
all_includes_fn = make_all_includes()
make_bindings_code(all_includes_fn, binder_executable)
if __name__ == '__main__':
main()
|
#! /usr/bin/python
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from .basictoken import BASICToken as Token
from .flowsignal import FlowSignal
import math
import random
"""Implements a BASIC array, which may have up
to three dimensions of fixed size.
"""
class BASICArray:
def __init__(self, dimensions):
"""Initialises the object with the specified
number of dimensions. Maximum number of
dimensions is three
:param dimensions: List of array dimensions and their
corresponding sizes
"""
self.dims = min(3,len(dimensions))
if self.dims == 0:
raise SyntaxError("Zero dimensional array specified")
# Check for invalid sizes and ensure int
for i in range(self.dims):
if dimensions[i] < 0:
raise SyntaxError("Negative array size specified")
            # Allow integral sizes like 1.0, but not fractional ones like 1.1
if int(dimensions[i]) != dimensions[i]:
raise SyntaxError("Fractional array size specified")
dimensions[i] = int(dimensions[i])
if self.dims == 1:
self.data = [None for x in range(dimensions[0])]
elif self.dims == 2:
self.data = [[None for x in range(dimensions[1])] for x in range(dimensions[0])]
else:
self.data = [[[None for x in range(dimensions[2])] for x in range(dimensions[1])] for x in range(dimensions[0])]
def pretty_print(self):
print(str(self.data))
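# Illustrative usage sketch (hypothetical values): the parser's DIM handler
# below constructs these objects, so a statement like DIM A(2, 3) boils down to
#
#   arr = BASICArray([2, 3])   # arr.dims == 2, arr.data is a 2x3 grid of None
#   arr.data[1][2] = 42
#   arr.pretty_print()         # prints the nested-list representation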
"""Implements a BASIC parser that parses a single
statement when supplied.
"""
class BASICParser:
def __init__(self):
# Symbol table to hold variable names mapped
# to values
self.__symbol_table = {}
# Stack on which to store operands
# when evaluating expressions
self.__operand_stack = []
# List to hold contents of DATA statement
self.__data_values = []
# These values will be
# initialised on a per
# statement basis
self.__tokenlist = []
self.__tokenindex = None
# Set to keep track of extant loop variables
self.__loop_vars = set()
def parse(self, tokenlist, line_number):
"""Must be initialised with the list of
BASICTokens to be processed. These tokens
represent a BASIC statement without
its corresponding line number.
:param tokenlist: The tokenized program statement
:param line_number: The line number of the statement
:return: The FlowSignal to indicate to the program
how to branch if necessary, None otherwise
"""
self.__tokenlist = tokenlist
self.__tokenindex = 0
# Remember the line number to aid error reporting
self.__line_number = line_number
# Assign the first token
self.__token = self.__tokenlist[self.__tokenindex]
return self.__stmt()
def __advance(self):
"""Advances to the next token
"""
# Move to the next token
self.__tokenindex += 1
# Acquire the next token if there are any left
if not self.__tokenindex >= len(self.__tokenlist):
self.__token = self.__tokenlist[self.__tokenindex]
def __consume(self, expected_category):
"""Consumes a token from the list
"""
if self.__token.category == expected_category:
self.__advance()
else:
raise RuntimeError('Expecting ' + Token.catnames[expected_category] +
' in line ' + str(self.__line_number))
def __stmt(self):
"""Parses a program statement
:return: The FlowSignal to indicate to the program
how to branch if necessary, None otherwise
"""
if self.__token.category in [Token.FOR, Token.IF, Token.NEXT,
Token.ON]:
return self.__compoundstmt()
else:
return self.__simplestmt()
def __simplestmt(self):
"""Parses a non-compound program statement
:return: The FlowSignal to indicate to the program
how to branch if necessary, None otherwise
"""
if self.__token.category == Token.NAME:
self.__assignmentstmt()
return None
elif self.__token.category == Token.PRINT:
self.__printstmt()
return None
elif self.__token.category == Token.LET:
self.__letstmt()
return None
elif self.__token.category == Token.GOTO:
return self.__gotostmt()
elif self.__token.category == Token.GOSUB:
return self.__gosubstmt()
elif self.__token.category == Token.RETURN:
return self.__returnstmt()
elif self.__token.category == Token.STOP:
return self.__stopstmt()
elif self.__token.category == Token.INPUT:
self.__inputstmt()
return None
elif self.__token.category == Token.DIM:
self.__dimstmt()
return None
elif self.__token.category == Token.RANDOMIZE:
self.__randomizestmt()
return None
elif self.__token.category == Token.DATA:
self.__datastmt()
return None
elif self.__token.category == Token.READ:
self.__readstmt()
return None
else:
# Ignore comments, but raise an error
# for anything else
if self.__token.category != Token.REM:
raise RuntimeError('Expecting program statement in line '
+ str(self.__line_number))
def __printstmt(self):
"""Parses a PRINT statement, causing
the value that is on top of the
operand stack to be printed on
the screen.
"""
self.__advance() # Advance past PRINT token
# Check there are items to print
if not self.__tokenindex >= len(self.__tokenlist):
self.__logexpr()
print(self.__operand_stack.pop(), end='')
while self.__token.category == Token.COMMA:
self.__advance()
self.__logexpr()
print(self.__operand_stack.pop(), end='')
# Final newline
print()
def __letstmt(self):
"""Parses a LET statement,
consuming the LET keyword.
"""
self.__advance() # Advance past the LET token
self.__assignmentstmt()
def __gotostmt(self):
"""Parses a GOTO statement
:return: A FlowSignal containing the target line number
of the GOTO
"""
self.__advance() # Advance past GOTO token
self.__expr()
# Set up and return the flow signal
return FlowSignal(ftarget=self.__operand_stack.pop())
def __gosubstmt(self):
"""Parses a GOSUB statement
:return: A FlowSignal containing the first line number
of the subroutine
"""
self.__advance() # Advance past GOSUB token
self.__expr()
# Set up and return the flow signal
return FlowSignal(ftarget=self.__operand_stack.pop(),
ftype=FlowSignal.GOSUB)
def __returnstmt(self):
"""Parses a RETURN statement"""
self.__advance() # Advance past RETURN token
# Set up and return the flow signal
return FlowSignal(ftype=FlowSignal.RETURN)
def __stopstmt(self):
"""Parses a STOP statement"""
self.__advance() # Advance past STOP token
return FlowSignal(ftype=FlowSignal.STOP)
def __assignmentstmt(self):
"""Parses an assignment statement,
placing the corresponding
variable and its value in the symbol
table.
"""
left = self.__token.lexeme # Save lexeme of
# the current token
self.__advance()
if self.__token.category == Token.LEFTPAREN:
# We are assigning to an array
self.__arrayassignmentstmt(left)
else:
# We are assigning to a simple variable
self.__consume(Token.ASSIGNOP)
self.__logexpr()
# Check that we are using the right variable name format
right = self.__operand_stack.pop()
if left.endswith('$') and not isinstance(right, str):
raise SyntaxError('Syntax error: Attempt to assign non string to string variable' +
' in line ' + str(self.__line_number))
elif not left.endswith('$') and isinstance(right, str):
raise SyntaxError('Syntax error: Attempt to assign string to numeric variable' +
' in line ' + str(self.__line_number))
self.__symbol_table[left] = right
def __dimstmt(self):
"""Parses DIM statement and creates a symbol
table entry for an array of the specified
dimensions.
"""
self.__advance() # Advance past DIM keyword
# Extract the array name, append a suffix so
# that we can distinguish from simple variables
# in the symbol table
name = self.__token.lexeme + '_array'
self.__advance() # Advance past array name
self.__consume(Token.LEFTPAREN)
# Extract the dimensions
dimensions = []
if not self.__tokenindex >= len(self.__tokenlist):
self.__expr()
dimensions.append(self.__operand_stack.pop())
while self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
self.__expr()
dimensions.append(self.__operand_stack.pop())
self.__consume(Token.RIGHTPAREN)
if len(dimensions) > 3:
raise SyntaxError("Maximum number of array dimensions is three " +
"in line " + str(self.__line_number))
self.__symbol_table[name] = BASICArray(dimensions)
def __arrayassignmentstmt(self, name):
"""Parses an assignment to an array variable
:param name: Array name
"""
self.__consume(Token.LEFTPAREN)
# Capture the index variables
# Extract the dimensions
indexvars = []
if not self.__tokenindex >= len(self.__tokenlist):
self.__expr()
indexvars.append(self.__operand_stack.pop())
while self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
self.__expr()
indexvars.append(self.__operand_stack.pop())
try:
BASICarray = self.__symbol_table[name + '_array']
except KeyError:
raise KeyError('Array could not be found in line ' +
str(self.__line_number))
if BASICarray.dims != len(indexvars):
raise IndexError('Incorrect number of indices applied to array ' +
'in line ' + str(self.__line_number))
self.__consume(Token.RIGHTPAREN)
self.__consume(Token.ASSIGNOP)
self.__logexpr()
# Check that we are using the right variable name format
right = self.__operand_stack.pop()
if name.endswith('$') and not isinstance(right, str):
raise SyntaxError('Attempt to assign non string to string array' +
' in line ' + str(self.__line_number))
elif not name.endswith('$') and isinstance(right, str):
raise SyntaxError('Attempt to assign string to numeric array' +
' in line ' + str(self.__line_number))
# Assign to the specified array index
try:
if len(indexvars) == 1:
BASICarray.data[indexvars[0]] = right
elif len(indexvars) == 2:
BASICarray.data[indexvars[0]][indexvars[1]] = right
elif len(indexvars) == 3:
BASICarray.data[indexvars[0]][indexvars[1]][indexvars[2]] = right
except IndexError:
raise IndexError('Array index out of range in line ' +
str(self.__line_number))
def __inputstmt(self):
"""Parses an input statement, extracts the input
from the user and places the values into the
symbol table
"""
self.__advance() # Advance past INPUT token
prompt = '? '
if self.__token.category == Token.STRING:
# Acquire the input prompt
self.__logexpr()
prompt = self.__operand_stack.pop()
self.__consume(Token.COLON)
# Acquire the comma separated input variables
variables = []
if not self.__tokenindex >= len(self.__tokenlist):
if self.__token.category != Token.NAME:
raise ValueError('Expecting NAME in INPUT statement ' +
'in line ' + str(self.__line_number))
variables.append(self.__token.lexeme)
self.__advance() # Advance past variable
while self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
variables.append(self.__token.lexeme)
self.__advance() # Advance past variable
# Gather input from the user into the variables
inputvals = input(prompt).split(',', (len(variables)-1))
for variable in variables:
left = variable
try:
right = inputvals.pop(0)
if left.endswith('$'):
self.__symbol_table[left] = str(right)
elif not left.endswith('$'):
try:
self.__symbol_table[left] = int(right)
except ValueError:
try:
self.__symbol_table[left] = float(right)
except ValueError:
raise ValueError('String input provided to a numeric variable ' +
'in line ' + str(self.__line_number))
except IndexError:
# No more input to process
pass
def __datastmt(self):
"""Parses a DATA statement"""
self.__advance() # Advance past DATA token
# Acquire the comma separated values
if not self.__tokenindex >= len(self.__tokenlist):
self.__expr()
self.__data_values.append(self.__operand_stack.pop())
while self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
self.__expr()
self.__data_values.append(self.__operand_stack.pop())
def __readstmt(self):
"""Parses a READ statement."""
self.__advance() # Advance past READ token
# Acquire the comma separated input variables
variables = []
if not self.__tokenindex >= len(self.__tokenlist):
variables.append(self.__token.lexeme)
self.__advance() # Advance past variable
while self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
variables.append(self.__token.lexeme)
self.__advance() # Advance past variable
# Check that we have enough data values to fill the
# variables
if len(variables) > len(self.__data_values):
raise RuntimeError('Insufficient constants supplied to READ ' +
'in line ' + str(self.__line_number))
# Gather input from the DATA statement into the variables
for variable in variables:
left = variable
right = self.__data_values.pop(0)
if left.endswith('$'):
# Python inserts quotes around input data
if isinstance(right, int):
raise ValueError('Non-string input provided to a string variable ' +
'in line ' + str(self.__line_number))
else:
self.__symbol_table[left] = right
elif not left.endswith('$'):
try:
self.__symbol_table[left] = int(right)
except ValueError:
raise ValueError('String input provided to a numeric variable ' +
'in line ' + str(self.__line_number))
def __expr(self):
"""Parses a numerical expression consisting
of two terms being added or subtracted,
leaving the result on the operand stack.
"""
self.__term() # Pushes value of left term
# onto top of stack
while self.__token.category in [Token.PLUS, Token.MINUS]:
savedcategory = self.__token.category
self.__advance()
self.__term() # Pushes value of right term
# onto top of stack
rightoperand = self.__operand_stack.pop()
leftoperand = self.__operand_stack.pop()
if savedcategory == Token.PLUS:
self.__operand_stack.append(leftoperand + rightoperand)
else:
self.__operand_stack.append(leftoperand - rightoperand)
def __term(self):
"""Parses a numerical expression consisting
of two factors being multiplied together,
leaving the result on the operand stack.
"""
self.__sign = 1 # Initialise sign to keep track of unary
# minuses
self.__factor() # Leaves value of term on top of stack
while self.__token.category in [Token.TIMES, Token.DIVIDE, Token.MODULO]:
savedcategory = self.__token.category
self.__advance()
self.__sign = 1 # Initialise sign
self.__factor() # Leaves value of term on top of stack
rightoperand = self.__operand_stack.pop()
leftoperand = self.__operand_stack.pop()
if savedcategory == Token.TIMES:
self.__operand_stack.append(leftoperand * rightoperand)
elif savedcategory == Token.DIVIDE:
self.__operand_stack.append(leftoperand / rightoperand)
else:
self.__operand_stack.append(leftoperand % rightoperand)
def __factor(self):
"""Evaluates a numerical expression
and leaves its value on top of the
operand stack.
"""
if self.__token.category == Token.PLUS:
self.__advance()
self.__factor()
elif self.__token.category == Token.MINUS:
self.__sign = -self.__sign
self.__advance()
self.__factor()
elif self.__token.category == Token.UNSIGNEDINT:
self.__operand_stack.append(self.__sign*int(self.__token.lexeme))
self.__advance()
elif self.__token.category == Token.UNSIGNEDFLOAT:
self.__operand_stack.append(self.__sign*float(self.__token.lexeme))
self.__advance()
elif self.__token.category == Token.STRING:
self.__operand_stack.append(self.__token.lexeme)
self.__advance()
elif self.__token.category == Token.NAME and \
self.__token.category not in Token.functions:
# Check if this is a simple or array variable
if (self.__token.lexeme + '_array') in self.__symbol_table:
# Capture the current lexeme
arrayname = self.__token.lexeme + '_array'
# Array must be processed
# Capture the index variables
self.__advance() # Advance past the array name
try:
self.__consume(Token.LEFTPAREN)
except RuntimeError:
raise RuntimeError('Array used without index in line ' +
str(self.__line_number))
indexvars = []
if not self.__tokenindex >= len(self.__tokenlist):
self.__expr()
indexvars.append(self.__operand_stack.pop())
while self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
self.__expr()
indexvars.append(self.__operand_stack.pop())
BASICarray = self.__symbol_table[arrayname]
arrayval = self.__get_array_val(BASICarray, indexvars)
if arrayval is not None:
self.__operand_stack.append(self.__sign*arrayval)
else:
raise IndexError('Empty array value returned in line ' +
str(self.__line_number))
elif self.__token.lexeme in self.__symbol_table:
# Simple variable must be processed
self.__operand_stack.append(self.__sign*self.__symbol_table[self.__token.lexeme])
else:
raise RuntimeError('Name ' + self.__token.lexeme + ' is not defined' +
' in line ' + str(self.__line_number))
self.__advance()
elif self.__token.category == Token.LEFTPAREN:
self.__advance()
# Save sign because expr() calls term() which resets
# sign to 1
savesign = self.__sign
self.__logexpr() # Value of expr is pushed onto stack
if savesign == -1:
# Change sign of expression
self.__operand_stack[-1] = -self.__operand_stack[-1]
self.__consume(Token.RIGHTPAREN)
elif self.__token.category in Token.functions:
self.__operand_stack.append(self.__evaluate_function(self.__token.category))
else:
raise RuntimeError('Expecting factor in numeric expression' +
' in line ' + str(self.__line_number))
def __get_array_val(self, BASICarray, indexvars):
"""Extracts the value from the given BASICArray at the specified indexes
:param BASICarray: The BASICArray
:param indexvars: The list of indexes, one for each dimension
:return: The value at the indexed position in the array
"""
if BASICarray.dims != len(indexvars):
raise IndexError('Incorrect number of indices applied to array ' +
'in line ' + str(self.__line_number))
# Fetch the value from the array
try:
if len(indexvars) == 1:
arrayval = BASICarray.data[indexvars[0]]
elif len(indexvars) == 2:
arrayval = BASICarray.data[indexvars[0]][indexvars[1]]
elif len(indexvars) == 3:
arrayval = BASICarray.data[indexvars[0]][indexvars[1]][indexvars[2]]
except IndexError:
raise IndexError('Array index out of range in line ' +
str(self.__line_number))
return arrayval
def __compoundstmt(self):
"""Parses compound statements,
specifically if-then-else and
loops
:return: The FlowSignal to indicate to the program
how to branch if necessary, None otherwise
"""
if self.__token.category == Token.FOR:
return self.__forstmt()
elif self.__token.category == Token.NEXT:
return self.__nextstmt()
elif self.__token.category == Token.IF:
return self.__ifstmt()
elif self.__token.category == Token.ON:
return self.__ongosubstmt()
def __ifstmt(self):
"""Parses if-then-else
statements
:return: The FlowSignal to indicate to the program
how to branch if necessary, None otherwise
"""
self.__advance() # Advance past IF token
self.__logexpr()
# Save result of expression
saveval = self.__operand_stack.pop()
# Process the THEN part and save the jump value
self.__consume(Token.THEN)
if self.__token.category == Token.GOTO:
self.__advance() # Advance past optional GOTO
self.__expr()
then_jump = self.__operand_stack.pop()
# Jump if the expression evaluated to True
if saveval:
# Set up and return the flow signal
return FlowSignal(ftarget=then_jump)
# See if there is an ELSE part
if self.__token.category == Token.ELSE:
self.__advance()
if self.__token.category == Token.GOTO:
self.__advance() # Advance past optional GOTO
self.__expr()
# Set up and return the flow signal
return FlowSignal(ftarget=self.__operand_stack.pop())
else:
# No ELSE action
return None
def __forstmt(self):
"""Parses for loops
:return: The FlowSignal to indicate that
a loop start has been processed
"""
# Set up default loop increment value
step = 1
self.__advance() # Advance past FOR token
# Process the loop variable initialisation
loop_variable = self.__token.lexeme # Save lexeme of
# the current token
if loop_variable.endswith('$'):
raise SyntaxError('Syntax error: Loop variable is not numeric' +
' in line ' + str(self.__line_number))
self.__advance() # Advance past loop variable
self.__consume(Token.ASSIGNOP)
self.__expr()
# Check that we are using the right variable name format
# for numeric variables
start_val = self.__operand_stack.pop()
# Advance past the 'TO' keyword
self.__consume(Token.TO)
# Process the terminating value
self.__expr()
end_val = self.__operand_stack.pop()
# Check if there is a STEP value
increment = True
if not self.__tokenindex >= len(self.__tokenlist):
self.__consume(Token.STEP)
# Acquire the step value
self.__expr()
step = self.__operand_stack.pop()
# Check whether we are decrementing or
# incrementing
if step == 0:
raise IndexError('Zero step value supplied for loop' +
' in line ' + str(self.__line_number))
elif step < 0:
increment = False
# Now determine the status of the loop
# If the loop variable is not in the set of extant
# variables, this is the first time we have entered the loop
# Note that we cannot use the presence of the loop variable in
# the symbol table for this test, as the same variable may already
# have been instantiated elsewhere in the program
if loop_variable not in self.__loop_vars:
self.__symbol_table[loop_variable] = start_val
# Also add loop variable to set of extant loop
# variables
self.__loop_vars.add(loop_variable)
else:
# We need to modify the loop variable
# according to the STEP value
self.__symbol_table[loop_variable] += step
# If the loop variable has reached the end value,
# remove it from the set of extant loop variables to signal that
# this is the last loop iteration
stop = False
if increment and self.__symbol_table[loop_variable] > end_val:
stop = True
elif not increment and self.__symbol_table[loop_variable] < end_val:
stop = True
if stop:
# Loop must terminate, so remove loop variable from set of
# extant loop variables and remove loop variable from
# symbol table
self.__loop_vars.remove(loop_variable)
del self.__symbol_table[loop_variable]
return FlowSignal(ftype=FlowSignal.LOOP_SKIP,
ftarget=loop_variable)
else:
# Set up and return the flow signal
return FlowSignal(ftype=FlowSignal.LOOP_BEGIN)
def __nextstmt(self):
"""Processes a NEXT statement that terminates
a loop
:return: A FlowSignal indicating that a loop
has been processed
"""
self.__advance() # Advance past NEXT token
return FlowSignal(ftype=FlowSignal.LOOP_REPEAT)
def __ongosubstmt(self):
"""Process the ON-GOSUB statement
:return: A FlowSignal indicating the subroutine line number
if the condition is true, None otherwise
"""
self.__advance() # Advance past ON token
self.__logexpr()
# Save result of expression
saveval = self.__operand_stack.pop()
# Process the GOSUB part and save the jump value
# if the condition is met
if saveval:
return self.__gosubstmt()
else:
return None
def __relexpr(self):
"""Parses a relational expression
"""
self.__expr()
# Since BASIC uses same operator for both
# assignment and equality, we need to check for this
if self.__token.category == Token.ASSIGNOP:
self.__token.category = Token.EQUAL
if self.__token.category in [Token.LESSER, Token.LESSEQUAL,
Token.GREATER, Token.GREATEQUAL,
Token.EQUAL, Token.NOTEQUAL]:
savecat = self.__token.category
self.__advance()
self.__expr()
right = self.__operand_stack.pop()
left = self.__operand_stack.pop()
if savecat == Token.EQUAL:
self.__operand_stack.append(left == right) # Push True or False
elif savecat == Token.NOTEQUAL:
self.__operand_stack.append(left != right) # Push True or False
elif savecat == Token.LESSER:
self.__operand_stack.append(left < right) # Push True or False
elif savecat == Token.GREATER:
self.__operand_stack.append(left > right) # Push True or False
elif savecat == Token.LESSEQUAL:
self.__operand_stack.append(left <= right) # Push True or False
elif savecat == Token.GREATEQUAL:
self.__operand_stack.append(left >= right) # Push True or False
def __logexpr(self):
"""Parses a logical expression
"""
self.__notexpr()
while self.__token.category in [Token.OR, Token.AND]:
savecat = self.__token.category
self.__advance()
self.__notexpr()
right = self.__operand_stack.pop()
left = self.__operand_stack.pop()
if savecat == Token.OR:
self.__operand_stack.append(left or right) # Push True or False
elif savecat == Token.AND:
self.__operand_stack.append(left and right) # Push True or False
def __notexpr(self):
"""Parses a logical not expression
"""
if self.__token.category == Token.NOT:
self.__advance()
self.__relexpr()
right = self.__operand_stack.pop()
self.__operand_stack.append(not right)
else:
self.__relexpr()
def __evaluate_function(self, category):
"""Evaluate the function in the statement
and return the result.
:return: The result of the function
"""
self.__advance() # Advance past function name
# Process arguments according to function
if category == Token.RND:
return random.random()
if category == Token.PI:
return math.pi
if category == Token.RNDINT:
self.__consume(Token.LEFTPAREN)
self.__expr()
lo = self.__operand_stack.pop()
self.__consume(Token.COMMA)
self.__expr()
hi = self.__operand_stack.pop()
self.__consume(Token.RIGHTPAREN)
try:
return random.randint(lo, hi)
except ValueError:
raise ValueError("Invalid value supplied to RNDINT in line " +
str(self.__line_number))
if category == Token.MAX:
self.__consume(Token.LEFTPAREN)
self.__expr()
value_list = [self.__operand_stack.pop()]
while self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
self.__expr()
value_list.append(self.__operand_stack.pop())
self.__consume(Token.RIGHTPAREN)
try:
return max(value_list)
except TypeError:
raise TypeError("Invalid type supplied to MAX in line " +
str(self.__line_number))
if category == Token.MIN:
self.__consume(Token.LEFTPAREN)
self.__expr()
value_list = [self.__operand_stack.pop()]
while self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
self.__expr()
value_list.append(self.__operand_stack.pop())
self.__consume(Token.RIGHTPAREN)
try:
return min(value_list)
except TypeError:
raise TypeError("Invalid type supplied to MIN in line " +
str(self.__line_number))
if category == Token.POW:
self.__consume(Token.LEFTPAREN)
self.__expr()
base = self.__operand_stack.pop()
self.__consume(Token.COMMA)
self.__expr()
exponent = self.__operand_stack.pop()
self.__consume(Token.RIGHTPAREN)
try:
return math.pow(base, exponent)
except ValueError:
raise ValueError("Invalid value supplied to POW in line " +
str(self.__line_number))
if category == Token.TERNARY:
self.__consume(Token.LEFTPAREN)
self.__logexpr()
condition = self.__operand_stack.pop()
self.__consume(Token.COMMA)
self.__expr()
whentrue = self.__operand_stack.pop()
self.__consume(Token.COMMA)
self.__expr()
whenfalse = self.__operand_stack.pop()
self.__consume(Token.RIGHTPAREN)
return whentrue if condition else whenfalse
if category == Token.MID:
self.__consume(Token.LEFTPAREN)
self.__expr()
instring = self.__operand_stack.pop()
self.__consume(Token.COMMA)
self.__expr()
start = self.__operand_stack.pop()
if self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
self.__expr()
end = self.__operand_stack.pop()
else:
end = None
self.__consume(Token.RIGHTPAREN)
try:
return instring[start:end]
except TypeError:
raise TypeError("Invalid type supplied to MID$ in line " +
str(self.__line_number))
if category == Token.INSTR:
self.__consume(Token.LEFTPAREN)
self.__expr()
haystackstring = self.__operand_stack.pop()
if not isinstance(haystackstring, str):
raise TypeError("Invalid type supplied to INSTR in line " +
str(self.__line_number))
self.__consume(Token.COMMA)
self.__expr()
needlestring = self.__operand_stack.pop()
start = end = None
if self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
self.__expr()
start = self.__operand_stack.pop()
if self.__token.category == Token.COMMA:
self.__advance() # Advance past comma
self.__expr()
end = self.__operand_stack.pop()
self.__consume(Token.RIGHTPAREN)
try:
return haystackstring.find(needlestring, start, end)
except TypeError:
raise TypeError("Invalid type supplied to INSTR in line " +
str(self.__line_number))
self.__consume(Token.LEFTPAREN)
self.__expr()
value = self.__operand_stack.pop()
self.__consume(Token.RIGHTPAREN)
if category == Token.SQR:
try:
return math.sqrt(value)
except ValueError:
raise ValueError("Invalid value supplied to SQR in line " +
str(self.__line_number))
elif category == Token.ABS:
try:
return abs(value)
except ValueError:
raise ValueError("Invalid value supplied to ABS in line " +
str(self.__line_number))
elif category == Token.ATN:
try:
return math.atan(value)
except ValueError:
raise ValueError("Invalid value supplied to ATN in line " +
str(self.__line_number))
elif category == Token.COS:
try:
return math.cos(value)
except ValueError:
raise ValueError("Invalid value supplied to COS in line " +
str(self.__line_number))
elif category == Token.EXP:
try:
return math.exp(value)
except ValueError:
raise ValueError("Invalid value supplied to EXP in line " +
str(self.__line_number))
elif category == Token.INT:
try:
return math.floor(value)
except ValueError:
raise ValueError("Invalid value supplied to INT in line " +
str(self.__line_number))
elif category == Token.ROUND:
try:
return round(value)
except TypeError:
raise TypeError("Invalid type supplied to LEN in line " +
str(self.__line_number))
elif category == Token.LOG:
try:
return math.log(value)
except ValueError:
raise ValueError("Invalid value supplied to LOG in line " +
str(self.__line_number))
elif category == Token.SIN:
try:
return math.sin(value)
except ValueError:
raise ValueError("Invalid value supplied to SIN in line " +
str(self.__line_number))
elif category == Token.TAN:
try:
return math.tan(value)
except ValueError:
raise ValueError("Invalid value supplied to TAN in line " +
str(self.__line_number))
elif category == Token.CHR:
try:
return chr(value)
except TypeError:
raise TypeError("Invalid type supplied to CHR$ in line " +
str(self.__line_number))
except ValueError:
raise ValueError("Invalid value supplied to CHR$ in line " +
str(self.__line_number))
elif category == Token.ASC:
try:
return ord(value)
except TypeError:
raise TypeError("Invalid type supplied to ASC in line " +
str(self.__line_number))
except ValueError:
raise ValueError("Invalid value supplied to ASC in line " +
str(self.__line_number))
elif category == Token.STR:
return str(value)
elif category == Token.VAL:
try:
numeric = float(value)
if numeric.is_integer():
return int(numeric)
return numeric
# Like other BASIC variants, non-numeric strings return 0
except ValueError:
return 0
elif category == Token.LEN:
try:
return len(value)
except TypeError:
raise TypeError("Invalid type supplied to LEN in line " +
str(self.__line_number))
elif category == Token.UPPER:
if not isinstance(value, str):
raise TypeError("Invalid type supplied to UPPER$ in line " +
str(self.__line_number))
return value.upper()
elif category == Token.LOWER:
if not isinstance(value, str):
raise TypeError("Invalid type supplied to LOWER$ in line " +
str(self.__line_number))
return value.lower()
else:
raise SyntaxError("Unrecognised function in line " +
str(self.__line_number))
def __randomizestmt(self):
"""Implements a function to seed the random
number generator
"""
self.__advance() # Advance past RANDOMIZE token
if not self.__tokenindex >= len(self.__tokenlist):
self.__expr() # Process the seed
seed = self.__operand_stack.pop()
random.seed(seed)
else:
random.seed()
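# Illustrative driver sketch (hypothetical names; the tokenizer and the program
# store live in companion modules that are not shown here). A host interpreter
# would tokenize one statement at a time, strip its line number token, and act
# on the returned FlowSignal, e.g.
#
#   parser = BASICParser()
#   flowsignal = parser.parse(statement_tokens, line_number)
#   if flowsignal is None:
#       pass                                    # fall through to the next statement
#   elif flowsignal.ftype == FlowSignal.GOSUB:
#       return_stack.append(next_line_number)   # remember where to resume
#       jump_to(flowsignal.ftarget)
#   elif flowsignal.ftype == FlowSignal.RETURN:
#       jump_to(return_stack.pop())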
|
import pysrt
import os
from bs4 import BeautifulSoup
import sys
import ntpath
def extractHTML(x,y):
soup = BeautifulSoup(open("Friends-Script/season01/0101.html"))
index = 1
srtdict = {}
title = ''.join(soup.find('h1'))
fname = "Friends-Script/season01/0101.html"
ntpath.basename(fname)
subs = pysrt.open('Friends-Subtitles/S01/0101.srt')
fileLen = len(subs)
head, tail = ntpath.split(fname)
filename, dot, ext = tail.partition('.')
for node in soup.findAll('p'):
dialogues = ''.join(node.findAll(text=True))
#print "Dialogues: ", dialogues
character, colon, dia = dialogues.partition(':')
#print "DIA: ", dia
for l in range(0,fileLen):
starttime = subs[l].start
endtime = subs[l].end
#timedict[starttime] = endtime
#srtdict[l] = timedict
sub = subs[l].text
#print "SUB: ", sub
#sys.exit(0)
if sub in dia or dia in sub:
#print "sub", sub
srtdict[index] = {}
srtdict[index][starttime] = {}
srtdict[index][starttime][endtime] = {}
srtdict[index][starttime][endtime] = sub
index += 1
file = open(filename+'.txt', 'w+')
file.write(str(srtdict))
file.close()
def get_filepaths(directory):
file_paths = []
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
filepath = os.path.join(root, filename) # Join the two strings in order to form the full filepath.
file_paths.append(filepath) # Add it to the list.
return file_paths
# Run the above function and store its results in a variable.
full_file_paths = get_filepaths("/Users/shreyarajani/MSCS/MS-Sem4/Masters-Project/PythonCode/Preprocessing/Friends-Script")
def readingdir(x,y):
for f1 in x:
print "F1: ", f1
for f2 in y:
print "F2: ", f2
with open(f1, 'r') as file1:
with open(f2, 'r') as file2:
same = set(file1).intersection(file2)
#print "SAME: ", same
same.discard('\n')
with open('a.txt', 'w') as file_out:
for line in same:
file_out.write(line)
if __name__ == "__main__":
#Going through HTML files
x = get_filepaths('/Users/shreyarajani/MSCS/MS-Sem4/Masters-Project/PythonCode/Preprocessing/Friends-Script')
#Going through the SRT files
y = get_filepaths('/Users/shreyarajani/MSCS/MS-Sem4/Masters-Project/PythonCode/Preprocessing/Friends-Subtitles')
extractHTML(x,y)
|
# -*- coding: utf-8 -*-
import os
import pandas as pd
from pyEX import Client, PyEXception
def _get_data(symbol, start, end, client=None):
if client is None and not os.environ.get('IEX_TOKEN'):
raise PyEXception('Must provide pyEX client or set IEX_TOKEN environment variable')
elif client is None:
client = Client()
df = client.chartDF(symbol, '5y')
if start:
df = df[df.index > start]
if end:
df = df[df.index < end]
return df
def load_from_iex(symbols, start=None, end=None, client=None):
data = {}
for symbol in symbols:
data[symbol] = _get_data(symbol, start, end, client)
mi_data = pd.concat(data, axis=1)
return mi_data
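# Usage sketch (hypothetical symbols and dates; requires a valid IEX token via a
# pyEX Client instance or the IEX_TOKEN environment variable). The result has a
# (symbol, field) column MultiIndex, with field names following pyEX's chartDF
# output:
#
#   prices = load_from_iex(['AAPL', 'MSFT'], start='2019-01-01', end='2019-12-31')
#   aapl_close = prices['AAPL']['close']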
|
# -*- coding: utf-8 -*-
# Transformer based Mutation Recognition of SETH Corpus (NLP-NER)
# Pipeline for prediction of IOB-Tags in a given text using the best tuned model
# inspired by: https://huggingface.co/dslim/bert-base-NER
import torch # conda install pytorch=1.5
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
MODEL_NAME = "microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext"
UNIQUE_TAGS = ['O', 'B-Gene', 'I-Gene', 'B-SNP', 'I-SNP', 'B-RS']
DEVICE = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
print(f"\n Device: {DEVICE} \n")
def make_prediction(best_model_path, text):
best_model = AutoModelForTokenClassification.from_pretrained(best_model_path, num_labels=len(UNIQUE_TAGS)).to(DEVICE)
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
nlp = pipeline('ner', model=best_model, tokenizer=tokenizer)
prediction = nlp(text)
print(text, '\n', file=open("output.txt", "w"))
print(*prediction, sep = '\n', file=open("output.txt", "a"))
return prediction
def main():
print("\n -- Predict IOB-tags of a given text using best model -- \n")
model_path = "best_model" # path to tuned model that should be used for label prediciton
# text to be labeled
text = "In human glutathione transferase P1-1 (hGSTP1-1) position 146 is occupied by a glycine residue, which is located in a bend of a long loop that together with the alpha6-helix forms a substructure (GST motif II) maintained in all soluble GSTs. In the present study G146A and G146V mutants were generated by site-directed mutagenesis in order to investigate the function played by this conserved residue in folding and stability of hGSTP1-1."
# --> Mutations: G146A, G146V
print(f"Text to predict: \n{text}\n")
prediction = make_prediction(model_path, text)
print("\n Prediction of IOB-Tags (0 = O, 1 = B-Gene, 2 = I-Gene, 3 = B-SNP, 4 = I-SNP, 5 = B-RS) \n", *prediction, sep = '\n')
print("\n -- Done Prediction --")
return 0
if __name__ == "__main__":
main()
|
#!/home/ian/Documents/Instagram-pics/virtual/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
#!/usr/bin/python3
import copy
p1 = 0
p2 = 0
lines = open("day03.dat", "r").read().splitlines()
com = []
nocom = []
for i in range(len(lines[0])):
c0 = 0
c1 = 0
for line in lines:
if line[i] == "1":
c1 += 1
if line[i] == "0":
c0 += 1
if (c1 > c0):
com.append("0")
nocom.append("1")
elif (c1 < c0):
com.append("1")
nocom.append("0")
else:
print(i, c1, c0)
assert(0)
val = int("".join(com), 2)
val2 = int("".join(nocom), 2)
print("Part1:", val2*val)
#print("Part2: ", p2)
def mostCommon(vals, pos):
c0 = 0
c1 = 0
for val in vals:
if val[pos] == "0":
c0 += 1
elif val[pos] == "1":
c1 += 1
else:
assert(0)
return c1, c0
all = copy.deepcopy(lines)
tmp = []
t1 = 0
while len(all) > 1:
for i in range(len(all[0])):
#print("checking pos", i)
c1, c0 = mostCommon(all, i)
search = ""
if c1 >= c0:
search = "1"
else:
search = "0"
for v in all:
if v[i] == search:
tmp.append(v)
all = copy.deepcopy(tmp)
tmp = []
#print("Length", len(all))
if len(all) == 1:
t1 = int(all[0],2)
break
all = copy.deepcopy(lines)
while len(all) > 1:
for i in range(len(all[0])):
#print("checking pos", i)
c1, c0 = mostCommon(all, i)
search = ""
if c1 >= c0:
search = "0"
else:
search = "1"
for v in all:
if v[i] == search:
tmp.append(v)
all = copy.deepcopy(tmp)
tmp = []
#print("Length", len(all))
if len(all) == 1:
print("Part2:", int(all[0],2) * t1)
break
|
# -*- coding: utf-8 -*-
from django.http.request import QueryDict
from rest_framework import serializers
from ralph.api.tests._base import RalphAPITestCase
from ralph.data_center.tests.factories import IPAddressFactory
from ralph.security.api import SaveSecurityScanSerializer
from ralph.security.tests.factories import (
SecurityScanFactory,
VulnerabilityFactory
)
class SaveSecurityScanSerializerTests(RalphAPITestCase):
def setUp(self):
super().setUp()
self.security_scan = SecurityScanFactory()
def _dict2QueryDict(self, data_dict):
qdict = QueryDict('', mutable=True)
qdict.update(data_dict)
return qdict
def test_external_id_is_converted_to_local(self):
ip = IPAddressFactory(address="192.168.128.10")
vulnerability_1 = VulnerabilityFactory()
vulnerability_2 = VulnerabilityFactory()
data = self._dict2QueryDict({
'last_scan_date': '2015-01-01T00:00:00',
'scan_status': 'ok',
'next_scan_date': '2016-01-01T00:00:00',
'details_url': 'https://example.com/scan-deatils',
'rescan_url': 'https://example.com/rescan-url',
'host_ip': ip.address,
'vulnerabilities': vulnerability_1.id,
'external_vulnerabilities': vulnerability_2.external_vulnerability_id, # noqa
})
scan_serializer = SaveSecurityScanSerializer(
context={'request': None})
deserialized = scan_serializer.to_internal_value(data)
self.assertEqual(
deserialized['vulnerabilities'],
[vulnerability_1, vulnerability_2],
)
def test_error_raised_when_unknown_external_id(self):
ip = IPAddressFactory(address="192.168.128.10")
vulnerability = VulnerabilityFactory()
data = self._dict2QueryDict({
'last_scan_date': '2015-01-01T00:00:00',
'scan_status': 'ok',
'next_scan_date': '2016-01-01T00:00:00',
'details_url': 'https://example.com/scan-deatils',
'rescan_url': 'https://example.com/rescan-url',
'host_ip': ip.address,
'vulnerabilities': vulnerability.id,
'external_vulnerabilities': '12345678',
})
scan_serializer = SaveSecurityScanSerializer(
context={'request': None})
with self.assertRaises(serializers.ValidationError):
scan_serializer.to_internal_value(data)
|
# Description: The datetime.date Class
import time
from datetime import date
"""
The datetime.date object:
* A date object represents a date (year, month and day) in the current Gregorian calendar.
* The attributes of the datetime.date object are year, month, and day.
"""
# Constructor
# 1. All arguments are REQUIRED.
# 2. Arguments may be ints or longs.
# 3. The value of various parameters are as follows
# MINYEAR <= year <= MAXYEAR
# 1 <= month <= 12
# 1 <= day <= number of days in the given month and year
print "Constructor"
print date(year=2015, month=12, day=25)
# Class Methods
print "\nClass Methods"
print date.today() # Return the current local date. Equivalent to date.fromtimestamp(time.time()).
print date.fromtimestamp(time.time()) # Return the local date corresponding to the POSIX timestamp.
# NOTE: It needs built-in time module instead of datetime.time.
print date.fromordinal(1) # January 1 of year 1 is day number 1, January 2 of year 1 is day number 2 etc.
# Class Attributes
print "\nClass Attributes"
print date.min # The earliest representable date, date(MINYEAR, 1, 1).
print date.max # The latest representable date, date(MAXYEAR, 12, 31).
print date.resolution # The smallest possible difference between date objects, timedelta(days=1).
# year, month and day are instance attributes, so read them from a date instance
print date.today().year # Between MINYEAR and MAXYEAR inclusive.
print date.today().month # Between 1 and 12 inclusive.
print date.today().day # Between 1 and the number of days in the given month of the given year.
# Instance Methods
print "\nInstance Methods"
print date.today().replace(year=2012, month=2, day=23) # Return a date after replacing the portion with parameters.
print date.today().timetuple().tm_year # Return a time.struct_time whose values can be accessed by index and by
# attribute name. The attribute names are
"""
# +-------+-----------+---------------------------+
# | Index | Attribute | Description |
# +-------+-----------+---------------------------+
# | 0 | tm_year | (for example, 1993) |
# | 1 | tm_mon | range [1, 12] |
# | 2 | tm_mday | range [1, 31] |
# | 3 | tm_hour | range [0, 23] |
# | 4 | tm_min | range [0, 59] |
# | 5 | tm_sec | range [0, 61]; |
# | 6 | tm_wday | range [0, 6], Monday is 0 |
# | 7 | tm_yday | range [1, 366] |
# | 8 | tm_isdst | 0, 1 or -1; |
# +-------+-----------+---------------------------+
"""
print date.today().toordinal() # Return the Gregorian ordinal, where January 1 of year 1 has ordinal 1.
print date.today().weekday() # Return the day of the week as an integer, where Monday = 0 and Sunday = 6.
print date.today().isoweekday() # Return the day of the week as an integer, where Monday = 1 and Sunday = 7.
print date.today().isocalendar() # Return a 3-tuple, (ISO year, ISO week number, ISO weekday).
print date.today().isoformat() # Return a string representing the date in ISO 8601 format, 'YYYY-MM-DD'.
print date.today().__str__() # For a date d, str(d) is equivalent to d.isoformat().
print date.today().ctime() # date(2002, 12, 4).ctime() == 'Wed Dec 4 00:00:00 2002'.
# date.ctime() is equivalent to time.ctime(time.mktime(d.timetuple()))
print date.today().strftime("%d-%m-%Y") # Format date
print date.today().__format__("%d-%m-%Y") # Same as date.strftime("%d-%m-%Y")
|
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
""" JSON output helpers. """
from typing import Dict, List
from codechecker_report_converter.report import Report
def convert(reports: List[Report]) -> Dict:
""" Convert the given reports to JSON format. """
version = 1
json_reports = []
for report in reports:
json_reports.append(report.to_json())
return {"version": version, "reports": json_reports}
|
from pupa.models import Jurisdiction, JurisdictionSession
from .base import BaseImporter
class JurisdictionImporter(BaseImporter):
_type = 'jurisdiction'
model_class = Jurisdiction
related_models = {'sessions': JurisdictionSession}
def __init__(self, jurisdiction_id):
super(JurisdictionImporter, self).__init__(jurisdiction_id)
def get_object(self, data):
return self.model_class.objects.get(id=data['id'])
|
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from typing import Optional
@dataclass
class User:
username: str
password: str
salt: str
is_enabled: bool
class AuthProvider(metaclass=ABCMeta):
@abstractmethod
async def create(self, user: User) -> bool:
...
@abstractmethod
async def read(self, username: str) -> Optional[User]:
...
@abstractmethod
async def update(self, user: User) -> None:
...
@abstractmethod
async def delete(self, username: str) -> None:
...
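# Minimal in-memory provider sketch (illustrative only; a real implementation
# would back these calls with a database or directory service):
#
#   class InMemoryAuthProvider(AuthProvider):
#       def __init__(self):
#           self._users = {}
#       async def create(self, user: User) -> bool:
#           if user.username in self._users:
#               return False                    # refuse to overwrite an account
#           self._users[user.username] = user
#           return True
#       async def read(self, username: str) -> Optional[User]:
#           return self._users.get(username)
#       async def update(self, user: User) -> None:
#           self._users[user.username] = user
#       async def delete(self, username: str) -> None:
#           self._users.pop(username, None)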
|
import unittest
from enumerater import Enumerater
import random
from predictor import Predictor
import os
from ddt import ddt, data
import copy
from info_str import NAS_CONFIG
@ddt
class Test_pred(unittest.TestCase):
global test_info
test_info = list(range(10))
def _data_sample(self, network_pool):
block_n = random.randint(0,3)
graph_id = random.randint(0, len(network_pool) - 1)
blocks = []
for i in range(block_n):
idx = random.randint(0, len(network_pool) - 1)
blocks.append(network_pool[idx].graph_template)
graph = network_pool[graph_id].graph_template
return blocks, graph
@data(*test_info)
def test_predictor(self, _): # ddt passes each value from test_info as an argument
pred = Predictor()
_depth = random.randint(0, 25)
_width = random.randint(0, 1)
_max_depth = random.randint(0, _depth)
# print('##', self._depth, self._width, self._max_depth)
NAS_CONFIG['enum']['depth'] = _depth
NAS_CONFIG['enum']['width'] = _width
NAS_CONFIG['enum']['max_depth'] = _max_depth
enum = Enumerater()
network_pool = enum.enumerate()
blocks, graph = self._data_sample(network_pool)
ops = pred.predictor(blocks, graph)
self.assertEqual(list, type(ops))
self.assertTrue(len(ops) == len(graph))
for op in ops:
if op[0] != 'pooling':
self.assertGreater(int(op[0]), 1)
self.assertLessEqual(int(op[0]), 1024)
self.assertGreater(int(op[1]), 0)
self.assertLessEqual(int(op[1]), 11)
if __name__ == '__main__':
for i in range(1):
unittest.main()
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plotting functions."""
import matplotlib.pyplot as plt
import numpy as np
import Orange
import scikit_posthocs as sp
import seaborn as sns
from analysis import data_utils
from common import experiment_utils
_DEFAULT_TICKS_COUNT = 12
_DEFAULT_LABEL_ROTATION = 30
def _formatted_hour_min(seconds):
"""Turns |seconds| seconds into %H:%m format.
We don't use to_datetime() or to_timedelta(), because we want to
show hours larger than 23, e.g.: 24h:00m.
"""
time_string = ''
hours = int(seconds / 60 / 60)
minutes = int(seconds / 60) % 60
if hours:
time_string += '%dh' % hours
if minutes:
if hours:
time_string += ':'
time_string += '%dm' % minutes
return time_string
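# For example (illustrative values): _formatted_hour_min(5400) -> '1h:30m',
# _formatted_hour_min(900) -> '15m', and _formatted_hour_min(90000) -> '25h'
# (the minutes part is omitted when it is zero).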
def _formatted_title(benchmark_snapshot_df):
"""Return a formatted title with time and trial count."""
benchmark_name = benchmark_snapshot_df.benchmark.unique()[0]
stats_string = benchmark_name
stats_string += ' ('
snapshot_time = benchmark_snapshot_df.time.unique()[0]
stats_string += _formatted_hour_min(snapshot_time)
trial_count = benchmark_snapshot_df.fuzzer.value_counts().min()
stats_string += ', %d trials/fuzzer' % trial_count
stats_string += ')'
return stats_string
class Plotter:
"""Plotter that uses the same color for the same fuzzer."""
# Tableau 20 colors.
_COLOR_PALETTE = [
'#1f77b4',
'#98df8a',
'#d62728',
'#c7c7c7',
'#ff7f0e',
'#ff9896',
'#e377c2',
'#dbdb8d',
'#2ca02c',
'#c5b0d5',
'#7f7f7f',
'#9edae5',
'#aec7e8',
'#8c564b',
'#c49c94',
'#bcbd22',
'#ffbb78',
'#9467bd',
'#f7b6d2',
'#17becf',
]
def __init__(self, fuzzers, quick=False, logscale=False):
"""Instantiates plotter with list of |fuzzers|. If |quick| is True,
creates plots faster, but with less detail.
"""
self._fuzzer_colors = {
fuzzer: self._COLOR_PALETTE[idx % len(self._COLOR_PALETTE)]
for idx, fuzzer in enumerate(sorted(fuzzers))
}
self._quick = quick
self._logscale = logscale
# pylint: disable=no-self-use
def _write_plot_to_image(self,
plot_function,
data,
image_path,
wide=False,
**kwargs):
"""Writes the result of |plot_function(data)| to |image_path|.
If |wide|, then the image size will be twice as wide as normal.
"""
width = 6.4
height = 4.8
figsize = (2 * width, height) if wide else (width, height)
fig, axes = plt.subplots(figsize=figsize)
try:
plot_function(data, axes=axes, **kwargs)
fig.savefig(image_path, bbox_inches="tight")
finally:
plt.close(fig)
def coverage_growth_plot(self, benchmark_df, axes=None):
"""Draws coverage growth plot on given |axes|.
The fuzzer labels will be in the order of their mean coverage at the
snapshot time (typically, the end of experiment).
"""
benchmark_names = benchmark_df.benchmark.unique()
assert len(benchmark_names) == 1, 'Not a single benchmark data!'
benchmark_snapshot_df = data_utils.get_benchmark_snapshot(benchmark_df)
snapshot_time = benchmark_snapshot_df.time.unique()[0]
fuzzer_order = data_utils.benchmark_rank_by_mean(
benchmark_snapshot_df).index
axes = sns.lineplot(
y='edges_covered',
x='time',
hue='fuzzer',
hue_order=fuzzer_order,
data=benchmark_df[benchmark_df.time <= snapshot_time],
ci=None if self._quick else 95,
palette=self._fuzzer_colors,
ax=axes)
axes.set_title(_formatted_title(benchmark_snapshot_df))
# Indicate the snapshot time with a big red vertical line.
axes.axvline(x=snapshot_time, color='r')
# Move legend outside of the plot.
axes.legend(bbox_to_anchor=(1.00, 1),
borderaxespad=0,
loc='upper left',
frameon=False)
axes.set(ylabel='Edge coverage')
axes.set(xlabel='Time (hour:minute)')
if self._logscale:
axes.set_xscale('log')
ticks = np.logspace(
# Start from the time of the first measurement.
np.log10(experiment_utils.DEFAULT_SNAPSHOT_SECONDS),
np.log10(snapshot_time + 1), # Include tick at end time.
_DEFAULT_TICKS_COUNT)
else:
ticks = np.arange(
experiment_utils.DEFAULT_SNAPSHOT_SECONDS,
snapshot_time + 1, # Include tick at end time.
snapshot_time / _DEFAULT_TICKS_COUNT)
axes.set_xticks(ticks)
axes.set_xticklabels([_formatted_hour_min(t) for t in ticks])
sns.despine(ax=axes, trim=True)
def write_coverage_growth_plot(self, benchmark_df, image_path, wide=False):
"""Writes coverage growth plot."""
self._write_plot_to_image(self.coverage_growth_plot,
benchmark_df,
image_path,
wide=wide)
def violin_plot(self, benchmark_snapshot_df, axes=None):
"""Draws violin plot.
The fuzzer labels will be in the order of their median coverage.
"""
benchmark_names = benchmark_snapshot_df.benchmark.unique()
assert len(benchmark_names) == 1, 'Not a single benchmark data!'
assert benchmark_snapshot_df.time.nunique() == 1, 'Not a snapshot!'
fuzzer_order = data_utils.benchmark_rank_by_median(
benchmark_snapshot_df).index
# Another option is to use |boxplot| instead of |violinplot|. With
# boxplot the median/min/max/etc is more visible than on the violin,
# especially with distributions with high variance. It does not have
# however violinplot's kernel density estimation.
sns.violinplot(y='edges_covered',
x='fuzzer',
data=benchmark_snapshot_df,
order=fuzzer_order,
palette=self._fuzzer_colors,
ax=axes)
axes.set_title(_formatted_title(benchmark_snapshot_df))
axes.set(ylabel='Reached region coverage')
axes.set(xlabel='Fuzzer (highest median coverage on the left)')
axes.set_xticklabels(axes.get_xticklabels(),
rotation=_DEFAULT_LABEL_ROTATION,
horizontalalignment='right')
sns.despine(ax=axes, trim=True)
def write_violin_plot(self, benchmark_snapshot_df, image_path):
"""Writes violin plot."""
self._write_plot_to_image(self.violin_plot, benchmark_snapshot_df,
image_path)
def distribution_plot(self, benchmark_snapshot_df, axes=None):
"""Draws distribution plot.
The fuzzer labels will be in the order of their median coverage.
"""
benchmark_names = benchmark_snapshot_df.benchmark.unique()
assert len(benchmark_names) == 1, 'Not a single benchmark data!'
assert benchmark_snapshot_df.time.nunique() == 1, 'Not a snapshot!'
fuzzers_in_order = data_utils.benchmark_rank_by_median(
benchmark_snapshot_df).index
for fuzzer in fuzzers_in_order:
measurements_for_fuzzer = benchmark_snapshot_df[
benchmark_snapshot_df.fuzzer == fuzzer]
sns.distplot(measurements_for_fuzzer['edges_covered'],
hist=False,
label=fuzzer,
color=self._fuzzer_colors[fuzzer],
ax=axes)
axes.set_title(_formatted_title(benchmark_snapshot_df))
axes.legend(loc='upper right', frameon=False)
axes.set(xlabel='Edge coverage')
axes.set(ylabel='Density')
axes.set_xticklabels(axes.get_xticklabels(),
rotation=_DEFAULT_LABEL_ROTATION,
horizontalalignment='right')
def write_distribution_plot(self, benchmark_snapshot_df, image_path):
"""Writes distribution plot."""
self._write_plot_to_image(self.distribution_plot, benchmark_snapshot_df,
image_path)
def ranking_plot(self, benchmark_snapshot_df, axes=None):
"""Draws ranking plot.
The fuzzer labels will be in the order of their median coverage.
"""
benchmark_names = benchmark_snapshot_df.benchmark.unique()
assert len(benchmark_names) == 1, 'Not a single benchmark data!'
assert benchmark_snapshot_df.time.nunique() == 1, 'Not a snapshot!'
fuzzer_order = data_utils.benchmark_rank_by_median(
benchmark_snapshot_df).index
axes = sns.barplot(y='edges_covered',
x='fuzzer',
data=benchmark_snapshot_df,
order=fuzzer_order,
estimator=np.median,
palette=self._fuzzer_colors,
ax=axes)
axes.set_title(_formatted_title(benchmark_snapshot_df))
axes.set(ylabel='Reached region coverage')
axes.set(xlabel='Fuzzer (highest median coverage on the left)')
axes.set_xticklabels(axes.get_xticklabels(),
rotation=_DEFAULT_LABEL_ROTATION,
horizontalalignment='right')
sns.despine(ax=axes, trim=True)
def write_ranking_plot(self, benchmark_snapshot_df, image_path):
"""Writes ranking plot."""
self._write_plot_to_image(self.ranking_plot, benchmark_snapshot_df,
image_path)
def better_than_plot(self, better_than_table, axes=None):
"""Draws better than plot."""
cmap = ['white', '#005a32']
sns.heatmap(better_than_table,
vmin=0,
vmax=1,
cmap=cmap,
linewidths=0.5,
linecolor='0.5',
cbar=False,
ax=axes)
axes.set_title('One-tailed statistical test result')
axes.set(ylabel='If green, then fuzzer in the row')
xlabel = 'is statistically significantly better than fuzzer in column.'
axes.set(xlabel=xlabel)
axes.set_xticklabels(axes.get_xticklabels(),
rotation=_DEFAULT_LABEL_ROTATION,
horizontalalignment='right')
def write_better_than_plot(self, better_than_table, image_path):
"""Writes better than plot."""
self._write_plot_to_image(self.better_than_plot, better_than_table,
image_path)
def heatmap_plot(self, p_values, axes=None, symmetric=False):
"""Draws heatmap plot for visualizing statistical test results.
If |symmetric| is enabled, it masks out the upper triangle of the
p-value table (as it is redundant with the lower triangle).
"""
if symmetric:
mask = np.zeros_like(p_values)
mask[np.triu_indices_from(p_values)] = True
heatmap_args = {
'linewidths': 0.5,
'linecolor': '0.5',
'clip_on': False,
'square': True,
'cbar_ax_bbox': [0.85, 0.35, 0.04, 0.3],
'mask': mask if symmetric else None
}
sp.sign_plot(p_values, ax=axes, **heatmap_args)
def write_heatmap_plot(self, p_values, image_path, symmetric=False):
"""Writes heatmap plot."""
self._write_plot_to_image(self.heatmap_plot,
p_values,
image_path,
symmetric=symmetric)
def write_critical_difference_plot(self, average_ranks, num_of_benchmarks,
image_path):
"""Writes critical difference diagram."""
critical_difference = Orange.evaluation.compute_CD(
average_ranks.values, num_of_benchmarks)
Orange.evaluation.graph_ranks(average_ranks.values, average_ranks.index,
critical_difference)
fig = plt.gcf()
try:
fig.savefig(image_path, bbox_inches="tight")
finally:
plt.close(fig)
def unique_coverage_ranking_plot(self,
unique_region_cov_df_combined,
axes=None):
"""Draws unique_coverage_ranking plot. The fuzzer labels will be in
the order of their coverage."""
fuzzer_order = unique_region_cov_df_combined.sort_values(
by='unique_regions_covered', ascending=False).fuzzer
axes = sns.barplot(y='unique_regions_covered',
x='fuzzer',
data=unique_region_cov_df_combined,
order=fuzzer_order,
palette=self._fuzzer_colors,
ax=axes)
for patch in axes.patches:
axes.annotate(
format(patch.get_height(), '.2f'),
(patch.get_x() + patch.get_width() / 2., patch.get_height()),
ha='center',
va='center',
xytext=(0, 10),
textcoords='offset points')
sns.barplot(y='aggregated_edges_covered',
x='fuzzer',
data=unique_region_cov_df_combined,
order=fuzzer_order,
facecolor=(1, 1, 1, 0),
edgecolor='0.2',
ax=axes)
axes.set(ylabel='Reached unique edge coverage')
axes.set(xlabel='Fuzzer (highest coverage on the left)')
axes.set_xticklabels(axes.get_xticklabels(),
rotation=_DEFAULT_LABEL_ROTATION,
horizontalalignment='right')
sns.despine(ax=axes, trim=True)
def write_unique_coverage_ranking_plot(self, unique_region_cov_df_combined,
image_path):
"""Writes ranking plot for unique coverage."""
self._write_plot_to_image(self.unique_coverage_ranking_plot,
unique_region_cov_df_combined, image_path)
def pairwise_unique_coverage_heatmap_plot(self,
pairwise_unique_coverage_table,
axes=None):
"""Draws the heatmap to visualize the unique coverage between
each pair of fuzzers."""
heatmap_args = {
'annot': True,
'fmt': 'd',
'cmap': 'Blues',
'linewidths': 0.5
}
axes = sns.heatmap(pairwise_unique_coverage_table,
ax=axes,
**heatmap_args)
axes.set(ylabel='Not covered by')
axes.set(xlabel='Covered by')
def write_pairwise_unique_coverage_heatmap_plot(
self, pairwise_unique_coverage_table, image_path):
"""Writes pairwise unique coverage heatmap plot."""
self._write_plot_to_image(self.pairwise_unique_coverage_heatmap_plot,
pairwise_unique_coverage_table, image_path)
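# Usage sketch (hypothetical dataframe variables, assumed to be in the format
# produced by analysis.data_utils):
#
#   plotter = Plotter(fuzzers=benchmark_df.fuzzer.unique(), logscale=True)
#   plotter.write_coverage_growth_plot(benchmark_df, 'growth.png', wide=True)
#   plotter.write_violin_plot(benchmark_snapshot_df, 'violin.png')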
|
from PyQt5 import QtWidgets, QtGui, Qt
class CollapsableWidget(QtWidgets.QFrame):
def __init__(self, parent=None, direction=QtWidgets.QBoxLayout.LeftToRight, collapsed=False):
QtWidgets.QFrame.__init__(self, parent)
self.direction = direction
self.collapsed = collapsed
self.layout = QtWidgets.QBoxLayout(direction)
self.layout.setSpacing(0)
self.layout.setContentsMargins(0, 0, 0, 0)
self.box_frame = QtWidgets.QFrame()
self.box_frame.setObjectName("collapsable_frame")
self.box_layout = QtWidgets.QBoxLayout(direction)
self.box_layout.setSpacing(2)
self.box_layout.setContentsMargins(2, 2, 2, 2)
self.box_frame.setLayout(self.box_layout)
self.button_min = QtWidgets.QPushButton()
self.button_min.setMinimumWidth(10)
self.button_min.setMinimumHeight(10)
self.button_min.setSizePolicy(QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum))
self.button_min.setObjectName("btn_min")
self.button_min.clicked.connect(self.switchState)
self.layout.addWidget(self.button_min)
self.layout.addWidget(self.box_frame)
self.layout.setStretch(0, 0)
self.layout.setStretch(1, 1)
self.setLayout(self.layout)
self.setArrow()
if direction == QtWidgets.QBoxLayout.LeftToRight:
self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
else:
self.setSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
self.setObjectName("collapsable")
def addWidget(self, widget):
self.box_layout.addWidget(widget)
def addLayout(self, layout):
self.box_layout.addLayout(layout)
def addStretch(self, stretch):
self.box_layout.addStretch(stretch)
def setArrow(self):
self.button_min.setIcon(QtGui.QIcon("gui/icons/main/min-right.svg"))
def switchState(self):
if self.collapsed:
self.box_frame.show()
self.collapsed = False
else:
self.box_frame.hide()
self.collapsed = True
self.setArrow()
def collapse(self):
if not self.collapsed:
self.switchState()
def uncollapse(self):
if self.collapsed:
self.switchState()
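# Hedged usage sketch (not part of the original module): how the widget is
# meant to be embedded. It assumes PyQt5 is installed and that the icon path
# used by setArrow ("gui/icons/main/min-right.svg") exists relative to the
# working directory (a missing icon only leaves the button blank).
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    panel = CollapsableWidget(direction=QtWidgets.QBoxLayout.TopToBottom)
    panel.addWidget(QtWidgets.QLabel("Some collapsable content"))
    panel.addStretch(1)
    panel.show()
    sys.exit(app.exec_())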
|
import time
from . import utils
utils.compile_contract("OffchainDKG")
contract = utils.deploy_contract("OffchainDKG", should_add_simplified_call_interfaces=False)
time.sleep(1.0)
|
#!/usr/bin/env python3
from app.ioc import injector
from app.types import App
injector.get(App).run()
|
#!/usr/bin/env python
import sys
from cv2 import cv
def example2_4(image):
cv.NamedWindow("Example4-in")
cv.NamedWindow("Example4-out")
# show the image
cv.ShowImage("Example4-in", image)
# transform the input
out = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 3)
cv.Smooth(image, out, cv.CV_GAUSSIAN, 3, 3)
# show the smoothed image
cv.ShowImage("Example4-out", out)
#cv.ReleaseImage(out)
del(out)
# wait for key then cleanup
cv.WaitKey(0)
cv.DestroyWindow("Example4-in")
cv.DestroyWindow("Example4-out")
if __name__ == "__main__":
# check file is readable
try:
f = open(sys.argv[1], 'r')
except IndexError:
print >> sys.stderr, "You must supply a filename."
sys.exit(1)
except IOError:
raise
else:
f.close()
example2_4(cv.LoadImage(sys.argv[1]))
sys.exit(0)
|
"""
TensorFlow Models
==================
Creator: Til Gärtner
Example file showing the use of the provided models.
"""
# %% Imports
import tensorflow as tf
# %% Read Models
Wsym = tf.keras.models.load_model(r"..\models\Wsym_model_12-12-12")
Wdir = tf.keras.models.load_model(r"..\models\Wdir_model_18-18-18")
Pdir = tf.keras.models.load_model(r"..\models\Pdir_model_24-24-24")
# %% Test Evaluations
Fs = tf.eye(3, batch_shape=[25]) + tf.random_normal_initializer(mean=0, stddev=.1)([25,3,3])
Ws_Wsym, Ps_Wsym = Wsym(Fs)
Ws_Wdir, Ps_Wdir = Wdir(Fs)
Ps_Pdir = Pdir(Fs)
|
from typing import Callable, Dict, Tuple
import matplotlib.pyplot as plt
import numpy as np
class SparseMatrix:
    values: Dict[Tuple[int, int], float]
    shape: Tuple[int, int]
def __init__(self, values: Dict[Tuple[int, int], float], shape: Tuple[int, int]):
self.values = values.copy()
self.shape = shape
    def get(self, at):
        return self.values.get(at, 0)
def set(self, key, val):
self.values[key] = val
def __str__(self):
s = "--- Sparse Matrix ---\n"
for i in range(self.shape[0]):
for j in range(self.shape[1]):
a = self.values.get((i, j))
if a is None:
a = 0
s += "\t%f" % a
s += "\n"
s += "---------------------"
return s
def __add__(self, other):
if self.shape != other.shape:
            raise ArithmeticError("Shapes do not match")
res = SparseMatrix({}, self.shape)
for i in range(self.shape[0]):
for j in range(self.shape[1]):
a = self.values.get((i, j))
b = other.values.get((i, j))
if a or b:
a = a or 0
b = b or 0
res.set((i, j), a + b)
return res
def __mul__(self, other):
if isinstance(other, SparseMatrix):
n1, k1 = self.shape
n2, k2 = other.shape
if k1 != n2:
                raise ArithmeticError("Wrong shapes")
res = SparseMatrix({}, (n1, k2))
for i in range(n1):
for j in range(k2):
el = 0
for k in range(k1):
el += self.get((i, k)) * other.get((k, j))
if el != 0:
res.set((i, j), el)
return res
if isinstance(other, (int, float, complex)):
res = SparseMatrix(self.values, self.shape)
for k in res.values:
res.values[k] = res.values[k] * other
return res
raise ArithmeticError("Cannot multiply by %s" % type(other))
def __rmul__(self, other):
if isinstance(other, (int, float, complex)):
return self * other
raise ArithmeticError("Cannot multiply by %s" % type(other))
def newton(f: Callable[[float], float], df: Callable[[float], float], x0: float, e: float) -> Tuple[float, float]:
    """Find a root of f with Newton's method, starting from x0 and
    iterating until |f(x)| <= e. Returns (root, f(root))."""
    xn = x0
    delta = abs(f(x0))
while delta > e:
xn = xn - f(xn) / df(xn)
delta = abs(f(xn))
return (xn, f(xn))
def jacobi(A, b, n, x=None):
    """Run n Jacobi iterations for the linear system Ax = b, starting
    from x (zeros by default), and return the final iterate."""
    if x is None:
        x = np.zeros(A.shape[0])
D = np.diag(A)
R = A - np.diagflat(D)
for i in range(n):
x = (b - np.dot(R, x)) / D
return x
def jacobi_steps_error(A, b, n):
    """Return the Euclidean error against the exact solution after each
    of the first n Jacobi iterations."""
    x = None
    res = []
    r = np.linalg.solve(A, b)
for i in range(n):
x = jacobi(A, b, 1, x)
err = square_error(x, r)
res += [err]
return res
def square_error(m1, m2):
res = 0
for (a, b) in zip(m1, m2):
res += (a - b) ** 2
return np.sqrt(res)
if __name__ == "__main__":
a = SparseMatrix({
(0, 0): 1,
(0, 1): 2,
(1, 0): 3,
(1, 1): 4,
(2, 0): 4,
(2, 1): 5,
(3, 0): 5,
(3, 1): 1
}, (4, 2))
b = SparseMatrix({
(0, 0): 1,
(0, 1): 2,
(0, 2): 3,
(1, 0): 4,
(1, 1): 5,
(1, 2): 6
}, (2, 3))
m1 = np.matrix([[1, 2],
[3, 4],
[4, 5],
[5, 1]])
m2 = np.matrix([[1, 2, 3],
[4, 5, 6]])
print(a * b)
print(m1 * m2)
print(2 * SparseMatrix({(1, 1): 7}, (3, 3)))
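    # Hedged usage sketch for newton (not in the original demo): find the
    # positive root of f(x) = x**2 - 2, i.e. sqrt(2), starting from x0 = 1.
    root, residual = newton(lambda x: x ** 2 - 2, lambda x: 2 * x, 1.0, 1e-10)
    print("newton:", root, residual)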
A2 = np.array([[15, -7.3], [7.1, 12.0]])
b2 = np.array([11.0, -133.0])
A3 = np.array([[4.0, -2.0, 1.0], [1.0, -3.0, 2.0], [-1.0, 2.0, 6.0]])
b3 = np.array([1.0, -12.5, 31.0])
A4 = np.array([[-44.0, -2.0, 11.0, 1], [3.35, 123.0, -3.0, 2.0], [-1, -1.3, 23.0, 6.0], [5.4, -4.2, 1, -78]])
b4 = np.array([110.0, -123.5, 323.1, 51])
n = 20
t = np.arange(0, n, 1)
plt.plot(t, jacobi_steps_error(A2, b2, n), 'rs', t, jacobi_steps_error(A3, b3, n), 'g^', t,
jacobi_steps_error(A4, b4, n), 'bd')
plt.show()
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
A recurrent GAN model that draws images
based on dialog/description turns in sequence
"""
import os
import gc
import torch
import torch.nn as nn
from torch.nn import DataParallel
import numpy as np
from ..models.networks.generator_factory import GeneratorFactory
from ..models.networks.discriminator_factory import DiscriminatorFactory
from ..criticism.losses import LOSSES
from ..models.image_encoder import ImageEncoder
from ..models.sentence_encoder import SentenceEncoder
from ..models.condition_encoder import ConditionEncoder
from ..inference.optim import OPTIM
from ..definitions.regularizers import gradient_penalty, kl_penalty
from ..utils.logger import Logger
from ..models import _recurrent_gan
class RecurrentGAN():
def __init__(self, cfg):
"""A recurrent GAN model, each time step a generated image
(x'_{t-1}) and the current question q_{t} are fed to the RNN
to produce the conditioning vector for the GAN.
The following equations describe this model:
- c_{t} = RNN(h_{t-1}, q_{t}, x^{~}_{t-1})
- x^{~}_{t} = G(z | c_{t})
"""
super(RecurrentGAN, self).__init__()
self.generator = DataParallel(
GeneratorFactory.create_instance(cfg)).cuda()
self.generator_optimizer = OPTIM[cfg.generator_optimizer](
self.generator.parameters(), cfg.generator_lr, cfg.generator_beta1,
cfg.generator_beta2, cfg.generator_weight_decay)
# discriminator
self.discriminator = DataParallel(
DiscriminatorFactory.create_instance(cfg)).cuda()
self.discriminator_optimizer = OPTIM[cfg.discriminator_optimizer](
self.discriminator.parameters(), cfg.discriminator_lr,
cfg.discriminator_beta1, cfg.discriminator_beta2,
cfg.discriminator_weight_decay)
# word-level instruction encoder
self.sentence_encoder = nn.DataParallel(SentenceEncoder(cfg)).cuda()
self.sentence_encoder_optimizer = OPTIM[cfg.gru_optimizer](
self.sentence_encoder.parameters(), cfg.gru_lr)
# instruction-level encoder, implemented by GRU.
self.use_history = cfg.use_history
if self.use_history:
self.rnn = nn.DataParallel(nn.GRU(cfg.input_dim,
cfg.hidden_dim,
batch_first=False),
dim=1).cuda()
self.rnn_optimizer = OPTIM[cfg.rnn_optimizer](
self.rnn.parameters(), cfg.rnn_lr)
# layer norm for output of rnn
self.layer_norm = nn.DataParallel(nn.LayerNorm(cfg.hidden_dim)).cuda()
        # fuse the sentence embedding and the image vector into the condition fed to the RNN
self.condition_encoder = DataParallel(ConditionEncoder(cfg)).cuda()
feature_encoding_params = list(self.condition_encoder.parameters())
# image encoder
self.use_image_encoder = cfg.use_fg
if self.use_image_encoder:
self.image_encoder = DataParallel(
                ImageEncoder(cfg)).cuda()  # image encoder used by the generator
feature_encoding_params += list(self.image_encoder.parameters())
self.feature_encoders_optimizer = OPTIM['adam'](
feature_encoding_params, cfg.feature_encoder_lr)
# Criterion
self.criterion = LOSSES[cfg.criterion]()
self.aux_criterion = DataParallel(
torch.nn.BCELoss(reduction='none')).cuda()
self.cfg = cfg
self.logger = Logger(cfg.log_path, cfg.exp_name)
def train_batch(self, batch, epoch, iteration, visualizer, logger):
"""
The training scheme follows the following:
- Discriminator and Generator is updated every time step.
- RNN, SentenceEncoder and ImageEncoder parameters are
updated every sequence
@args:
batch:
image: (N, max_seq_len, C, H, W)
turn_lengths: (N, max_seq_len)
dialog_length: (N, )
turn_word_embedding: (N, max_seq_len, max_sent_len, embed_dim)
max_seq_len: the length of longest dialog in this batch
"""
batch_size = len(batch['image'])
max_seq_len = batch['image'].size(1)
prev_image = torch.FloatTensor(batch['background'])
prev_image = prev_image.unsqueeze(0) \
.repeat(batch_size, 1, 1, 1)
disc_prev_image = prev_image # (N, C, H, W)
# Initial inputs for the RNN set to zeros
hidden = torch.zeros(1, batch_size,
self.cfg.hidden_dim) # (1, N, hidden_dim)
prev_objects = torch.zeros(batch_size,
self.cfg.num_objects) # (N, num_objects)
teller_images = []
drawer_images = []
added_entities = []
for t in range(max_seq_len):
image = batch['image'][:, t] # (N, C, H, W)
turns_word_embedding = batch[
'turn_word_embedding'][:, t] # (N, max_sent_len, embed_dim)
turns_lengths = batch['turn_lengths'][:, t] # (batch_size, )
objects = batch['objects'][:, t] # (batch_size, )
seq_ended = t > (batch['dialog_length'] - 1) # (batch_size, )
image_feature_map, image_vec = self.image_encoder(prev_image)
turn_embedding, _ = self.sentence_encoder(turns_word_embedding,
turns_lengths)
rnn_condition = self.condition_encoder(turn_embedding, image_vec)
if self.use_history:
rnn_condition = rnn_condition.unsqueeze(
0
) # input vector for condition rnn, (1, batch_size, condition_dim)
output, hidden = self.rnn(rnn_condition, hidden)
output = output.squeeze(
0) # (batch_size, condition_output_dim)
output = self.layer_norm(output)
else:
output = rnn_condition
output = self.layer_norm(output)
fake_image, mu, logvar, sigma = self._forward_generator(
batch_size,
output.detach(
), # Instruction encoder is only optimized from discriminator.
image_feature_map)
visualizer.track_sigma(sigma)
hamming = objects - prev_objects
hamming = torch.clamp(hamming, min=0)
            # cast before subtracting so the mask also works where `-` is unsupported on bool tensors
            mask = (1 - seq_ended.to(torch.float32)).cuda()
d_loss, d_real, d_fake, aux_loss, discriminator_gradient = \
self._optimize_discriminator(image,
fake_image.detach(),
disc_prev_image,
output,
mask,
hamming,
self.cfg.gp_reg,
self.cfg.aux_reg)
g_loss, generator_gradient = \
self._optimize_generator(fake_image,
disc_prev_image.detach(),
output.detach(),
objects,
self.cfg.aux_reg,
mask,
mu,
logvar)
if self.cfg.teacher_forcing:
prev_image = image
else:
prev_image = fake_image
disc_prev_image = image
prev_objects = objects
if (t + 1) % 2 == 0:
prev_image = prev_image.detach()
rnn_grads = []
gru_grads = []
condition_encoder_grads = []
img_encoder_grads = []
if t == max_seq_len - 1:
rnn_gradient, gru_gradient, condition_gradient,\
img_encoder_gradient = self._optimize_rnn()
gru_grads.append(gru_gradient.data.cpu().numpy())
condition_encoder_grads.append(
condition_gradient.data.cpu().numpy())
if self.use_image_encoder:
img_encoder_grads.append(
img_encoder_gradient.data.cpu().numpy())
if self.use_history:
rnn_grads.append(rnn_gradient.data.cpu().numpy())
visualizer.track(d_real, d_fake)
hamming = hamming.data.cpu().numpy()[0]
teller_images.extend(image[:4].data.numpy())
drawer_images.extend(fake_image[:4].data.cpu().numpy())
entities = str.join(',', list(batch['entities'][hamming > 0]))
added_entities.append(entities)
if iteration % self.cfg.vis_rate == 0:
visualizer.histogram()
self._plot_losses(visualizer, g_loss, d_loss, aux_loss, iteration)
rnn_gradient = np.array(rnn_grads).mean()
gru_gradient = np.array(gru_grads).mean()
condition_gradient = np.array(condition_encoder_grads).mean()
img_encoder_gradient = np.array(img_encoder_grads).mean()
rnn_grads, gru_grads = [], []
condition_encoder_grads, img_encoder_grads = [], []
self._plot_gradients(visualizer, rnn_gradient, generator_gradient,
discriminator_gradient, gru_gradient,
condition_gradient, img_encoder_gradient,
iteration)
self._draw_images(visualizer, teller_images, drawer_images, nrow=4)
self.logger.write(epoch, iteration, d_real, d_fake, d_loss, g_loss)
if isinstance(batch['turn'], list):
batch['turn'] = np.array(batch['turn']).transpose()
visualizer.write(batch['turn'][0])
visualizer.write(added_entities, var_name='entities')
teller_images = []
drawer_images = []
if iteration % self.cfg.save_rate == 0:
path = os.path.join(self.cfg.log_path, self.cfg.exp_name)
self._save(fake_image[:4], path, epoch, iteration)
if not self.cfg.debug:
self.save_model(path, epoch, iteration)
def _forward_generator(self, batch_size, condition, image_feature_maps):
# noise = torch.FloatTensor(batch_size,
# self.cfg.noise_dim).normal_(0, 1).cuda()
noise = torch.FloatTensor(batch_size,
self.cfg.noise_dim).zero_().cuda()
fake_images, mu, logvar, sigma = self.generator(
noise, condition, image_feature_maps)
return fake_images, mu, logvar, sigma
def _optimize_discriminator(self,
real_images,
fake_images,
prev_image,
condition,
mask,
objects,
gp_reg=0,
aux_reg=0):
"""Discriminator is updated every step independent of batch_size
RNN and the generator
"""
self.discriminator.zero_grad()
real_images.requires_grad_()
d_real, aux_real, _ = self.discriminator(real_images, condition,
prev_image)
d_fake, aux_fake, _ = self.discriminator(fake_images, condition,
prev_image)
if self.cfg.wrong_fake_ratio == 0:
d_wrong = None
else:
wrong_images = torch.cat((real_images[1:], real_images[0:1]),
dim=0)
wrong_prev = torch.cat((prev_image[1:], prev_image[0:1]), dim=0)
d_wrong, _, _ = self.discriminator(wrong_images, condition,
wrong_prev)
d_loss, aux_loss = self._discriminator_masked_loss(
d_real, d_fake, d_wrong, aux_real, aux_fake, objects, aux_reg,
mask)
d_loss.backward(retain_graph=True)
if gp_reg:
reg = gp_reg * self._masked_gradient_penalty(
d_real, real_images, mask)
reg.backward(retain_graph=True)
grad_norm = _recurrent_gan.get_grad_norm(
self.discriminator.parameters())
self.discriminator_optimizer.step()
d_loss_scalar = d_loss.item()
d_real_np = d_real.cpu().data.numpy()
d_fake_np = d_fake.cpu().data.numpy()
aux_loss_scalar = aux_loss.item() if isinstance(
aux_loss, torch.Tensor) else aux_loss
grad_norm_scalar = grad_norm.item()
del d_loss
del d_real
del d_fake
del aux_loss
del grad_norm
gc.collect()
return d_loss_scalar, d_real_np, d_fake_np, aux_loss_scalar, grad_norm_scalar
def _optimize_generator(self, fake_images, prev_image, condition, objects,
aux_reg, mask, mu, logvar):
self.generator.zero_grad()
d_fake, aux_fake, _ = self.discriminator(fake_images, condition,
prev_image)
g_loss = self._generator_masked_loss(d_fake, aux_fake, objects,
aux_reg, mu, logvar, mask)
g_loss.backward(retain_graph=True)
gen_grad_norm = _recurrent_gan.get_grad_norm(
self.generator.parameters())
self.generator_optimizer.step()
g_loss_scalar = g_loss.item()
gen_grad_norm_scalar = gen_grad_norm.item()
del g_loss
del gen_grad_norm
gc.collect()
return g_loss_scalar, gen_grad_norm_scalar
def _optimize_rnn(self):
if self.use_history:
torch.nn.utils.clip_grad_norm_(self.rnn.parameters(),
self.cfg.grad_clip)
rnn_grad_norm = _recurrent_gan.get_grad_norm(self.rnn.parameters())
self.rnn_optimizer.step()
self.rnn.zero_grad()
else:
rnn_grad_norm = None
gru_grad_norm = None
torch.nn.utils.clip_grad_norm_(self.sentence_encoder.parameters(),
self.cfg.grad_clip)
gru_grad_norm = _recurrent_gan.get_grad_norm(
self.sentence_encoder.parameters())
self.sentence_encoder_optimizer.step()
self.sentence_encoder.zero_grad()
ce_grad_norm = _recurrent_gan.get_grad_norm(
self.condition_encoder.parameters())
self.feature_encoders_optimizer.step()
self.condition_encoder.zero_grad()
if self.use_image_encoder:
ie_grad_norm = _recurrent_gan.get_grad_norm(
self.image_encoder.parameters())
self.image_encoder.zero_grad()
else:
ie_grad_norm = None
return rnn_grad_norm, gru_grad_norm, ce_grad_norm, ie_grad_norm
def _discriminator_masked_loss(self, d_real, d_fake, d_wrong, aux_real,
aux_fake, objects, aux_reg, mask):
"""Accumulates losses only for sequences that have not ended
to avoid back-propagation through padding"""
aux_loss = 0
sample_loss = self.criterion.discriminator(d_real, d_fake, d_wrong,
self.cfg.wrong_fake_ratio,
mask)
if aux_reg > 0:
aux_loss = (
self.aux_criterion(aux_real, objects) +
self.aux_criterion(aux_fake, objects)) * mask.unsqueeze(1)
aux_loss = aux_reg * aux_loss.mean()
d_loss = sample_loss + aux_loss
return d_loss, aux_loss
def _generator_masked_loss(self, d_fake, aux_fake, objects, aux_reg, mu,
logvar, mask):
"""Accumulates losses only for sequences that have not ended
to avoid back-propagation through padding"""
sample_loss = self.criterion.generator(d_fake * mask)
if aux_reg > 0:
aux_loss = aux_reg * (self.aux_criterion(aux_fake, objects) *
mask.unsqueeze(1)).mean()
else:
aux_loss = 0
if mu is not None:
kl_loss = self.cfg.cond_kl_reg * kl_penalty(mu, logvar, mask)
else:
kl_loss = 0
g_loss = sample_loss + aux_loss + kl_loss
return g_loss
def _masked_gradient_penalty(self, d_real, real_images, mask):
gp_reg = gradient_penalty(d_real, real_images).mean()
return gp_reg
# region Helpers
def _plot_losses(self, visualizer, g_loss, d_loss, aux_loss, iteration):
_recurrent_gan._plot_losses(self, visualizer, g_loss, d_loss, aux_loss,
iteration)
def _plot_gradients(self, visualizer, rnn, gen, disc, gru, ce, ie,
iteration):
_recurrent_gan._plot_gradients(self, visualizer, rnn, gen, disc, gru,
ce, ie, iteration)
def _draw_images(self, visualizer, real, fake, nrow):
_recurrent_gan.draw_images(self, visualizer, real, fake, nrow)
def _save(self, fake, path, epoch, iteration):
_recurrent_gan._save(self, fake, path, epoch, iteration)
def save_model(self, path, epoch, iteration):
_recurrent_gan.save_model(self, path, epoch, iteration)
def load_model(self, snapshot_path):
_recurrent_gan.load_model(self, snapshot_path)
# endregion
|
#
# PySNMP MIB module JUNIPER-UTIL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-UTIL-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:01:23 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion")
jnxUtilMibRoot, = mibBuilder.importSymbols("JUNIPER-SMI", "jnxUtilMibRoot")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
TimeTicks, Counter64, Gauge32, Bits, IpAddress, MibIdentifier, Counter32, Unsigned32, NotificationType, ObjectIdentity, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Counter64", "Gauge32", "Bits", "IpAddress", "MibIdentifier", "Counter32", "Unsigned32", "NotificationType", "ObjectIdentity", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Integer32")
TextualConvention, DateAndTime, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DateAndTime", "DisplayString")
jnxUtil = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1))
jnxUtil.setRevisions(('2007-01-01 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: jnxUtil.setRevisionsDescriptions(('Initial revision.',))
if mibBuilder.loadTexts: jnxUtil.setLastUpdated('200701010000Z')
if mibBuilder.loadTexts: jnxUtil.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: jnxUtil.setContactInfo(' Juniper Technical Assistance Center Juniper Networks, Inc. 1194 N. Mathilda Avenue Sunnyvale, CA 94089 E-mail: support@juniper.net')
if mibBuilder.loadTexts: jnxUtil.setDescription('This MIB module provides a generic means for exposing junos data via SNMP. There are separate tables for each type of data, and specific instances of each type are identified by its corresponding name.')
jnxUtilData = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1))
jnxUtilCounter32Table = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 1), )
if mibBuilder.loadTexts: jnxUtilCounter32Table.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter32Table.setDescription('This table exposes generic Counter valued objects. Each counter instance, which can be populated via an internal junos interface, is identified by its corresponding name.')
jnxUtilCounter32Entry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 1, 1), ).setIndexNames((1, "JUNIPER-UTIL-MIB", "jnxUtilCounter32Name"))
if mibBuilder.loadTexts: jnxUtilCounter32Entry.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter32Entry.setDescription('Each entry exposes a separate Counter instance.')
jnxUtilCounter32Name = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 80)))
if mibBuilder.loadTexts: jnxUtilCounter32Name.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter32Name.setDescription('The name assigned to this Counter instance.')
jnxUtilCounter32Value = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilCounter32Value.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter32Value.setDescription('The value of this generic, utility counter instance.')
jnxUtilCounter32Time = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 1, 1, 3), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilCounter32Time.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter32Time.setDescription('The time at which this instance was last populated.')
jnxUtilCounter64Table = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 2), )
if mibBuilder.loadTexts: jnxUtilCounter64Table.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter64Table.setDescription('This table exposes generic Counter64 valued objects. Each counter instance, which can be populated via an internal junos interface, is identified by its corresponding name.')
jnxUtilCounter64Entry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 2, 1), ).setIndexNames((1, "JUNIPER-UTIL-MIB", "jnxUtilCounter64Name"))
if mibBuilder.loadTexts: jnxUtilCounter64Entry.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter64Entry.setDescription('Each entry exposes a separate Counter64 instance.')
jnxUtilCounter64Name = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 80)))
if mibBuilder.loadTexts: jnxUtilCounter64Name.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter64Name.setDescription('The name assigned to this object instance.')
jnxUtilCounter64Value = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilCounter64Value.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter64Value.setDescription('The value of this generic, utility object instance.')
jnxUtilCounter64Time = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 2, 1, 3), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilCounter64Time.setStatus('current')
if mibBuilder.loadTexts: jnxUtilCounter64Time.setDescription('The time at which this instance was last populated.')
jnxUtilIntegerTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 3), )
if mibBuilder.loadTexts: jnxUtilIntegerTable.setStatus('current')
if mibBuilder.loadTexts: jnxUtilIntegerTable.setDescription('This table exposes generic Integer32 valued objects. Each integer instance, which can be populated via an internal junos interface, is identified by its corresponding name.')
jnxUtilIntegerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 3, 1), ).setIndexNames((1, "JUNIPER-UTIL-MIB", "jnxUtilIntegerName"))
if mibBuilder.loadTexts: jnxUtilIntegerEntry.setStatus('current')
if mibBuilder.loadTexts: jnxUtilIntegerEntry.setDescription('Each entry exposes a separate Integer32 instance.')
jnxUtilIntegerName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 3, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 80)))
if mibBuilder.loadTexts: jnxUtilIntegerName.setStatus('current')
if mibBuilder.loadTexts: jnxUtilIntegerName.setDescription('The name assigned to this object instance.')
jnxUtilIntegerValue = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilIntegerValue.setStatus('current')
if mibBuilder.loadTexts: jnxUtilIntegerValue.setDescription('The value of this generic, utility object instance.')
jnxUtilIntegerTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 3, 1, 3), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilIntegerTime.setStatus('current')
if mibBuilder.loadTexts: jnxUtilIntegerTime.setDescription('The time at which this instance was last populated.')
jnxUtilUintTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 4), )
if mibBuilder.loadTexts: jnxUtilUintTable.setStatus('current')
if mibBuilder.loadTexts: jnxUtilUintTable.setDescription('This table exposes generic Unsigned32 valued objects. Each integer instance, which can be populated via an internal junos interface, is identified by its corresponding name.')
jnxUtilUintEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 4, 1), ).setIndexNames((1, "JUNIPER-UTIL-MIB", "jnxUtilUintName"))
if mibBuilder.loadTexts: jnxUtilUintEntry.setStatus('current')
if mibBuilder.loadTexts: jnxUtilUintEntry.setDescription('Each entry exposes a separate Unsigned32 instance.')
jnxUtilUintName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 4, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 80)))
if mibBuilder.loadTexts: jnxUtilUintName.setStatus('current')
if mibBuilder.loadTexts: jnxUtilUintName.setDescription('The name assigned to this object instance.')
jnxUtilUintValue = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 4, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilUintValue.setStatus('current')
if mibBuilder.loadTexts: jnxUtilUintValue.setDescription('The value of this generic, utility object instance.')
jnxUtilUintTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 4, 1, 3), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilUintTime.setStatus('current')
if mibBuilder.loadTexts: jnxUtilUintTime.setDescription('The time at which this instance was last populated.')
jnxUtilStringTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 5), )
if mibBuilder.loadTexts: jnxUtilStringTable.setStatus('current')
if mibBuilder.loadTexts: jnxUtilStringTable.setDescription('This table exposes generic OCTET STRING valued objects. Each string instance, which can be populated via an internal junos interface, is identified by its corresponding name.')
jnxUtilStringEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 5, 1), ).setIndexNames((1, "JUNIPER-UTIL-MIB", "jnxUtilStringName"))
if mibBuilder.loadTexts: jnxUtilStringEntry.setStatus('current')
if mibBuilder.loadTexts: jnxUtilStringEntry.setDescription('Each entry exposes a separate OCTET STRING instance.')
jnxUtilStringName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 5, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 80)))
if mibBuilder.loadTexts: jnxUtilStringName.setStatus('current')
if mibBuilder.loadTexts: jnxUtilStringName.setDescription('The name assigned to this object instance.')
jnxUtilStringValue = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 5, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 256))).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilStringValue.setStatus('current')
if mibBuilder.loadTexts: jnxUtilStringValue.setDescription('The value of this generic, utility object instance.')
jnxUtilStringTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 47, 1, 1, 5, 1, 3), DateAndTime()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxUtilStringTime.setStatus('current')
if mibBuilder.loadTexts: jnxUtilStringTime.setDescription('The time at which this instance was last populated.')
mibBuilder.exportSymbols("JUNIPER-UTIL-MIB", jnxUtilStringName=jnxUtilStringName, jnxUtilCounter32Name=jnxUtilCounter32Name, jnxUtilUintValue=jnxUtilUintValue, jnxUtil=jnxUtil, jnxUtilCounter32Time=jnxUtilCounter32Time, jnxUtilCounter32Value=jnxUtilCounter32Value, jnxUtilUintEntry=jnxUtilUintEntry, jnxUtilIntegerName=jnxUtilIntegerName, jnxUtilStringValue=jnxUtilStringValue, jnxUtilIntegerValue=jnxUtilIntegerValue, jnxUtilCounter64Table=jnxUtilCounter64Table, jnxUtilData=jnxUtilData, jnxUtilCounter64Entry=jnxUtilCounter64Entry, jnxUtilIntegerEntry=jnxUtilIntegerEntry, jnxUtilCounter64Value=jnxUtilCounter64Value, jnxUtilStringTime=jnxUtilStringTime, jnxUtilCounter64Name=jnxUtilCounter64Name, jnxUtilCounter32Table=jnxUtilCounter32Table, jnxUtilStringTable=jnxUtilStringTable, jnxUtilCounter32Entry=jnxUtilCounter32Entry, jnxUtilStringEntry=jnxUtilStringEntry, jnxUtilIntegerTime=jnxUtilIntegerTime, PYSNMP_MODULE_ID=jnxUtil, jnxUtilCounter64Time=jnxUtilCounter64Time, jnxUtilUintName=jnxUtilUintName, jnxUtilUintTime=jnxUtilUintTime, jnxUtilIntegerTable=jnxUtilIntegerTable, jnxUtilUintTable=jnxUtilUintTable)
|
from astropy import units as u
from astropy.cosmology import Planck15
import numpy as np
from scipy.integrate import cumtrapz, quad, simps, trapz
from time import time
try:
import pyccl as ccl
has_ccl = True
except ImportError:
has_ccl = False
from .helpers.cosmology import BaseCosmo
from .helpers.decorators import array, inMpc
from .helpers.lensing import BaseLensing
class Profile(BaseLensing):
"""Profile object
All profiles should inherit from ``Profile``
Defining your own profile is very simple. As an example, let's
define a simple power-law profile with two free parameters, the
normalization and the slope:
.. math::
        f(r, a, b) = a r^b
::
class PowerLaw(Profile):
def __init__(self, norm, slope, **kwargs):
self._set_shape(norm*slope)
super().__init__(**kwargs)
self.norm = norm
self.slope = slope
@array
def profile(self, r):
return self.norm * r**self.slope
That's it! The ``__init__()`` method needs only two lines of code
(in addition to attribute definitions). The last line is necessary
to allow ``profiley`` to automatically handle arbitrary shapes,
through the definition of a ``_shape`` attribute. Note that
``_set_shape`` takes only one argument (besides ``self``) - the
*product* of the class arguments. That is, if the arguments are
arrays, their dimensions must be such that a product can be carried
out without any manipulation.
Profile projections
-------------------
If the projection of this profile is analytical, any or all of the
following methods can also be specified: ::
projected(self, R)
projected_cumulative(self, R)
projected_excess(self, R)
offset_profile(self, R, Roff)
offset_projected(self, R, Roff)
offset_projected_cumulative(self, R, Roff)
offset_projected_excess(self, R, Roff)
If it does not have analytical expressions, these methods will also
exist, but they will be calculated numerically, so they may be
somewhat slower depending on the precision required.
Cosmology
---------
    All ``Profile`` objects contain the cosmological information with
    which they have been initialized, through the ``self.cosmo`` attribute,
    which can be any ``astropy.cosmology.FLRW`` object.
"""
def __init__(self, z=0, overdensity=500,
los_loglimit=6, nsamples_los=200, resampling=20,
logleft=-10, left_samples=100, **kwargs):
"""Initialize a profile object
Optional arguments
------------------
z : float or ndarray of floats
redshift
overdensity : int or float
overdensity with respect to the background (does not apply
to all Profile children; see each specific class for
details)
"""
super().__init__(z, **kwargs)
# check overdensity
self._assert_overdensity(overdensity)
self.overdensity = overdensity
# for numerical integration -- perhaps these could be passed
# in a single dictionary
self.los_loglimit = los_loglimit
self.nsamples_los = nsamples_los
self.resampling = resampling
self.logleft = logleft
self.left_samples = left_samples
        # empty init
        self.__dimensions = None
        self.__one = None
@property
def _one(self):
if self.__one is None:
self.__one = u.dimensionless_unscaled
return self.__one
@property
def _dimensions(self):
if self.__dimensions is None:
self.__dimensions = tuple([1] * len(self.shape))
return self.__dimensions
@property
def shape(self):
if not hasattr(self, '_shape'):
msg = 'attribute shape does not exist. Please make sure' \
' to call ``self._set_shape`` in ``__init__``'
raise AttributeError(msg)
return self._shape
### private methods ###
def _assert_overdensity(self, overdensity):
assert not np.iterable(overdensity), \
'parameter overdensity must be a scalar'
try:
overdensity / 1
except TypeError as err:
raise TypeError('parameter overdensity must be a number') from err
if overdensity <= 0:
raise ValueError(
f'overdensity must be positive; received {overdensity}')
return
def _define_array(self, x):
if not np.iterable(x):
return x * np.ones(self._shape)
return x
def _set_shape(self, args_product):
if hasattr(self, 'shape'):
msg = 'attribute shape already set, cannot be overwritten'
raise ValueError(msg)
if np.iterable(args_product):
self._shape = args_product.shape
else:
self._shape = (1,)
### methods ###
@inMpc
@array
def projected(self, R: np.ndarray, log_rmin=-10, log_rmax=6,
integral_samples=200):
"""Line of sight projected profile, calculated numerically
Parameters
----------
R : np.ndarray
positions at which to calculate the projected profile
Optional arguments
------------------
log_rmin, log_rmax : float
lower and upper limits for logspace resampling for integration
integral_samples : int
number of samples to generate for Simpson-rule integration
of the projected profile
Notes on numerical integration
------------------------------
-The default values for the integration parameters give
numerical errors well below 0.1% over the range
R=[1e-5,100] Mpc, when comparing the numerical and
analytical implementations for an NFW profile (the
former can be obtained by defining a GNFW profile with
default kwargs)
"""
        assert log_rmin < log_rmax, \
            'argument log_rmin must be smaller than log_rmax, received' \
            f' {log_rmin,log_rmax}'
assert integral_samples // 1 == integral_samples, \
'argument integral_samples must be int, received' \
f' {integral_samples} ({type(integral_samples)})'
R_los = np.logspace(log_rmin, log_rmax, integral_samples)[:,None]
R = np.transpose(
[np.hypot(*np.meshgrid(R_los[:,i], R[:,0]))
for i in range(R_los.shape[1])],
axes=(1,2,0))
return 2 * simps(self.profile(R), R_los[None], axis=1)
@inMpc
@array
def projected_cumulative(self, R: np.ndarray, log_rmin: float=-10,
left_samples: int=100, resampling: int=20,
**kwargs):
"""Cumulative projected profile within R, calculated
numerically
Parameters
----------
R : np.ndarray
positions at which to calculate the projected profile
Optional arguments
------------------
log_rmin : float
lower limit for logspace resampling for integration. The
same value will be passed to ``self.projected``
resampling : int
number of samples into which each R-interval in the
data will be re-sampled. For instance, if two adjacent
data points are at Rbin=0.1,0.2 then for the integration
they will be replaced by
newRbin = np.logspace(np.log10(0.1), np.log10(0.2),
resampling, endpoint=False)
(the endpoint will be added when sampling the following bin)
left_samples : int
number of samples to use between log_rmin and the first
value of R, with a logarithmic sampling
Additional arguments will be passed to ``self.projected``
Notes on numerical integration
------------------------------
-The default values for the integration parameters give
numerical errors well below 0.1% over the range
R=[1e-5,100] Mpc, when comparing the numerical and
analytical implementations for an NFW profile (the
former can be obtained by defining a GNFW profile with
default kwargs)
"""
assert isinstance(left_samples, (int,np.integer)), \
'argument left_samples must be int, received' \
f' {left_samples} ({type(left_samples)})'
assert isinstance(resampling, (int,np.integer)), \
'argument resampling must be int, received' \
f' {resampling} ({type(resampling)})'
# for convenience
logR = np.log10(R[:,0])
# resample R
Ro = np.vstack(
[np.zeros(R.shape[1]),
np.logspace(log_rmin, logR[0], left_samples, endpoint=False)[:,None],
np.concatenate(
[np.logspace(logR[i-1], logR[i], resampling, endpoint=False)
for i in range(1, R.shape[0])])[:,None],
R.max()*np.ones(R.shape[1])]
)
j = np.arange(1+left_samples, Ro.shape[0], resampling)
integ = cumtrapz(
Ro*self.projected(Ro, log_rmin=log_rmin, **kwargs),
Ro, initial=0, axis=0)
return 2 * integ[j] / R**2
def projected_excess(self, R: np.ndarray, log_rmin=-10, log_rmax=6,
integral_samples=200,
left_samples=100, resampling=20):
"""Cumulative projected profile file excess at projected
distance(s) R, defined as
projected_excess(R) = projected_cumulative(R) - projected(R)
This profile is most commonly used as the galaxy weak lensing
*shear* observable, :math:`\gamma` where the projected excess
is referred to as the *excess surface density* (ESD or
:math:`\Delta\Sigma`),
.. math::
\Delta\Sigma(R) = \gamma\Sigma_\mathrm{c}
where :math:`\Sigma_\mathrm{c}` is the critical surface density
Parameters
----------
R : float or array of float
projected distance(s)
Optional arguments are passed to either ``self.projected`` or
``self.projected_cumulative``
"""
s1 = self.projected_cumulative(
R, log_rmin=log_rmin, left_samples=left_samples,
resampling=resampling, log_rmax=log_rmax,
integral_samples=integral_samples)
s2 = self.projected(R, log_rmin=log_rmin, log_rmax=log_rmax,
integral_samples=integral_samples)
return s1 - s2
def offset(self, func, R, Roff, theta_samples=360, weights=None,
**kwargs):
"""Calcuate any profile with a reference point different
from its center
Parameters
----------
func : callable
            the function to calculate
R : np.ndarray, shape (N,)
radii at which to calculate the offset surface density
Roff : np.ndarray, shape (M,)
offsets with respect to the profile center
Optional parameters
-------------------
theta_samples : int
number of samples for the angular integral from 0 to 2*pi
weights : array of floats, shape (M,)
weights to apply to each profile corresponding to every
value of ``Roff``. See ``Returns`` below
kwargs : dict
arguments to pass to ``func``
Returns
-------
offset : np.ndarray,
offset profile. The shape of the array depends on whether
the ``weights`` argument is specified: if *not* specified
(default), then
.. code-block::
shape: (M,N,*self.shape)
if ``weights`` is provided, then the first axis will be
weight-averaged over so that
.. code-block::
shape: (N,*self.shape)
"""
if not isinstance(theta_samples, (int,np.integer)):
raise TypeError(
'argument theta_samples must be int, received' \
f' {theta_samples} ({type(theta_samples)})')
if not np.iterable(Roff):
Roff = np.array([Roff])
assert len(Roff.shape) == 1, 'argument Roff must be 1d'
if weights is not None:
if weights.size != Roff.size:
msg = 'weights must have the same size as Roff,' \
f' received {weights.size}, {Roff.size},' \
' respectively.'
raise ValueError(msg)
# can't get this to work using the @array decorator
R = R.reshape((R.size,*self._dimensions))
Roff = Roff.reshape((Roff.size,*self._dimensions,1,1))
theta = np.linspace(0, 2*np.pi, theta_samples)
theta1 = theta.reshape((theta_samples,*self._dimensions,1))
x = (Roff**2 + R**2 + 2*R*Roff*np.cos(theta1))**0.5
# looping slower but avoids memory issues
# generator for the function calls beforehand makes it a little faster
f = (func(i, **kwargs) for i in x)
off = np.array([trapz(fi, theta, axis=0) for fi in f])
if weights is not None:
# create a slice so we can multiply by weights
# along the first axis
s_ = [None] * off.ndim
s_[0] = slice(None)
Roff = np.squeeze(Roff)
off = trapz(weights[tuple(s_)]*off, Roff, axis=0) \
/ trapz(weights, Roff)
return off / (2*np.pi)
def offset_profile(self, R, Roff, **kwargs):
"""Alias for ``offset(profile, R, Roff, **kwargs)``"""
return self.offset(self.profile, R, Roff)
def offset_projected(self, R, Roff, **kwargs):
"""Alias for ``offset(projected, R, Roff, **kwargs)``"""
return self.offset(self.projected, R, Roff, **kwargs)
def offset_projected_cumulative(self, R, Roff, **kwargs):
"""Alias for ``offset(projected_cumulative, R, Roff,
**kwargs)``"""
return self.offset(self.projected_cumulative, R, Roff, **kwargs)
def offset_projected_excess(self, R, Roff, **kwargs):
"""Alias for ``offset(projected_excess, R, Roff,
**kwargs)``"""
return self.offset(self.projected_excess, R, Roff, **kwargs)
def _fourier(self, rmax=10, dr=0.1):
"""This is not working yet! Might just need to fall back to quad"""
r = np.arange(dr, rmax, dr)
f = self.profile(r)
# compute Fourier transform by numpy's FFT function
g = np.fft.fft(f)
print('g =', g.shape)
        # frequency normalization factor is 2*np.pi/dr
k = np.fft.fftfreq(f.size)*2*np.pi/dr
# in order to get a discretisation of the continuous
# Fourier transform we need to multiply g by a phase factor
g = g * dr * np.exp(1j*k[:,None]*rmax) / (2*np.pi)**0.5
return k, g
# def twohalo(self, cosmo, bias, func, R, logm=np.logspace(12,16,41),
# bias_norm=1, **kwargs):
def _twohalo(self, cosmo, offset_func, R, logm_2h=np.logspace(12,16,41),
z_2h=np.linspace(0,2,21), **kwargs):
"""Calculate the two-halo term associated with the profile
Parameters
----------
cosmo : `pyccl.Cosmology` object
offset_func : callable or str
if callable, it must be the offset version of the
function in question. For instance, if one is
modeling the convergence, the function supplied
must be the offset convergence.
R : np.ndarray
        logm_2h : np.ndarray, optional
            masses over which to calculate the 2h term. If not
            specified, the masses used when defining the profile will be used.
Notes
-----
kwargs are passed to the function to be called. If the function
to be calculated is the convergence, the source redshift must
be supplied
"""
if not has_ccl:
msg = 'Core Cosmology Library (CCL) required for two halo' \
' calculations'
raise ModuleNotFoundError(msg)
assert isinstance(cosmo, ccl.Cosmology)
# assert isinstance(bias, ccl.halos.HaloBias)
# which function are we using?
_valid_funcs = ('esd', 'kappa', 'sigma', 'convergence',
'projected', 'projected_cumulative',
'projected_excess',
'enclosed_surface_density', 'excess_surface_density',
'surface_density')
if isinstance(offset_func, str):
assert offset_func in _valid_funcs, \
f'offset_func must be one of {_valid_funcs}'
if offset_func in ('sigma', 'projected', 'surface_density'):
offset_func = self.offset_projected
elif offset_func in ('projected_cumulative',
'enclosed_surface_density'):
offset_func = self.offset_projected_cumulative
elif offset_func in ('esd', 'projected_excess',
'excess_surface_density'):
offset_func = self.offset_projected_excess
elif offset_func in ('kappa', 'convergence'):
offset_func = self.offset_convergence
else:
assert callable(offset_func), \
'argument offset_func must be a string or callable'
assert offset_func.__name__.startswith('offset'), \
                'if argument offset_func is a function, as opposed to ' \
                'a string, it must be the offset version of the ' \
                'function of interest.'
# for each distance Ri, calculate the contribution from
# all halos that will have some contribution at that distance
# In order to speed this up a little, consider I don't need
# to calculate the offset profile for all halos at all distances
# but only for those who are close enough to that distance
# So probably what I actually need to do is just calculate
# for a grid in Roff and then move that to the required distance.
# For example,
# for i, Ri in R:
# twoh = offset_func(R, R[:i+1], **kwargs)
return
### aliases for backward compatibility
def surface_density(self, *args, **kwargs):
"""Alias for ``self.projected``"""
return self.projected(*args, **kwargs)
def enclosed_surface_density(self, *args, **kwargs):
"""Alias for ``self.projected_cumulative``"""
return self.projected_cumulative(*args, **kwargs)
def excess_surface_density(self, *args, **kwargs):
"""Alias for ``self.projected_excess``"""
return self.projected_excess(*args, **kwargs)
def offset_surface_density(self, *args, **kwargs):
"""Alias for ``self.offset_projected``"""
return self.offset_projected(*args, **kwargs)
def offset_enclosed_surface_density(self, *args, **kwargs):
"""Alias for ``self.offset_projected_cumulative``"""
return self.offset_projected_cumulative(*args, **kwargs)
def offset_excess_surface_density(self, *args, **kwargs):
"""Alias for ``self.offset_projected_excess``"""
return self.offset_projected_excess(*args, **kwargs)
### auxiliary methods to test integration performance ###
@inMpc
@array
def _quad_projected(self, R):
"""Not yet implemented"""
integrand = lambda r, Ro: self.profile((r**2+Ro**2)**0.5)
return np.array([[quad(integrand, 0, np.inf, args=(Rij,))
for Rij in Ri] for Ri in R])
@inMpc
@array
def _test_integration(self, R, output=None):
"""Test the fast-integration methods against the slow
but accurate quad function
Not yet implemented
"""
        qsd = self._quad_projected(R)
sd = self.projected(R)
return
|
# To read and write files, use the built-in open() function.
# To manipulate paths, use the os.path module.
# To read all the lines in all the files on the command line, see the fileinput module.
# To create temporary files and directories, use the tempfile module.
# For high-level file and directory handling, use the shutil module.
import os
#
print(os.name)
# returns the Current Working Directory(CWD) of the file used to execute the code
print(os.getcwd())
file = os.open("file.txt", os.O_WRONLY)
os.write(file, str.encode("hello"))
os.close(file)
"""
# print(os.stat("../../"))
d = os.environ # return a dictionary
# for i in d.items():
# print(i)
# get access of environment variables
# os.environ.get("<name_of_environment>")
print(os.environ.get("TEMP"))
# modify environment variable's value
# but any changes will be effective only for the current process
# where it was assigned and it will not change the value permanently.
# from
print("Value before:", os.environ.get("USERNAME"))
# to
os.environ["USERNAME"] = "Pedro2"
print("Value after:", os.environ["USERNAME"])
# add a new variable
os.environ["NEW_ENVIROMENT_VARIABLE"] = "C:\\ewvalue"
print(os.environ["NEW_ENVIROMENT_VARIABLE"])
# if an environment variable doesn't exist
# form 1
# where os.environ.get(<name_of_environment>, <message_if_the_value_from_the_environment_is_None>)
print("Value:", os.environ.get("NAME_ENVIRONMENT", "This environment variable doesn't exist"))
# form 2
# using try: ... except KeyError: ...
try:
    # if the environment variable doesn't exist, a KeyError is raised
print("Value:", os.environ["NAME_ENVIRONMENT"])
except KeyError:
print("This environment variable doesn't exist")
#
"""
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-09 22:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=500)),
],
),
migrations.CreateModel(
name='Meal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.CharField(max_length=1000)),
('ingredients', models.ManyToManyField(to='dinner.Ingredient')),
],
),
migrations.CreateModel(
name='MealHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('comments', models.CharField(max_length=1000)),
('meal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='dinner.Meal')),
],
),
]
|
#
# PDF
# ---
# This script contains the
# class PDF
#
# Date: 2021-02-05
#
# Author: Lorenzo Coacci
#
#
# Copyright (C) 2021 Lorenzo Coacci
#
# pylint: disable=logging-format-interpolation,too-many-lines
#
# + + + + + Libraries + + + + +
# import basic
from golog.log import (
error_print,
warning_print,
ask_password,
list_print
)
from golog.tools import (
correct_filepath,
filepath_exists,
to_int,
correct_nonlist
)
# to manage pdfs
import PyPDF2
import tabula
import os
import ntpath
import uuid
import pandas as pd
# OCR
from pdf2image import convert_from_path
import cv2
from PIL import Image
import pyocr
import pyocr.builders
import pytesseract as pt
# + + + + + Libraries + + + + +
# + + + + + Classes + + + + +
# + + + + + PDF + + + + +
class PDF(object):
def __init__(
self,
pdf_file_path,
show_debug=False
):
"""Init and Load PDF file with PyPDF2"""
# preprocessing checks
pdf_file_path = str(pdf_file_path)
if not filepath_exists(pdf_file_path):
error_print(f"The file path {pdf_file_path} does not exist, please check again...")
return
# define
self.mode = 'rb'
self.pdf_file_path = pdf_file_path
# (get also pdf file name)
self.pdf_file_name = ntpath.basename(self.pdf_file_path).replace('.pdf', '')
# open the file
self.pdf_file = self.open_pdf(self.pdf_file_path, self.mode)
# objects related
self.pdf_reader = PyPDF2.PdfFileReader(self.pdf_file)
        # is the PDF protected by a password?
        # (decrypt before reading page data, otherwise PyPDF2 raises on encrypted files)
        if self.is_encrypted():
            warning_print("This PDF is encrypted, please enter the password to decrypt it:\n\t>> ")
            password = ask_password()
            if password is None:
                error_print("Error: Problems while parsing your password")
                return
            result = self.pdf_reader.decrypt(password)
            if not bool(result):
                error_print("Could not decrypt your PDF, check your password and try again")
                return
        # data
        self.num_pages = self.pdf_reader.numPages
        self.pages = [
            self.PDFPage(pdf=self, page_number=page, show_debug=show_debug)
            for page in range(self.num_pages)
        ]
        self.text = self.get_pdf_text()
# debug
self.show_debug = show_debug
# + + + + + Inner Class - PDF Page + + + + +
class PDFPage(object):
def __init__(
self, pdf, page_number, show_debug=False
):
"""Init and Load a PDF Page"""
if page_number is None:
raise ValueError("The Page Number cannot be None, please insert an integer, every PDFPage has an int reference to its PDF object")
self.pdf = pdf
self.num_pages = self.pdf.num_pages
self.page_number = self._page_number_validated(page_number)
# data
self.pdf_page = self.pdf.pdf_reader.getPage(self.page_number)
self.page_text = self.get_text()
self.show_debug = show_debug
def __repr__(self):
return "<PDF Page [%s] >" % self.page_text[:10]
def __int__(self):
return len(self.page_text)
def __len__(self):
return len(self.page_text)
def __hash__(self):
return hash((self.page_text))
        def __eq__(self, other):
            if not isinstance(other, PDF.PDFPage):
                raise TypeError("Cannot compare a PDF Page object with a non PDF Page one")
            return self.__hash__() == other.__hash__()
        def __ne__(self, other):
            if not isinstance(other, PDF.PDFPage):
                raise TypeError("Cannot compare a PDF Page object with a non PDF Page one")
            return self.__hash__() != other.__hash__()
def _page_number_validated(self, page_number):
if not isinstance(page_number, int):
page_number = to_int(page_number)
if page_number is None:
error_print("The page number you input is not a int or not convertible to integer")
self._error_page_number_validation()
if page_number < 0:
if page_number < -(self.num_pages):
error_print(f"Cannot use the ngeative index with an abs number greater than the len {self.num_pages}")
self._error_page_number_validation()
else:
page_number = self.num_pages + page_number
            elif page_number >= self.num_pages:
                error_print(f"The page number cannot be equal to or greater than the number of pages {self.num_pages}")
self._error_page_number_validation()
return page_number
def get_text(self):
return str(self.pdf_page.extractText())
def export(self, output_file_path=None):
            # add this page to a new pdf writer
writer = PyPDF2.PdfFileWriter()
writer.addPage(self.pdf_page)
# new pdf file object
if output_file_path is None:
to_replace = self.pdf.pdf_file_name
output_file_path = self.pdf.pdf_file_path.replace(to_replace, to_replace + f'_page_{str(self.page_number)}')
with open(output_file_path, 'wb') as new_file:
                # write the selected page to the new file
writer.write(new_file)
# + + + + + Inner Class - PDF Page + + + + +
def to_pdf(self, output_file_path=None):
writer = PyPDF2.PdfFileWriter()
# writing a PDF object
for page_number in range(self.num_pages):
            # fetch the underlying PyPDF2 page object
page = self.get_page(page_number).pdf_page
# adding to file
writer.addPage(page)
if output_file_path is None:
output_file_path = self.pdf_file_path
with open(output_file_path, 'wb') as new_file:
            # write the copied pages to the new file
writer.write(new_file)
def __enter__(self):
return self.pdf
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Context Manager close PDF at the end"""
self._close_file(self.pdf_file)
def __repr__(self):
return "<PDF File [%s] >" % self.text[:10]
def __int__(self):
return self.num_pages
def __getitem__(self, page_number):
return self.pages[page_number]
def __len__(self):
return self.num_pages
def __add__(self, other):
# pdf merge
pdf_merger = PyPDF2.PdfFileMerger()
pdf_merger.append(self.pdf_file)
pdf_merger.append(other.pdf_file)
to_replace = self.pdf_file_name
output = self.pdf_file_path.replace(to_replace, f'{self.pdf_file_name}_{other.pdf_file_name}')
# writing combined pdf to output pdf file
with open(output, 'wb') as f:
pdf_merger.write(f)
# new file
new_pdf = PDF(output)
# delete file combined
os.remove(output)
return new_pdf
def __hash__(self):
return hash((self.num_pages, self.text))
def __eq__(self, other):
if not isinstance(other, PDF):
raise TypeError("Cannot compare a PDF object with a non PDF one")
return self.__hash__() == other.__hash__()
def __ne__(self, other):
if not isinstance(other, PDF):
raise TypeError("Cannot compare a PDF object with a non PDF one")
return self.__hash__() != other.__hash__()
def __lt__(self, other):
if not isinstance(other, PDF):
raise TypeError("Cannot compare a PDF object with a non PDF one")
return self.num_pages < other.num_pages
def __le__(self, other):
if not isinstance(other, PDF):
raise TypeError("Cannot compare a PDF object with a non PDF one")
return self.num_pages <= other.num_pages
def __gt__(self, other):
if not isinstance(other, PDF):
raise TypeError("Cannot compare a PDF object with a non PDF one")
return self.num_pages > other.num_pages
def __ge__(self, other):
if not isinstance(other, PDF):
raise TypeError("Cannot compare a PDF object with a non PDF one")
return self.num_pages >= other.num_pages
def open_pdf(self, file_path, mode):
"""Open a file"""
return open(file_path, mode)
def _close_file(self, file):
"""An internal way of closing files in general"""
return file.close()
def close(self):
"""Explicitly close the PDF class file when you are done"""
return self.pdf_file.close()
def get_number_of_pages(self):
return self.pdf_reader.numPages
def is_encrypted(self):
return self.pdf_reader.isEncrypted
def _error_page_number_validation(self):
raise ValueError(
"Validation Error while parsing the PDF page number (must be a int)"
)
def get_page(self, page_number):
return self.pages[page_number]
def get_page_text(self, page_number):
return self.get_page(page_number).page_text
def get_pdf_text(self):
texts = [str(self.get_page_text(page_num)) for page_num in range(self.num_pages)]
return ' '.join(texts)
def get_pdf_tables(
self, multiple_tables=True,
area=None, pages=1,
output_format=None
):
df = tabula.read_pdf(
self.pdf_file_path,
area=area,
pages=pages,
output_format=output_format,
multiple_tables=multiple_tables
)
return df
def pdf_to_excel(self, excel_file_path):
return tabula.convert_into(
self.pdf_file_path,
excel_file_path, output_format="xlsx"
)
def pdf_rotate(self, rotation, pages=None):
# writing instance
writer = PyPDF2.PdfFileWriter()
if pages is None:
pages = list(range(self.num_pages))
else:
# validate
pages = correct_nonlist(pages)
# rotating each page
for page_number in range(self.num_pages):
# creating rotated page object
page = self.get_page(page_number).pdf_page
if page_number in pages:
page.rotateClockwise(rotation)
# adding rotated page object to pdf writer
writer.addPage(page)
# new pdf file object
to_replace = self.pdf_file_name
new_rotated_file_path = self.pdf_file_path.replace(to_replace, to_replace + '_rotated')
with open(new_rotated_file_path, 'wb') as new_file:
# writing rotated pages to new file
writer.write(new_file)
return True
# - - - - - OCR - - - - -
# ocr pdf to df
def pdf_to_df(self, keep_page_images=False, ocr=None):
"""Convert a PDF to images then with OCR to data frame based on boxes (autodetected or not)"""
available_ocrs = [
'pyocr', 'pytesseract'
]
ocr = 'pyocr' if ocr is None else ocr
if ocr not in available_ocrs:
error_print(f"This ocr {ocr} not available, only these ones {available_ocrs}")
return None
# convert to images
jpgs = self.pdf_to_jpgs()
if jpgs:
df = None
i = 1
for jpg in jpgs:
if ocr == 'pytesseract':
page_df = self.pytesseract_image_to_df(jpg)
elif ocr == 'pyocr':
page_df = self.pyocr_image_to_df(jpg)
else:
page_df = self.pyocr_image_to_df(jpg)
page_df['page'] = i
if keep_page_images:
self._image_auto_post_mark_regions(jpg, page_df, save_img=True)
# remove image
os.remove(jpg)
# first df or not?
if df is None:
df = page_df
else:
df = df.append(page_df, ignore_index=True)
i = i + 1
return df
else:
error_print("Could not convert this PDF pages to images for the OCR process")
return None
    # - PY OCR -
def pyocr_get_tools(self):
        # all tools
tools = pyocr.get_available_tools()
if len(tools) == 0:
warning_print("No OCR tool found")
return None
# The tools are returned in the recommended order of usage
return {f"{t.get_name()}": t for t in tools}
    def pyocr_get_tool(self, tool_name):
        # all tools (mapping of tool name to tool object)
        tools = self.pyocr_get_tools()
        tool = None
        if tools is not None:
            tool = tools.get(tool_name)
        return tool
    def pyocr_get_tools_name(self):
        # all tools (mapping of tool name to tool object)
        tools = self.pyocr_get_tools()
        if not tools:
            warning_print("No OCR tool found")
            return None
        return list(tools.keys())
def pyocr_get_tool_name(self, tool):
# tool name
return None if tool is None else tool.get_name()
def pyocr_get_tool_languages(self, pyocr_tool):
return pyocr_tool.get_available_languages()
    def pyocr_get_tool_language(self, tool, lang, default_eng=True):
        """Check whether the selected lang is available for this tool; fall back to English when default_eng is True"""
        langs = self.pyocr_get_tool_languages(tool)
        if lang not in langs:
            warning_print(f"This language {lang} is not in the lang list for this tool")
            if default_eng:
                warning_print("Selecting eng as the default if available . . .")
                if 'eng' in langs:
                    lang = 'eng'
                else:
                    error_print("eng is not available as a language!")
                    return None
        return lang
def pyocr_image_to_text(self, image_path, tool=None, lang=None, broken_path_return_empty_txt=False):
if tool is None:
tool = self.pyocr_get_tools()["Tesseract (sh)"]
if lang is None:
lang = 'eng'
image_path = str(image_path)
if not filepath_exists(image_path):
error_print(f"The image file path {image_path} does not exist, please check again...")
if broken_path_return_empty_txt:
return ""
return None
txt = tool.image_to_string(
Image.open(image_path),
lang=str(lang),
builder=pyocr.builders.TextBuilder()
)
return txt
def pyocr_image_to_df(self, image_path, tool=None, lang=None, broken_path_return_empty_txt=False):
if tool is None:
tool = self.pyocr_get_tools()["Tesseract (sh)"]
if lang is None:
lang = 'eng'
image_path = str(image_path)
if not filepath_exists(image_path):
error_print(f"The image file path {image_path} does not exist, please check again...")
if broken_path_return_empty_txt:
return ""
return None
line_word_list = self.pyocr_image_to_line_and_boxes(
image_path, tool=tool, lang=lang,
broken_path_return_empty_txt=broken_path_return_empty_txt
)
# pass to a df
df = pd.DataFrame(
columns=[
"line_uuid", "line_position", "line_content",
"word_box_position", "word_box_content",
"word_box_confidence"
]
)
for line in line_word_list:
line_uuid = str(uuid.uuid4())
line_position = line.get('position')
line_content = line.get('content')
line_boxes = line.get('word_boxes')
if line_boxes is None:
df = df.append({
"line_uuid": line_uuid,
"line_position": line_position,
"line_content": line_content,
"word_box_position": None,
"word_box_content": None,
"word_box_confidence": None
}, ignore_index=True
)
else:
for box in line_boxes:
df = df.append({
"line_uuid": line_uuid,
"line_position": line_position,
"line_content": line_content,
"word_box_position": box.get('position'),
"word_box_content": box.get('content'),
"word_box_confidence": box.get('confidence')
}, ignore_index=True
)
return df
def pyocr_image_to_boxes(self, image_path, tool=None, lang=None, broken_path_return_empty_txt=False):
if tool is None:
tool = self.pyocr_get_tools()["Tesseract (sh)"]
if lang is None:
lang = 'eng'
image_path = str(image_path)
if not filepath_exists(image_path):
error_print(f"The image file path {image_path} does not exist, please check again...")
if broken_path_return_empty_txt:
return ""
return None
word_boxes = tool.image_to_string(
Image.open(image_path),
lang=str(lang),
builder=pyocr.builders.WordBoxBuilder()
)
return [{"position": box.position, "content": box.content, "confidence": box.confidence} for box in word_boxes]
def pyocr_image_to_line_and_boxes(self, image_path, tool=None, lang=None, broken_path_return_empty_txt=False):
if tool is None:
tool = self.pyocr_get_tools()["Tesseract (sh)"]
if lang is None:
lang = 'eng'
image_path = str(image_path)
if not filepath_exists(image_path):
error_print(f"The image file path {image_path} does not exist, please check again...")
if broken_path_return_empty_txt:
return ""
return None
line_and_word_boxes = tool.image_to_string(
Image.open(image_path),
lang=str(lang),
builder=pyocr.builders.LineBoxBuilder()
)
return [
{
"position": line.position,
"content": line.content,
"word_boxes": [{"position": box.position, "content": box.content, "confidence": box.confidence} for box in line.word_boxes]
} for line in line_and_word_boxes
]
def pyocr_image_to_digits(self, image_path, tool=None, lang=None, broken_path_return_empty_txt=False):
if tool is None:
tool = self.pyocr_get_tools()["Tesseract (sh)"]
if lang is None:
lang = 'eng'
image_path = str(image_path)
if not filepath_exists(image_path):
error_print(f"The image file path {image_path} does not exist, please check again...")
if broken_path_return_empty_txt:
return ""
return None
if 'Tesseract' not in tool.get_name():
error_print("Other tools than Tesseract do not support image to digits")
if broken_path_return_empty_txt:
return ""
return None
digits = tool.image_to_string(
Image.open(image_path),
lang=str(lang),
builder=pyocr.tesseract.DigitBuilder()
)
return digits
    # - PY OCR -
# - PyTesseract -
def pytesseract_image_to_df(self, image_path, lang=None, broken_path_return_empty_txt=False):
if lang is None:
lang = 'eng'
if not filepath_exists(str(image_path)):
error_print(f"The image file path {str(image_path)} does not exist, please check again...")
if broken_path_return_empty_txt:
return ""
return None
im = cv2.imread(str(image_path))
df = pt.image_to_data(im, lang=lang, nice=0, output_type='data.frame')
return df
# - PyTesseract -
def pdf_to_jpgs(self, where=None, dpi=350, return_raw=False):
"""Convert each PDF page to a JPG image"""
pages = convert_from_path(self.pdf_file_path, dpi=dpi)
if where is None:
where = self.pdf_file_path.replace('.pdf', '').replace(self.pdf_file_name, '')
where = correct_filepath(where)
if not pages:
return []
if return_raw:
return pages
i = 1
paths = []
for page in pages:
image_name = f"{self.pdf_file_name}_page_" + str(i) + ".jpg"
page.save(where + image_name, "JPEG")
i = i + 1
paths.append(where + image_name)
return paths
def _image_auto_post_mark_regions(self, image_path, page_df, save_img=False):
# load image
im = cv2.imread(image_path)
box_positions = list(page_df['word_box_position'].values)
line_items_coordinates = []
for box in box_positions:
#area = cv2.contourArea(c)
#x, y, w, h = cv2.boundingRect(c)
x, y = box[0]
w, h = box[1]
image = cv2.rectangle(im, (x, y), (2200, y + h), color=(255, 0, 255), thickness=3)
line_items_coordinates.append([(x, y), (2200, y + h)])
if save_img:
image_name = ntpath.basename(image_path)
if '.' in image_name:
image_name = image_name.split('.')[0]
self._save_image_from_array(image, image_path.replace(image_name, image_name + '_auto_post_marked'))
return image, line_items_coordinates
def _image_auto_pre_mark_regions(self, image_path, save_img=False):
"""Pre Auto Mark Boxes/Region for an image"""
# from https://towardsdatascience.com/extracting-text-from-scanned-pdf-using-pytesseract-open-cv-cd670ee38052
# load image
im = cv2.imread(image_path)
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (9, 9), 0)
thresh = cv2.adaptiveThreshold(
blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV, 11, 30
)
# Dilate to combine adjacent text contours
kernel = cv2.getStructuringElement(
cv2.MORPH_RECT, (9, 9)
)
dilate = cv2.dilate(thresh, kernel, iterations=4)
# Find contours, highlight text areas, and extract ROIs
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
line_items_coordinates = []
for c in cnts:
area = cv2.contourArea(c)
x, y, w, h = cv2.boundingRect(c)
if y >= 600 and x <= 1000:
if area > 10000:
image = cv2.rectangle(im, (x, y), (2200, y + h), color=(255, 0, 255), thickness=3)
line_items_coordinates.append([(x, y), (2200, y + h)])
if y >= 2400 and x <= 2000:
image = cv2.rectangle(im, (x, y), (2200, y + h), color=(255, 0, 255), thickness=3)
line_items_coordinates.append([(x, y), (2200, y + h)])
if save_img:
image_name = ntpath.basename(image_path)
if '.' in image_name:
image_name = image_name.split('.')[0]
self._save_image_from_array(image, image_path.replace(image_name, image_name + '_auto_pre_marked'))
return image, line_items_coordinates
def _save_image_from_array(self, image, image_path):
# save from array
img = Image.fromarray(image, 'RGB')
img.save(image_path)
return image_path
# - - - - - OCR - - - - -
# + + + + + PDF + + + + +
# + + + + + Classes + + + + +
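# Illustrative usage sketch (not part of the original module); "report.pdf" is an
# assumed file name, and only methods defined on the PDF class above are used:
#
#     pdf = PDF("report.pdf")
#     print(len(pdf))                     # number of pages via __len__
#     print(pdf.get_pdf_text()[:80])      # concatenated page text
#     pdf.pdf_rotate(90, pages=[0])       # writes a *_rotated copy next to the source
#     df = pdf.pdf_to_df(ocr="pyocr")     # per-word OCR boxes as a DataFrame
#     pdf.close()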
|
"""
filename : envelope_info.py
This script is responsible for fetching envelope information from DocuSign.
"""
from docusign_esign import EnvelopesApi, ApiClient
from datetime import datetime, timedelta
import pandas as pd
import streamlit as st
import subprocess
from PyPDF2 import PdfFileMerger
from app.utils.utilities import update_args
def create_api_client(base_path, access_token):
"""
    Function for creating an API client to interact with DocuSign.
    input ::
        - base_path : base path URI
        - access_token : auth token received from DocuSign
output :: api_client object
"""
api_client = ApiClient()
api_client.host = base_path
api_client.set_default_header(header_name="Authorization", header_value=f"Bearer {access_token}")
return api_client
def list_envelopes():
"""
Function for getting list of all envelopes.
input ::
    output :: pandas dataframe containing envelope information.
"""
args = dict()
args = update_args(args)
api_client = create_api_client(args['base_path'], args['access_token'])
envelope_api = EnvelopesApi(api_client)
from_date = (datetime.utcnow() - timedelta(days=10)).isoformat()
results = envelope_api.list_status_changes(args['account_id'], from_date = from_date, include='recipients')
df = pd.DataFrame(results.to_dict()['envelopes'],
columns=["completed_date_time", "created_date_time", "email_subject",
"envelope_attachments","envelope_documents","envelope_id", "status"])
return df
@st.cache(show_spinner=False)
def list_envelope_recipient(envelope_id):
"""
Function for listing envelope recipients
input ::
        - envelope_id : envelope_id to get its recipients
    output :: recipient details
"""
args = dict()
args = update_args(args)
api_client = create_api_client(args['base_path'], args['access_token'])
envelope_api = EnvelopesApi(api_client)
results = envelope_api.list_recipients(args['account_id'], envelope_id)
results = results.to_dict()['signers'][0]
return results['name'], results['email'], results['status']
@st.cache(show_spinner=False)
def list_envelope_document(envelope_id):
"""
Function for listing envelope documents
input ::
        - envelope_id : envelope_id to list its documents
output :: documents details
"""
args = dict()
args = update_args(args)
api_client = create_api_client(args['base_path'], args['access_token'])
envelope_api = EnvelopesApi(api_client)
results = envelope_api.list_documents(account_id=args["account_id"], envelope_id=envelope_id)
envelope_doc_items = list(map(lambda doc:
({"document_id": None})
if (doc.document_id == "certificate") else
({"document_id": doc.document_id, "name": doc.name, "type": doc.type}),
results.envelope_documents))
return envelope_doc_items
@st.cache(show_spinner=False)
def get_envelope_document(envelope_id):
"""
Function for getting envelope documents
input ::
        - envelope_id : envelope_id to get its documents
output :: downloaded document's path
"""
args = dict()
args = update_args(args)
api_client = create_api_client(args['base_path'], args['access_token'])
envelope_doc_items = list_envelope_document(envelope_id)
envelope_api = EnvelopesApi(api_client)
all_docs = []
for doc in envelope_doc_items:
if doc['document_id']:
file_path = envelope_api.get_document(
account_id=args["account_id"],
document_id=doc['document_id'],
envelope_id=envelope_id
)
all_docs.append(file_path)
merger = PdfFileMerger()
for pdf in all_docs:
merger.append(pdf)
merged_file = "/tmp/"+str(envelope_id)+".pdf"
merger.write(merged_file)
merger.close()
# docs_string = " ".join(all_docs)
# docs_string = "pdfunite "+docs_string
# merged_file = "/tmp/"+str(envelope_id)+".pdf"
# docs_string = docs_string+" "+merged_file
# subprocess.call([docs_string],shell=True)
return merged_file
@st.cache(show_spinner=False)
def get_envelope_tabs_value(envelope_id):
"""
Function for getting envelope tab(input fields) values.
input ::
        - envelope_id : envelope_id to get its tabs
output :: tab details
"""
args = dict()
args = update_args(args)
api_client = create_api_client(args['base_path'], args['access_token'])
envelope_api = EnvelopesApi(api_client)
results = envelope_api.get_form_data(account_id=args["account_id"], envelope_id=envelope_id)
results = results.to_dict()['form_data']
tabs = dict()
for tab in results:
tabs[tab['name']] = tab['value']
return tabs
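# Illustrative usage sketch (not part of the original module); it assumes update_args()
# supplies a valid account_id, base_path and access_token as in the functions above:
#
#     df = list_envelopes()                                   # recent envelopes as a DataFrame
#     envelope_id = df.loc[0, "envelope_id"]                  # hypothetical first envelope
#     name, email, status = list_envelope_recipient(envelope_id)
#     merged_pdf_path = get_envelope_document(envelope_id)    # merged PDF written under /tmp
#     tabs = get_envelope_tabs_value(envelope_id)             # {tab name: tab value}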
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import json
import os.path
import pathlib
import shutil
import subprocess
import tempfile
import textwrap
import click
import pkg_resources
import yaml
from decapod_admin import main
from decapod_api import handlers
from decapod_common import log
from decapod_common import pathutils
from decapod_common import plugins
from decapod_common import process
from decapod_common.models import kv
from decapod_common.models import playbook_configuration
from decapod_common.models import task
LOG = log.getLogger(__name__)
"""Logger."""
@main.cli.command(name="external-execution")
@click.argument(
"playbook-configuration-id",
type=click.UUID
)
@click.argument(
"playbook-configuration-version",
type=click.INT
)
@click.argument(
"path",
required=False,
type=click.Path(
dir_okay=False,
exists=False,
file_okay=True,
writable=True
)
)
@click.pass_context
def external_execution(ctx, playbook_configuration_id,
playbook_configuration_version, path):
"""Create bundle for external execution.
    This command creates a tarball which has everything required for
    external execution of the plugin. This tarball includes the command
    line for execution with Ansible, the contents of the plugin, and a
    generated dynamic inventory.
    Please pay attention to the following:
    \b
    - This execution won't be added to Decapod and
      will be done without any Decapod interaction
    - You should have Ansible 2.3 or newer installed
    - Please be sure that ceph-ansible is present in the Ansible
      roles path.
http://docs.ansible.com/ansible/intro_configuration.html#roles-path
https://github.com/ceph/ceph-ansible
"""
playbook_configuration_id = str(playbook_configuration_id)
subdir_path = "{0}-{1}".format(
playbook_configuration_id,
playbook_configuration_version
)
if path is None:
path = subdir_path
path = pathlib.Path(path).absolute()
playbook_config = \
playbook_configuration.PlaybookConfigurationModel.find_version(
playbook_configuration_id, playbook_configuration_version)
if not playbook_config:
ctx.fail("Cannot find such playbook config")
plugin = get_plugin(playbook_config.playbook_id)
working_dir = tempfile.TemporaryDirectory(prefix="exec")
ctx.call_on_close(working_dir.cleanup)
working_dir = pathlib.Path(working_dir.name)
tmpdir = working_dir.joinpath(subdir_path).absolute()
tmpdir.mkdir()
tmpdir.joinpath("fetch_directory").mkdir()
copy_decapod_common_playbooks(tmpdir)
copy_ceph_ansible(tmpdir)
copy_private_ssh_key(tmpdir)
copy_ansible_config(tmpdir)
copy_plugin_contents(tmpdir, plugin)
copy_monitor_keyring(tmpdir, playbook_config)
copy_decapod_data(tmpdir, playbook_config)
dump_inventory(tmpdir, playbook_config)
compose_commandline(tmpdir, playbook_config)
shutil.make_archive(path.as_posix(), "gztar", working_dir.as_posix())
click.echo(path.with_suffix(".tar.gz").as_posix())
def copy_decapod_common_playbooks(path):
destpath = path.joinpath("common_playbooks")
path_to_common_playbooks = pathutils.resource(
"decapod_common", "playbooks"
)
shutil.copytree(path_to_common_playbooks.as_posix(), destpath.as_posix())
def copy_ceph_ansible(path):
destpath = path.joinpath("ceph-ansible")
ceph_ansible_path = subprocess.check_output(
[
"python2", "-c",
(
"import pkg_resources; print "
"pkg_resources.resource_filename('decapod_ansible', "
"'ceph-ansible')"
)
]
)
ceph_ansible_path = ceph_ansible_path.decode("utf-8").rstrip()
shutil.copytree(ceph_ansible_path, destpath.as_posix())
def copy_private_ssh_key(path):
destpath = path.joinpath("ssh-private-key.pem")
sourcepath = pathutils.HOME.joinpath(".ssh", "id_rsa")
shutil.copy(sourcepath.as_posix(), destpath.as_posix())
destpath.chmod(0o400)
def copy_ansible_config(path):
destpath = path.joinpath("ansible.cfg")
sourcepath = pathutils.ROOT.joinpath("etc", "ansible", "ansible.cfg")
shutil.copy2(sourcepath.as_posix(), destpath.as_posix())
parser = configparser.RawConfigParser()
with destpath.open() as fp:
parser.read_file(fp)
defaults_to_remove = (
"action_plugins",
"callback_plugins",
"connection_plugins",
"filter_plugins",
"lookup_plugins",
"vars_plugins"
)
for name in defaults_to_remove:
try:
parser.remove_option("defaults", name)
except Exception:
pass
try:
parser.remove_section("ssh_connection")
except Exception:
pass
parser.set("defaults", "roles_path", "ceph-ansible/roles")
parser.set("defaults", "private_key_file", "ssh-private-key.pem")
parser.set("defaults", "action_plugins", "ceph-ansible/plugins/actions")
with destpath.open("w") as fp:
parser.write(fp)
def copy_plugin_contents(path, plugin):
module_name = plugin.module_name.split(".", 1)[0]
plugin_path = path.joinpath("plugin")
plugin_path.mkdir()
for entry in plugin.dist.resource_listdir(module_name):
if entry == "__pycache__":
continue
filename = plugin.dist.get_resource_filename(
pkg_resources._manager,
os.path.join(module_name, entry)
)
filename = pathlib.Path(filename).absolute()
destpath = plugin_path.joinpath(filename.name)
if filename.is_dir():
shutil.copytree(filename.as_posix(), destpath.as_posix(),
symlinks=True)
else:
shutil.copy2(filename.as_posix(), destpath.as_posix(),
follow_symlinks=False)
def copy_monitor_keyring(path, config):
secret = kv.KV.find_one("monitor_secret",
config.configuration["global_vars"]["fsid"])
if secret:
path.joinpath("fetch_directory", "monitor_keyring").write_text(
secret.value
)
def copy_decapod_data(path, config):
destpath = path.joinpath("decapod_data")
destpath.mkdir()
destpath.joinpath("playbook-configuration.json").write_text(
json_dumps(config)
)
destpath.joinpath("cluster.json").write_text(json_dumps(config.cluster))
server_path = destpath.joinpath("servers")
server_path.mkdir()
for srv in config.servers:
server_path.joinpath("{0}.json".format(srv.model_id)).write_text(
json_dumps(srv)
)
server_path.joinpath("{0}.json".format(srv.ip)).symlink_to(
"{0}.json".format(srv.model_id)
)
cluster_servers_path = destpath.joinpath("cluster_servers")
cluster_servers_path.mkdir()
for srv in config.cluster.server_list:
cluster_servers_path.joinpath(
"{0}.json".format(srv.model_id)).write_text(
json_dumps(srv)
)
cluster_servers_path.joinpath("{0}.json".format(srv.ip)).symlink_to(
"{0}.json".format(srv.model_id)
)
def dump_inventory(path, config):
inventory = config.configuration["inventory"]
hostvars = inventory.get("_meta", {})
hostvars = hostvars.get("hostvars", {})
children = {}
yaml_inventory = {
"all": {"children": children},
"vars": {},
}
for groupname, groupstruct in inventory.items():
if groupname == "_meta":
continue
hostsdict = {}
children[groupname] = {
"hosts": hostsdict,
"vars": {}
}
if isinstance(groupstruct, dict):
children[groupname]["vars"] = groupstruct["vars"]
for hostname in groupstruct["hosts"]:
hostsdict[hostname] = hostvars.get(hostname, {})
else:
for hostname in groupstruct:
hostsdict[hostname] = hostvars.get(hostname, {})
path.joinpath("inventory.yaml").write_text(
yaml.dump(yaml_inventory,
default_flow_style=False, explicit_start=True, indent=4)
)
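# Illustrative sketch (not part of the original module): for a hypothetical inventory such as
# {"mons": {"hosts": ["10.0.0.1"], "vars": {"x": 1}}, "_meta": {"hostvars": {"10.0.0.1": {"y": 2}}}}
# dump_inventory() above would emit roughly the following inventory.yaml (values are assumptions):
#
#     ---
#     all:
#         children:
#             mons:
#                 hosts:
#                     10.0.0.1:
#                         y: 2
#                 vars:
#                     x: 1
#     vars: {}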
def compose_commandline(path, playbook_config):
destpath = path.joinpath("execute.sh")
faketask = task.PlaybookPluginTask(
playbook_config.playbook_id, playbook_config._id, None)
plugin = plugins.get_public_playbook_plugins()[playbook_config.playbook_id]
plugin = plugin()
plugin.compose_command(faketask)
proc = plugin.proc
proc.env = {}
proc.options["--inventory-file"] = "inventory.yaml"
extras = json.loads(proc.options["--extra-vars"])
extras["decapod_common_playbooks"] = "../common_playbooks"
extras["fetch_directory"] = "fetch_directory"
extras = patch_plugin_paths(extras, plugin)
proc.options["--extra-vars"] = process.jsonify(extras)
proc.command = "ansible-playbook"
proc.args = [
path.joinpath("plugin", plugin.playbook_filename)
.relative_to(path).as_posix()
]
shell_script = """\
#!/bin/bash
set +e
cd "$(dirname "$0")"
{0}
cd - >/dev/null 2>&1
""".format(proc.printable_commandline)
shell_script = textwrap.dedent(shell_script)
destpath.write_text(shell_script)
destpath.chmod(0o755)
def patch_plugin_paths(extras, plugin):
if isinstance(extras, dict):
return {k: patch_plugin_paths(v, plugin) for k, v in extras.items()}
elif isinstance(extras, list):
return [patch_plugin_paths(el, plugin) for el in extras]
elif isinstance(extras, str):
module_name = plugin.module_name.split(".", 1)[0]
local_path = pkg_resources.resource_filename(module_name, "")
return extras.replace(local_path + "/", "")
return extras
def get_plugin(plugin_id):
all_plugins = {
pkg.name: pkg
for pkg in pkg_resources.iter_entry_points(group=plugins.NS_PLAYBOOKS)
}
return all_plugins[plugin_id]
def json_dumps(data):
return json.dumps(data, cls=handlers.JSONEncoder, sort_keys=True, indent=4)
|
#!/usr/bin/env python
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from kido import app
from kido.constants import PERMISSION_ADMIN
from kido.forms import EmailForm
from kido.models import db, User, Group, Permission
def main():
if not len(sys.argv) == 2:
print("Missing email.")
return -1
with app.app_context():
form = EmailForm.from_json({"email": sys.argv[1]})
if not form.validate():
print("Error: {}".format(form.errors["email"][0]))
return -2
email = form.data["email"]
user = User.query.filter_by(email=email).first()
if not user:
user = User(first_name="Admin", email=email, password="123456789")
db.session.add(user)
db.session.commit()
group = Group.query.filter_by(name="Admin").first()
if group is None:
group = Group(name="Admin")
group.permissions.append(Permission(name=PERMISSION_ADMIN))
db.session.add(group)
db.session.commit()
user.groups.append(group)
db.session.commit()
print("Added user as admin. They can now access the /admin url.")
return 0
if __name__ == "__main__":
sys.exit(main())
|
import os
import uuid
import shutil
import random
import filecmp
import itertools
from collections import namedtuple
from unittest.mock import patch, call
import pytest
import alphacopy
@pytest.fixture(scope='function')
def random_tree():
"""
This fixture creates a random folder tree in ./tmp/ filled with files
It deletes the entire tree after the test
The tree is unique for each test run and each test function
"""
def create_files(folder: str):
for i in range(random.randint(1, 3)):
_ = create_random_file(folder=folder)
def create_folders(folder: str, child_folders: int):
for i in range(child_folders + 1):
new_folder = os.path.join(folder, str(uuid.uuid4()))
os.mkdir(new_folder)
create_files(new_folder)
create_folders(new_folder, child_folders - 1)
root = 'tmp'
os.makedirs(root)
try:
create_folders(root, 3)
yield root
except: # noqa E722
raise
finally:
shutil.rmtree(root)
@pytest.fixture(scope='function')
def random_file():
try:
filename = create_random_file('.')
yield filename
except: # noqa E722
raise
finally:
try:
os.remove(filename)
except: # noqa E722
pass
def create_random_file(folder: str = '.'):
"""
    This function creates a random file at a given location
    The file size ranges from 1 byte to 1 KB
    The function returns the filename of the created file
"""
filename = os.path.join(folder, str(uuid.uuid4()))
    filesize = random.randint(1, 1024) # file up to 1 KB
with open(filename, 'wb') as f:
f.write(os.urandom(filesize))
return filename
def test_copy_binary(random_file):
src = random_file
dst = 'output_file'
alphacopy.copy_file(src, dst)
assert os.path.exists(dst)
assert filecmp.cmp(src, dst)
os.remove(dst)
def test_check_disks_returns_bool():
x = alphacopy.check_disks('.', '.')
assert isinstance(x, bool)
@patch('alphacopy.copy.disk_usage')
def test_check_disks_disk_usage_calls(mock_disk_usage):
DiskResponse = namedtuple('DiskResponse', ['total', 'used', 'free'])
mock_disk_usage.return_value = DiskResponse(2, 1, 1)
_ = alphacopy.check_disks('a', 'b')
assert mock_disk_usage.call_count == 2
assert mock_disk_usage.call_args_list == [call('a'), call('b')]
@patch('alphacopy.copy.disk_usage')
def test_check_disks_space_ok(mock_disk_usage):
DiskResponse = namedtuple('DiskResponse', ['total', 'used', 'free'])
mock_disk_usage.return_value = DiskResponse(2, 1, 1)
assert alphacopy.check_disks('a', 'b')
@patch('alphacopy.copy.disk_usage')
def test_check_disks_space_not_ok(mock_disk_usage):
DiskResponse = namedtuple('DiskResponse', ['total', 'used', 'free'])
mock_disk_usage.return_value = DiskResponse(3, 2, 1)
assert not alphacopy.check_disks('a', 'b')
def test_external_disks_returns_list(random_tree):
return_value = alphacopy.external_disks(random_tree)
assert isinstance(return_value, list)
def test_external_disks_raises_value_error_when_src_doesnt_exist():
src = str(uuid.uuid4())
with pytest.raises(ValueError):
_ = alphacopy.external_disks(src)
def test_external_disks_raises_value_error_when_src_is_a_file(random_tree):
for root, folders, files in os.walk(random_tree):
if len(files) >= 1:
break
file = files[0]
with pytest.raises(ValueError):
_ = alphacopy.external_disks(file)
def test_external_disks_src_with_no_folders_and_no_files(random_tree):
for root, folders, files in os.walk(random_tree):
if len(folders) == 0 and len(files) == 0:
break
return_value = alphacopy.external_disks(root)
assert len(return_value) == 0
def test_external_disks_src_with_no_folders_but_some_files(random_tree):
for root, folders, files in os.walk(random_tree):
if len(folders) == 0 and len(files) > 0:
break
return_value = alphacopy.external_disks(root)
assert len(return_value) == 0
def test_external_disks_src_with_some_folders_not_mounted(random_tree):
for root, folders, files in os.walk(random_tree):
if len(folders) > 0:
break
return_value = alphacopy.external_disks(root)
assert len(return_value) == 0
def test_external_disks_src_with_one_folder_mounted(random_tree):
for root, folders, files in os.walk(random_tree):
if len(folders) == 1:
break
with patch('alphacopy.copy.os.path.ismount', return_value=True):
return_value = alphacopy.external_disks(root)
assert len(return_value) == 1
assert return_value[0] == os.path.join(root, folders[0])
def test_external_disks_src_with_many_folders_mounted(random_tree):
for root, folders, files in os.walk(random_tree):
if len(folders) > 1:
break
with patch('alphacopy.copy.os.path.ismount', return_value=True):
return_value = alphacopy.external_disks(root)
assert len(return_value) == len(folders)
assert set(return_value) == {os.path.join(root, f) for f in folders}
def test_external_disks_src_with_some_folders_mounted(random_tree):
for root, folders, files in os.walk(random_tree):
if len(folders) > 1:
break
folders = folders[::2]
side_effect = itertools.cycle((True, False))
with patch('alphacopy.copy.os.path.ismount', side_effect=side_effect):
return_value = alphacopy.external_disks(root)
assert len(return_value) == len(folders)
assert set(return_value) == {os.path.join(root, f) for f in folders}
|
import copy
import datetime
from collections import defaultdict
import numpy as np
import pycocotools.mask as mask_utils
class SegCocoEval:
def __init__(self, coco_gt=None, coco_dt=None, iou_type='segm'):
self.cocoGt = coco_gt # ground truth COCO API
self.cocoDt = coco_dt # detections COCO API
self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iou_type=iou_type) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
self.abo = 0.
if coco_gt is not None:
self.params.imgIds = sorted(coco_gt.getImgIds())
self.params.catIds = sorted(coco_dt.getCatIds())
@staticmethod
def _to_mask(anns, coco):
for ann in anns:
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
def _prepare(self):
p = self.params
if p.useCats:
gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
if p.iouType == 'segm':
self._to_mask(gts, self.cocoGt)
self._to_mask(dts, self.cocoDt)
for gt in gts:
gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt['image_id'], gt['category_id']].append(gt)
for dt in dts:
self._dts[dt['image_id'], dt['category_id']].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
p = self.params
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
        self.ious = {(imgId, catId): self.computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds}  # IoUs for each category of every image
_abo = []
for overlap in self.ious.values():
if len(overlap) == 0:
continue
best_overlap = np.max(overlap, axis=0).mean()
_abo.append(best_overlap)
self.abo = sum(_abo) / len(_abo)
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
self.evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet) for catId in catIds
for areaRng in p.areaRng for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
def computeIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return []
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt = dt[0:p.maxDets[-1]]
if p.iouType == 'segm':
g = [g['segmentation'] for g in gt]
d = [d['segmentation'] for d in dt]
elif p.iouType == 'bbox':
g = [g['bbox'] for g in gt]
d = [d['bbox'] for d in dt]
else:
raise Exception('unknown iouType for iou computation')
# compute iou between each dt and gt region
iscrowd = [int(o['iscrowd']) for o in gt]
ious = mask_utils.iou(d, g, iscrowd) # [dt_num, gt_num]
return ious
def evaluateImg(self, imgId, catId, aRng, maxDet):
p = self.params
if p.useCats:
gt = self._gts[imgId, catId]
dt = self._dts[imgId, catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
if len(gt) == 0 and len(dt) == 0:
return None
for g in gt:
if g['ignore'] or (g['area'] < aRng[0] or g['area'] > aRng[1]):
g['_ignore'] = 1
else:
g['_ignore'] = 0
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o['iscrowd']) for o in gt]
# load computed ious
ious = self.ious[imgId, catId][:, gtind] if len(
self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T, G))
dtm = np.zeros((T, D))
gtIg = np.array([g['_ignore'] for g in gt])
dtIg = np.zeros((T, D))
if not len(ious) == 0:
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t, 1 - 1e-10])
m = -1
for gind, g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind, gind] > 0 and not iscrowd[gind]:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
break
# continue to next gt unless better match made
if ious[dind, gind] < iou:
continue
# if match successful and best so far, store
# appropriately
iou = ious[dind, gind]
m = gind
# if match made store id of match for both dt and gt
if m == -1:
continue
dtIg[tind, dind] = gtIg[m]
dtm[tind, dind] = gt[m]['id']
gtm[tind, m] = d['id']
# set unmatched detections outside of area range to ignore
a = np.array([d['area'] < aRng[0] or d['area'] > aRng[1]
for d in dt]).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T,
0)))
# store results for given image and category
return {
'image_id': imgId,
'category_id': catId,
'aRng': aRng,
'maxDet': maxDet,
'dtIds': [d['id'] for d in dt],
'gtIds': [g['id'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'gtIgnore': gtIg,
'dtIgnore': dtIg,
}
def accumulate(self, p=None):
if not self.evalImgs:
print('Please run evaluate() first')
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -np.ones(
(T, R, K, A, M)) # -1 for the precision of absent categories
recall = -np.ones((T, K, A, M))
scores = -np.ones((T, R, K, A, M))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [
n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng))
if a in setA
]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.areaRng)
# retrieve E at each category, area range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0 * A0 * I0
for a, a0 in enumerate(a_list):
Na = a0 * I0
for m, maxDet in enumerate(m_list):
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if e is not None]
if len(E) == 0:
continue
dtScores = np.concatenate(
[e['dtScores'][0:maxDet] for e in E])
# different sorting method generates slightly different
# results. mergesort is used to be consistent as Matlab
# implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate(
[e['dtMatches'][:, 0:maxDet] for e in E], axis=1)[:,
inds]
dtIg = np.concatenate(
[e['dtIgnore'][:, 0:maxDet] for e in E], axis=1)[:,
inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg == 0)
if npig == 0:
continue
tps = np.logical_and(dtm, np.logical_not(dtIg))
fps = np.logical_and(np.logical_not(dtm),
np.logical_not(dtIg))
                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp + tp + np.spacing(1))
q = np.zeros((R,))
ss = np.zeros((R,))
if nd:
recall[t, k, a, m] = rc[-1]
else:
recall[t, k, a, m] = 0
# numpy is slow without cython optimization for
# accessing elements use python array gets significant
# speed improvement
pr = pr.tolist()
q = q.tolist()
for i in range(nd - 1, 0, -1):
if pr[i] > pr[i - 1]:
pr[i - 1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dtScoresSorted[pi]
except: # noqa: E722
pass
precision[t, :, k, a, m] = np.array(q)
scores[t, :, k, a, m] = np.array(ss)
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
'scores': scores,
}
def _summarize(self, iouThr=None, areaRng='all', maxDets=100):
p = self.params
iStr = '{:<21} {:<5} @[ IoU={:<4} ] = {:0.3f}' # noqa: E501
titleStr = 'Average Precision'
typeStr = '(AP)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [
i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng
]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:, :, :, aind, mind]
if len(s[s > -1]) == 0:
mean_s = -1
else:
mean_s = np.mean(s[s > -1])
print(iStr.format(titleStr, typeStr, iouStr, mean_s))
return mean_s
def _summarize_abo(self):
iStr = '{:<21} {:<5} @[ IoU={:<4} ] = {:0.3f}' # noqa: E501
titleStr = 'Average Best Overlap'
typeStr = '(ABO)'
iouStr = '/'
print(iStr.format(titleStr, typeStr, iouStr, self.abo))
return self.abo
def _summarize_det(self):
stats = np.zeros((5,))
stats[0] = self._summarize(iouThr=.25, maxDets=self.params.maxDets[2])
stats[1] = self._summarize(iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = self._summarize(iouThr=.7, maxDets=self.params.maxDets[2])
stats[3] = self._summarize(iouThr=.75, maxDets=self.params.maxDets[2])
stats[4] = self._summarize_abo()
return stats
def summarize(self):
if not self.eval:
raise Exception('Please run accumulate() first')
self.stats = self._summarize_det()
def __str__(self):
self.summarize()
class Params:
def __init__(self, iou_type='segm'):
self.imgIds = []
self.catIds = []
self.iouThrs = np.linspace(.25, 0.95, int(np.round((0.95 - .25) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [1, 10, 100]
self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'small', 'medium', 'large']
self.useCats = 1
self.iouType = iou_type
self.useSegm = None
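# Illustrative usage sketch (not part of the original module); `coco_gt` and `coco_dt`
# are assumed to be pycocotools COCO objects for ground truth and detections:
#
#     evaluator = SegCocoEval(coco_gt, coco_dt, iou_type='segm')
#     evaluator.evaluate()      # per-image, per-category matching and IoUs
#     evaluator.accumulate()    # precision/recall over IoU and recall thresholds
#     evaluator.summarize()     # prints AP at IoU 0.25/0.50/0.70/0.75 plus the ABO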
|
def changedp(A, V):
    # DP table: a[i][j] = minimum number of coins drawn from V[0..i] needed to
    # make amount j (the first denomination is assumed to be 1)
    a = [[0] * (A + 1) for _ in range(len(V))]
    for j in range(1, A + 1):
        a[0][j] = j
    for i in range(1, len(V)):
        for j in range(1, A + 1):
            if j >= V[i]:
                a[i][j] = min(a[i - 1][j], 1 + a[i][j - V[i]])
            else:
                a[i][j] = a[i - 1][j]
    minChange = a[len(V) - 1][A]
    # the following code traces back through the table to find
    # the count of each coin value used, c[i]
    m = [-1] * len(V)
    x = len(V) - 1
    m[x] = 0
    y = A
    print(a[x][y])
    value = 1
    while value > 0:
        if y < V[x]:
            x = x - 1
            m[x] = m[x] + 1
        else:
            if a[x - 1][y] < (a[x][y - V[x]] + 1):
                x = x - 1
                m[x] = m[x] + 1
            else:
                y = y - V[x]
                m[x] = m[x] + 1
        value = a[x][y]
    for i in range(len(m)):
        if m[i] == -1:
            m[i] = 0
    return (m, minChange)
K = 31
value = [1, 3, 7, 12]
c, result = changedp(K, value)
print(c[0:len(c)])
print("Min Result: %d" % result)
|
from itertools import product
import matplotlib.pyplot as plt
import mpl_extras as me
import tpsim as tp
import numpy as np
import warnings
import os
# --------------------------------------------------------------------------- #
# Simulation parameters
# --------------------------------------------------------------------------- #
## ---------- Simulation time
# Start time [1/wce]
t_start = 0
# Stop time [1/wce]
t_stop = 5 * 2 * np.pi
# Time step [1/wce]
dt = np.pi * 1e-3
# Number of time steps
Nt = int(t_stop / dt)
# Interval to log
log_interval = Nt // 10
## ---------- Background parameters
# Background magnetic field [nT]
B0 = 10
# Background electric field [mV/m]
eps = 1e-3
E0 = eps * tp.c * B0 * 1e-3
# Number density [1/cc]
n = 5
## ---------- Particle parameters
KE = np.array([10, 50])
GP = np.array([0, 0])
PA = np.array([45, 90])
Np = len(KE)
# Normalized position
xn, yn, zn = np.zeros((3, Np))
# Normalized velocity
uxn, uyn, uzn = tp.ES2US(KE, np.radians(GP), np.radians(PA)) / tp.c
## ---------- Electromagnetic field model
## Define the electromagnetic field here (background + perturbations)
def EM_model(t, x, y, z, ux, uy, uz):
"""Returns `Np`-dimensional arrays `Ex`, `Ey`, `Ez`, `Bx`, `By`, `Bz` in
normalized units.
"""
Ex = np.ones(Np) * eps
Ey = np.zeros(Np)
Ez = np.zeros(Np)
Bx = np.zeros(Np)
By = np.zeros(Np)
Bz = np.ones(Np)
return Ex, Ey, Ez, Bx, By, Bz
# --------------------------------------------------------------------------- #
# Post-processing
# --------------------------------------------------------------------------- #
def check_solution(X, Y, Z, UX, UY, UZ, s, tol=1e-3):
r"""
The analytical solution is given by
x(t) = x_0 + v_\bot \sin(t+\delta)
y(t) = y_0 + qq v_\bot \cos(t+\delta) - |ExB| t
v_x(t) = v_\bot\cos(t + \delta)
v_y(t) = -qq v_\bot\sin(t + \delta) - |ExB| (|B|=1)
"""
qq = tp.qq[s]
T = np.arange(Nt) * dt
vperp = np.sqrt(uxn ** 2 + uyn ** 2)
delta = np.arctan2(-qq * (uyn + eps), uxn)
# Solve for IC
x0 = xn - vperp * np.sin(delta)
y0 = yn - qq * vperp * np.cos(delta)
# Create solution arrays
XS, YS, ZS, UXS, UYS, UZS = np.zeros((6, Np, Nt))
# Loop through particles
for i in range(X.shape[0]):
XS[i, :] = x0[i] + vperp[i] * np.sin(T + delta[i])
YS[i, :] = y0[i] + qq * vperp[i] * np.cos(T + delta[i]) - eps * T
ZS[i, :] = zn[i] + uzn[i] * T
UXS[i, :] = vperp[i] * np.cos(T + delta[i])
UYS[i, :] = -qq * vperp[i] * np.sin(T + delta[i]) - eps
UZS[i, :] = uzn[i]
# Check
assert np.isclose(X, XS, rtol=tol, atol=tol).all()
assert np.isclose(Y, YS, rtol=tol, atol=tol).all()
assert np.isclose(Z, ZS, rtol=tol, atol=tol).all()
assert np.isclose(UX, UXS, rtol=tol, atol=tol).all()
assert np.isclose(UY, UYS, rtol=tol, atol=tol).all()
assert np.isclose(UZ, UZS, rtol=tol, atol=tol).all()
me.setup_mpl(tex=True)
# Loop through particles
for i in range(X.shape[0]):
# Create figure
fig, axes = plt.subplots(3, 2, figsize=(12, 6), sharex=True)
fig.subplots_adjust(wspace=0.3)
fig.suptitle(
f"Particle = {s}; KE0 = {KE[i]} eV; P0 = {PA[i]}$^\circ$"
)
# Plot solved solutions
axes[0, 0].plot(T, X[i, :], "-k")
axes[1, 0].plot(T, Y[i, :], "-k")
axes[2, 0].plot(T, Z[i, :], "-k")
axes[0, 1].plot(T, UX[i, :], "-k")
axes[1, 1].plot(T, UY[i, :], "-k")
axes[2, 1].plot(T, UZ[i, :], "-k")
# Plot analytical solutions
axes[0, 0].plot(T, XS[i, :], "--r")
axes[1, 0].plot(T, YS[i, :], "--r")
axes[2, 0].plot(T, ZS[i, :], "--r")
axes[0, 1].plot(T, UXS[i, :], "--r")
axes[1, 1].plot(T, UYS[i, :], "--r")
axes[2, 1].plot(T, UZS[i, :], "--r")
# Formats
axes[0, 0].set_ylabel("$x\\Omega_{c}/c$")
axes[1, 0].set_ylabel("$y\\Omega_{c}/c$")
axes[2, 0].set_ylabel("$z\\Omega_{c}/c$")
axes[0, 1].set_ylabel("$u_x/c$")
axes[1, 1].set_ylabel("$u_y/c$")
axes[2, 1].set_ylabel("$u_z/c$")
for (m, n) in np.ndindex(axes.shape):
ax = axes[m, n]
ax.tick_params(**me.params)
ax.set_xlim(T.min(), T.max())
            if m == 2:  # x-label only on the bottom row of panels
ax.set_xlabel("$t\\Omega_{c}$")
string = "electron" if s == "e-" else "ion"
fig.savefig(f"{string}_trajectories_{i}.png")
plt.close(fig)
# --------------------------------------------------------------------------- #
# Run simulation
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
for s in ["e-", "i"]:
# Initial conditions
t, x, y, z, ux, uy, uz = t_start, xn, yn, zn, uxn, uyn, uzn
# History arrays
X, Y, Z, UX, UY, UZ = np.zeros((6, Np, Nt))
X[:, 0] = x
Y[:, 0] = y
Z[:, 0] = z
UX[:, 0] = ux
UY[:, 0] = uy
UZ[:, 0] = uz
# Main loop
print(f"Starting main loop for {s}")
advance = tp.advance
for n in range(1, Nt):
# Advance particles
t, x, y, z, ux, uy, uz = advance(
t, x, y, z, ux, uy, uz, EM_model, dt, s=s
)
# Save to history arrays
X[:, n] = x
Y[:, n] = y
Z[:, n] = z
UX[:, n] = ux
UY[:, n] = uy
UZ[:, n] = uz
# Log
if n % log_interval == 0: print(f"Pushed {n} steps")
print(f"Done!")
# Post-processing
check_solution(X, Y, Z, UX, UY, UZ, s)
|
from typing import Union, Optional
from ..codes import OpCodes
class OutGoingResponse:
def __init__(
self,
req_id: int,
status: Optional[int] = None,
headers: Optional[Union[list, tuple]] = None,
body: Optional[str] = None
):
if req_id is not None:
assert type(req_id) == int, "req_id type is not int" # The server only really cares about this type.
if status is not None:
assert type(status) == int, "status type is not int" # The server only really cares about this type.
self.req_id = req_id
self.status = status
self.headers = headers
if body is None:
self.body = ""
else:
self.body = body
def to_dict(self):
return {
"op": OpCodes.HTTP_REQUEST,
"request_id": self.req_id,
"status": self.status,
"headers": self.headers,
"body": self.body
}
def __repr__(self):
return "OutGoingResponse(request_id={}, status_code={})".format(self.req_id, self.status)
|
import traceback
import discord
from discord.ext import commands
#from src.utils.execption import PermError
from utils.execption import PermError
from utils.create_embed import embeds
from data import colors
class handling(commands.Cog):
"""
    This is where errors get handled
"""
def __init__(self,bot):
self.bot = bot
@commands.Cog.listener()
async def on_command_error(self, ctx: commands.Context, error: Exception):
print(str(traceback.format_exc()))
if isinstance(error, PermError.NotRegister):
return await embeds(ctx=ctx).NotRegister()
elif isinstance(error,commands.CommandOnCooldown):
return await ctx.reply(f"๋ง๋ ฅ์ด ๋ค ํ๋ณต๋๋ ค๋ฉด `{round(error.retry_after, 2)}`์ด ๋จ์์ด์..")
elif isinstance(error,commands.CommandNotFound):
return await ctx.reply(f"`{ctx.invoked_with}`?")
elif isinstance(error, commands.errors.PrivateMessageOnly):
await ctx.send(
embed=discord.Embed(
                    title='❌ DM-only command',
                    description='This command can only be used in direct messages!',
color=colors.ERROR
)
)
elif isinstance(error, commands.NotOwner):
await ctx.send(
embed=discord.Embed(
                    title='❌ Developer-only command',
                    description='This command can only be used by the developer.',
color=colors.ERROR
)
)
else:
tb = ''.join(traceback.format_exception(type(error), error, error.__traceback__))
print(tb)
await ctx.send(
embed=discord.Embed(
title="โ ์ค๋ฅ๊ฐ ๋ฐ์ํ์ต๋๋ค!",
description=f'```python\n{tb}```',
color=colors.ERROR
)
)
def setup(bot):
bot.add_cog(handling(bot))
|
# Generated by Django 3.2.9 on 2021-11-08 17:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DateRange',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('description', models.TextField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, unique=True)),
('description', models.TextField(blank=True, null=True)),
('done', models.BooleanField(default=False)),
('archive', models.BooleanField(default=False)),
('points', models.IntegerField(default=1)),
('many_dateranges', models.ManyToManyField(blank=True, to='agendjang.DateRange')),
('many_tags', models.ManyToManyField(blank=True, to='agendjang.Tag')),
],
),
migrations.AddField(
model_name='daterange',
name='task_myset',
field=models.ManyToManyField(blank=True, to='agendjang.Task'),
),
migrations.CreateModel(
name='ScheduledTask',
fields=[
('task_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='agendjang.task')),
('many_tasks', models.ManyToManyField(blank=True, related_name='linked_tasks', to='agendjang.Task')),
],
bases=('agendjang.task',),
),
]
|
def greet(people):
printout = []
for p in people:
to_print = 'hello {}'.format(p)
printout.append(to_print)
print(to_print)
return printout
if __name__ == '__main__':
everybody = [
'colin',
'pierre'
]
greet(everybody)
|
import torch
import torch.nn as nn
import numpy as np
from torch.distributions import Normal
def get_acq_fn(args):
if args.acq_fn.lower() == "ucb":
return UCB
elif args.acq_fn.lower() == "ei":
return EI
else:
return NoAF
class AcquisitionFunctionWrapper():
def __init__(self, model):
self.model = model
def __call__(self, x):
raise NotImplementedError()
def update(self, data):
self.fit(data)
def fit(self, data):
self.model.fit(data)
class NoAF(AcquisitionFunctionWrapper):
def __call__(self, x):
return self.l2r(self.model(x))
class UCB(AcquisitionFunctionWrapper):
def __init__(self, model, sequences):
super().__init__(model)
self.kappa = 0.1
def __call__(self, mean, std):
return mean + self.kappa * std
class EI(AcquisitionFunctionWrapper):
def __init__(self, model, sequences):
super().__init__(model)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.sigmoid = nn.Sigmoid()
self.best_f = None
self.sequences = sequences
def __call__(self, mean, std):
if self.best_f is None:
self.best_f = torch.tensor(self.model.get_fitness(self.sequences).max())
mean, std = torch.tensor(mean), torch.tensor(std)
self.best_f = self.best_f.to(mean)
# deal with batch evaluation and broadcasting
#view_shape = mean.shape[:-2] #if mean.dim() >= x.dim() else x.shape[:-2]
#mean = mean.view(view_shape)
#std = std.view(view_shape)
u = (mean - self.best_f.expand_as(mean)) / std
normal = Normal(torch.zeros_like(u), torch.ones_like(u))
ucdf = normal.cdf(u)
updf = torch.exp(normal.log_prob(u))
ei = std * (updf + u * ucdf)
return ei.cpu().numpy()
def update(self, data):
self.best_f = self._get_best_f(data)
self.fit(data)
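# Illustrative usage sketch (not part of the original module); `args`, `model` and
# `sequences` are assumed inputs, and only the UCB/EI wrappers take `sequences`:
#
#     acq_cls = get_acq_fn(args)        # args.acq_fn selects "ucb", "ei" or falls back to NoAF
#     acq = acq_cls(model, sequences)   # for UCB or EI; NoAF takes only the model
#     scores = acq(mean, std)           # UCB/EI score candidates from posterior mean and std arrays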
|
import sys, pathlib
# sys.path entries should be plain strings, so convert the Path explicitly
sys.path.append(str(pathlib.Path('..')))
|
# generated by datamodel-codegen:
# filename: openapi.yaml
# timestamp: 2021-12-31T02:59:10+00:00
from __future__ import annotations
from enum import Enum
from typing import Annotated, Any, List, Optional
from pydantic import BaseModel, Field
class InvalidParameterException(BaseModel):
__root__: Any
class InvalidS3ObjectException(InvalidParameterException):
pass
class UnsupportedDocumentException(InvalidParameterException):
pass
class DocumentTooLargeException(InvalidParameterException):
pass
class BadDocumentException(InvalidParameterException):
pass
class AccessDeniedException(InvalidParameterException):
pass
class ProvisionedThroughputExceededException(InvalidParameterException):
pass
class InternalServerError(InvalidParameterException):
pass
class ThrottlingException(InvalidParameterException):
pass
class HumanLoopQuotaExceededException(InvalidParameterException):
pass
class InvalidJobIdException(InvalidParameterException):
pass
class InvalidKMSKeyException(InvalidParameterException):
pass
class IdempotentParameterMismatchException(InvalidParameterException):
pass
class LimitExceededException(InvalidParameterException):
pass
class String(BaseModel):
__root__: str
class BlockType(Enum):
KEY_VALUE_SET = 'KEY_VALUE_SET'
PAGE = 'PAGE'
LINE = 'LINE'
WORD = 'WORD'
TABLE = 'TABLE'
CELL = 'CELL'
SELECTION_ELEMENT = 'SELECTION_ELEMENT'
class Percent(BaseModel):
__root__: Annotated[float, Field(ge=0.0, le=100.0)]
class TextType(Enum):
HANDWRITING = 'HANDWRITING'
PRINTED = 'PRINTED'
class UInteger(BaseModel):
__root__: Annotated[int, Field(ge=0.0)]
class NonEmptyString(BaseModel):
__root__: Annotated[str, Field(regex='.*\\S.*')]
class SelectionStatus(Enum):
SELECTED = 'SELECTED'
NOT_SELECTED = 'NOT_SELECTED'
class Float(BaseModel):
__root__: float
class BoundingBox(BaseModel):
"""
<p>The bounding box around the detected page, text, key-value pair, table, table cell, or selection element on a document page. The <code>left</code> (x-coordinate) and <code>top</code> (y-coordinate) are coordinates that represent the top and left sides of the bounding box. Note that the upper-left corner of the image is the origin (0,0). </p> <p>The <code>top</code> and <code>left</code> values returned are ratios of the overall document page size. For example, if the input image is 700 x 200 pixels, and the top-left coordinate of the bounding box is 350 x 50 pixels, the API returns a <code>left</code> value of 0.5 (350/700) and a <code>top</code> value of 0.25 (50/200).</p> <p>The <code>width</code> and <code>height</code> values represent the dimensions of the bounding box as a ratio of the overall document page dimension. For example, if the document page size is 700 x 200 pixels, and the bounding box width is 70 pixels, the width returned is 0.1. </p>
"""
Width: Optional[Float] = None
Height: Optional[Float] = None
Left: Optional[Float] = None
Top: Optional[Float] = None
class ClientRequestToken(BaseModel):
__root__: Annotated[
str, Field(max_length=64, min_length=1, regex='^[a-zA-Z0-9-_]+$')
]
class ContentClassifier(Enum):
FreeOfPersonallyIdentifiableInformation = 'FreeOfPersonallyIdentifiableInformation'
FreeOfAdultContent = 'FreeOfAdultContent'
class ContentClassifiers(BaseModel):
__root__: Annotated[List[ContentClassifier], Field(max_items=256)]
class ImageBlob(BaseModel):
__root__: Annotated[str, Field(max_length=10485760, min_length=1)]
class EntityType(Enum):
KEY = 'KEY'
VALUE = 'VALUE'
class ErrorCode(String):
pass
class ExpenseType(BaseModel):
"""
An object used to store information about the Type detected by Amazon Textract.
"""
Text: Optional[String] = None
Confidence: Optional[Percent] = None
class FeatureType(Enum):
TABLES = 'TABLES'
FORMS = 'FORMS'
class FlowDefinitionArn(BaseModel):
__root__: Annotated[str, Field(max_length=256)]
class JobId(ClientRequestToken):
pass
class MaxResults(BaseModel):
__root__: Annotated[int, Field(ge=1.0)]
class PaginationToken(BaseModel):
__root__: Annotated[str, Field(max_length=255, min_length=1, regex='.*\\S.*')]
class JobStatus(Enum):
IN_PROGRESS = 'IN_PROGRESS'
SUCCEEDED = 'SUCCEEDED'
FAILED = 'FAILED'
PARTIAL_SUCCESS = 'PARTIAL_SUCCESS'
class StatusMessage(String):
pass
class HumanLoopActivationConditionsEvaluationResults(BaseModel):
__root__: Annotated[str, Field(max_length=10240)]
class HumanLoopArn(FlowDefinitionArn):
pass
class HumanLoopActivationReason(String):
pass
class HumanLoopName(BaseModel):
__root__: Annotated[
str, Field(max_length=63, min_length=1, regex='^[a-z0-9](-*[a-z0-9])*')
]
class HumanLoopDataAttributes(BaseModel):
"""
Allows you to set attributes of the image. Currently, you can declare an image as free of personally identifiable information and adult content.
"""
ContentClassifiers: Optional[ContentClassifiers] = None
class IdList(BaseModel):
__root__: List[NonEmptyString]
class JobTag(BaseModel):
__root__: Annotated[
str, Field(max_length=64, min_length=1, regex='[a-zA-Z0-9_.\\-:]+')
]
class KMSKeyId(BaseModel):
__root__: Annotated[
str,
Field(
max_length=2048,
min_length=1,
regex='^[A-Za-z0-9][A-Za-z0-9:_/+=,@.-]{0,2048}$',
),
]
class SNSTopicArn(BaseModel):
__root__: Annotated[
str,
Field(
max_length=1024,
min_length=20,
regex='(^arn:([a-z\\d-]+):sns:[a-zA-Z\\d-]{1,20}:\\w{12}:.+$)',
),
]
class RoleArn(BaseModel):
__root__: Annotated[
str,
Field(
max_length=2048,
min_length=20,
regex='arn:([a-z\\d-]+):iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+',
),
]
class NotificationChannel(BaseModel):
"""
The Amazon Simple Notification Service (Amazon SNS) topic to which Amazon Textract publishes the completion status of an asynchronous document operation, such as <a>StartDocumentTextDetection</a>.
"""
SNSTopicArn: SNSTopicArn
RoleArn: RoleArn
class S3Bucket(BaseModel):
__root__: Annotated[
str, Field(max_length=255, min_length=3, regex='[0-9A-Za-z\\.\\-_]*')
]
class S3ObjectName(BaseModel):
__root__: Annotated[str, Field(max_length=1024, min_length=1, regex='.*\\S.*')]
class OutputConfig(BaseModel):
"""
<p>Sets whether or not your output will go to a user created bucket. Used to set the name of the bucket, and the prefix on the output file.</p> <p> <code>OutputConfig</code> is an optional parameter which lets you adjust where your output will be placed. By default, Amazon Textract will store the results internally and can only be accessed by the Get API operations. With OutputConfig enabled, you can set the name of the bucket the output will be sent to and the file prefix of the results where you can download your results. Additionally, you can set the <code>KMSKeyID</code> parameter to a customer master key (CMK) to encrypt your output. Without this parameter set Amazon Textract will encrypt server-side using the AWS managed CMK for Amazon S3.</p> <p>Decryption of Customer Content is necessary for processing of the documents by Amazon Textract. If your account is opted out under an AI services opt out policy then all unencrypted Customer Content is immediately and permanently deleted after the Customer Content has been processed by the service. No copy of of the output is retained by Amazon Textract. For information about how to opt out, see <a href="https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_ai-opt-out.html"> Managing AI services opt-out policy. </a> </p> <p>For more information on data privacy, see the <a href="https://aws.amazon.com/compliance/data-privacy-faq/">Data Privacy FAQ</a>.</p>
"""
S3Bucket: S3Bucket
S3Prefix: Optional[S3ObjectName] = None
class Pages1(BaseModel):
__root__: List[UInteger]
class Point(BaseModel):
"""
<p>The X and Y coordinates of a point on a document page. The X and Y values that are returned are ratios of the overall document page size. For example, if the input document is 700 x 200 and the operation returns X=0.5 and Y=0.25, then the point is at the (350,50) pixel coordinate on the document page.</p> <p>An array of <code>Point</code> objects, <code>Polygon</code>, is returned by <a>DetectDocumentText</a>. <code>Polygon</code> represents a fine-grained polygon around detected text. For more information, see Geometry in the Amazon Textract Developer Guide. </p>
"""
X: Optional[Float] = None
Y: Optional[Float] = None
class RelationshipType(Enum):
VALUE = 'VALUE'
CHILD = 'CHILD'
COMPLEX_FEATURES = 'COMPLEX_FEATURES'
class Relationship(BaseModel):
"""
<p>Information about how blocks are related to each other. A <code>Block</code> object contains 0 or more <code>Relation</code> objects in a list, <code>Relationships</code>. For more information, see <a>Block</a>.</p> <p>The <code>Type</code> element provides the type of the relationship for all blocks in the <code>IDs</code> array. </p>
"""
Type: Optional[RelationshipType] = None
Ids: Optional[IdList] = None
class S3ObjectVersion(S3ObjectName):
pass
class Warning(BaseModel):
"""
A warning about an issue that occurred during asynchronous text analysis (<a>StartDocumentAnalysis</a>) or asynchronous document text detection (<a>StartDocumentTextDetection</a>).
"""
ErrorCode: Optional[ErrorCode] = None
Pages: Optional[Pages1] = None
class GetDocumentAnalysisRequest(BaseModel):
JobId: JobId
MaxResults: Optional[MaxResults] = None
NextToken: Optional[PaginationToken] = None
class GetDocumentTextDetectionRequest(BaseModel):
JobId: JobId
MaxResults: Optional[MaxResults] = None
NextToken: Optional[PaginationToken] = None
class StartDocumentAnalysisResponse(BaseModel):
JobId: Optional[JobId] = None
class StartDocumentTextDetectionResponse(StartDocumentAnalysisResponse):
pass
class FeatureTypes(BaseModel):
__root__: List[FeatureType]
class HumanLoopConfig(BaseModel):
"""
Sets up the human review workflow the document will be sent to if one of the conditions is met. You can also set certain attributes of the image before review.
"""
HumanLoopName: HumanLoopName
FlowDefinitionArn: FlowDefinitionArn
DataAttributes: Optional[HumanLoopDataAttributes] = None
class DocumentMetadata(BaseModel):
"""
Information about the input document.
"""
Pages: Optional[UInteger] = None
class RelationshipList(BaseModel):
__root__: List[Relationship]
class EntityTypes(BaseModel):
__root__: List[EntityType]
class S3Object(BaseModel):
"""
<p>The S3 bucket name and file name that identifies the document.</p> <p>The AWS Region for the S3 bucket that contains the document must match the Region that you use for Amazon Textract operations.</p> <p>For Amazon Textract to process a file in an S3 bucket, the user must have permission to access the S3 bucket and file. </p>
"""
Bucket: Optional[S3Bucket] = None
Name: Optional[S3ObjectName] = None
Version: Optional[S3ObjectVersion] = None
class DocumentLocation(BaseModel):
"""
<p>The Amazon S3 bucket that contains the document to be processed. It's used by asynchronous operations such as <a>StartDocumentTextDetection</a>.</p> <p>The input document can be an image file in JPEG or PNG format. It can also be a file in PDF format.</p>
"""
S3Object: Optional[S3Object] = None
class Polygon(BaseModel):
__root__: List[Point]
class Warnings(BaseModel):
__root__: List[Warning]
class HumanLoopActivationReasons(BaseModel):
__root__: Annotated[List[HumanLoopActivationReason], Field(min_items=1)]
class StartDocumentAnalysisRequest(BaseModel):
DocumentLocation: DocumentLocation
FeatureTypes: FeatureTypes
ClientRequestToken: Optional[ClientRequestToken] = None
JobTag: Optional[JobTag] = None
NotificationChannel: Optional[NotificationChannel] = None
OutputConfig: Optional[OutputConfig] = None
KMSKeyId: Optional[KMSKeyId] = None
class StartDocumentTextDetectionRequest(BaseModel):
DocumentLocation: DocumentLocation
ClientRequestToken: Optional[ClientRequestToken] = None
JobTag: Optional[JobTag] = None
NotificationChannel: Optional[NotificationChannel] = None
OutputConfig: Optional[OutputConfig] = None
KMSKeyId: Optional[KMSKeyId] = None
class Document(BaseModel):
"""
<p>The input document, either as bytes or as an S3 object.</p> <p>You pass image bytes to an Amazon Textract API operation by using the <code>Bytes</code> property. For example, you would use the <code>Bytes</code> property to pass a document loaded from a local file system. Image bytes passed by using the <code>Bytes</code> property must be base64 encoded. Your code might not need to encode document file bytes if you're using an AWS SDK to call Amazon Textract API operations. </p> <p>You pass images stored in an S3 bucket to an Amazon Textract API operation by using the <code>S3Object</code> property. Documents stored in an S3 bucket don't need to be base64 encoded.</p> <p>The AWS Region for the S3 bucket that contains the S3 object must match the AWS Region that you use for Amazon Textract operations.</p> <p>If you use the AWS CLI to call Amazon Textract operations, passing image bytes using the Bytes property isn't supported. You must first upload the document to an Amazon S3 bucket, and then call the operation using the S3Object property.</p> <p>For Amazon Textract to process an S3 object, the user must have permission to access the S3 object. </p>
"""
Bytes: Optional[ImageBlob] = None
S3Object: Optional[S3Object] = None
class HumanLoopActivationOutput(BaseModel):
"""
Shows the results of the human in the loop evaluation. If there is no HumanLoopArn, the input did not trigger human review.
"""
HumanLoopArn: Optional[HumanLoopArn] = None
HumanLoopActivationReasons: Optional[HumanLoopActivationReasons] = None
HumanLoopActivationConditionsEvaluationResults: Optional[
HumanLoopActivationConditionsEvaluationResults
] = None
class Geometry(BaseModel):
"""
Information about where the following items are located on a document page: detected page, text, key-value pairs, tables, table cells, and selection elements.
"""
BoundingBox: Optional[BoundingBox] = None
Polygon: Optional[Polygon] = None
class Block(BaseModel):
"""
<p>A <code>Block</code> represents items that are recognized in a document within a group of pixels close to each other. The information returned in a <code>Block</code> object depends on the type of operation. In text detection for documents (for example <a>DetectDocumentText</a>), you get information about the detected words and lines of text. In text analysis (for example <a>AnalyzeDocument</a>), you can also get information about the fields, tables, and selection elements that are detected in the document.</p> <p>An array of <code>Block</code> objects is returned by both synchronous and asynchronous operations. In synchronous operations, such as <a>DetectDocumentText</a>, the array of <code>Block</code> objects is the entire set of results. In asynchronous operations, such as <a>GetDocumentAnalysis</a>, the array is returned over one or more responses.</p> <p>For more information, see <a href="https://docs.aws.amazon.com/textract/latest/dg/how-it-works.html">How Amazon Textract Works</a>.</p>
"""
BlockType: Optional[BlockType] = None
Confidence: Optional[Percent] = None
Text: Optional[String] = None
TextType: Optional[TextType] = None
RowIndex: Optional[UInteger] = None
ColumnIndex: Optional[UInteger] = None
RowSpan: Optional[UInteger] = None
ColumnSpan: Optional[UInteger] = None
Geometry: Optional[Geometry] = None
Id: Optional[NonEmptyString] = None
Relationships: Optional[RelationshipList] = None
EntityTypes: Optional[EntityTypes] = None
SelectionStatus: Optional[SelectionStatus] = None
Page: Optional[UInteger] = None
class ExpenseDetection(BaseModel):
"""
An object used to store information about the Value or Label detected by Amazon Textract.
"""
Text: Optional[String] = None
Geometry: Optional[Geometry] = None
Confidence: Optional[Percent] = None
class ExpenseField(BaseModel):
"""
    Breakdown of detected information, separated into the categories Type, LabelDetection, and ValueDetection
"""
Type: Optional[ExpenseType] = None
LabelDetection: Optional[ExpenseDetection] = None
ValueDetection: Optional[ExpenseDetection] = None
PageNumber: Optional[UInteger] = None
class AnalyzeDocumentRequest(BaseModel):
Document: Document
FeatureTypes: FeatureTypes
HumanLoopConfig: Optional[HumanLoopConfig] = None
class AnalyzeExpenseRequest(BaseModel):
Document: Document
class DetectDocumentTextRequest(BaseModel):
Document: Document
class BlockList(BaseModel):
__root__: List[Block]
class ExpenseFieldList(BaseModel):
__root__: List[ExpenseField]
class LineItemFields(BaseModel):
"""
A structure that holds information about the different lines found in a document's tables.
"""
LineItemExpenseFields: Optional[ExpenseFieldList] = None
class LineItemList(BaseModel):
__root__: List[LineItemFields]
class LineItemGroup(BaseModel):
"""
A grouping of tables which contain LineItems, with each table identified by the table's <code>LineItemGroupIndex</code>.
"""
LineItemGroupIndex: Optional[UInteger] = None
LineItems: Optional[LineItemList] = None
class AnalyzeDocumentResponse(BaseModel):
DocumentMetadata: Optional[DocumentMetadata] = None
Blocks: Optional[BlockList] = None
HumanLoopActivationOutput: Optional[HumanLoopActivationOutput] = None
AnalyzeDocumentModelVersion: Optional[String] = None
class DetectDocumentTextResponse(BaseModel):
DocumentMetadata: Optional[DocumentMetadata] = None
Blocks: Optional[BlockList] = None
DetectDocumentTextModelVersion: Optional[String] = None
class GetDocumentAnalysisResponse(BaseModel):
DocumentMetadata: Optional[DocumentMetadata] = None
JobStatus: Optional[JobStatus] = None
NextToken: Optional[PaginationToken] = None
Blocks: Optional[BlockList] = None
Warnings: Optional[Warnings] = None
StatusMessage: Optional[StatusMessage] = None
AnalyzeDocumentModelVersion: Optional[String] = None
class GetDocumentTextDetectionResponse(BaseModel):
DocumentMetadata: Optional[DocumentMetadata] = None
JobStatus: Optional[JobStatus] = None
NextToken: Optional[PaginationToken] = None
Blocks: Optional[BlockList] = None
Warnings: Optional[Warnings] = None
StatusMessage: Optional[StatusMessage] = None
DetectDocumentTextModelVersion: Optional[String] = None
class LineItemGroupList(BaseModel):
__root__: List[LineItemGroup]
class ExpenseDocument(BaseModel):
"""
The structure holding all the information returned by AnalyzeExpense
"""
ExpenseIndex: Optional[UInteger] = None
SummaryFields: Optional[ExpenseFieldList] = None
LineItemGroups: Optional[LineItemGroupList] = None
class ExpenseDocumentList(BaseModel):
__root__: List[ExpenseDocument]
class AnalyzeExpenseResponse(BaseModel):
DocumentMetadata: Optional[DocumentMetadata] = None
ExpenseDocuments: Optional[ExpenseDocumentList] = None
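# --- Illustrative usage sketch (not part of the generated models above) ---
# Assumptions: the pydantic v1 style __root__ models defined in this module; the
# bucket and object names below are hypothetical placeholders.
if __name__ == "__main__":
    request = StartDocumentAnalysisRequest.parse_obj({
        "DocumentLocation": {"S3Object": {"Bucket": "example-bucket", "Name": "example.pdf"}},
        "FeatureTypes": ["TABLES", "FORMS"],
    })
    print(request.json(exclude_none=True))
    # BoundingBox fields are ratios of the page size, so multiplying by the pixel
    # dimensions recovers pixel coordinates (Left=0.5 on a 700 px wide page -> 350 px).
    box = BoundingBox.parse_obj({"Width": 0.1, "Height": 0.25, "Left": 0.5, "Top": 0.25})
    page_width_px, page_height_px = 700, 200
    print(box.Left.__root__ * page_width_px, box.Top.__root__ * page_height_px)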
|
import os
from alttprbot.alttprgen.randomizer import roll_aosr
from alttprbot.tournament.core import TournamentConfig
from alttprbot_discord.bot import discordbot
from .sglcore import SGLRandomizerTournamentRace
class AOSR(SGLRandomizerTournamentRace):
async def configuration(self):
guild = discordbot.get_guild(590331405624410116)
return TournamentConfig(
guild=guild,
racetime_category='sgl',
racetime_goal="Aria of Sorrow Randomizer",
event_slug="sgl21aosr",
audit_channel=discordbot.get_channel(772351829022474260),
commentary_channel=discordbot.get_channel(631564559018098698),
coop=False,
gsheet_id=os.environ.get("SGL_RESULTS_SHEET"),
auto_record=True
)
async def roll(self):
self.seed_id, self.permalink = roll_aosr(
logic='AreaTechTiers',
nodupes='false',
panther='FirstAlways',
area='AreaRandom',
boss='Dead-endShuffle',
enemy='Vanilla',
itempool='Standard',
weight=2.5,
grahm='BookSouls',
kicker='false',
startshop='Unlocked30k',
shopprice='RandHV',
shopSouls='Half',
levelexp='Vanilla',
telestart='false',
mapassist='false',
doublechaos='false'
)
@property
def seed_info(self):
return self.permalink
|
from __future__ import print_function
import unittest
import ospsurvey.version
class TestSample(unittest.TestCase):
def test_sample(self):
self.assertTrue(True)
v = ospsurvey.version.version()
print(v)
|
from heapq import nlargest
from typing import List
Scores = List[int]
def latest(scores: Scores) -> int:
"""The last added score."""
return scores[-1]
def personal_best(scores: Scores) -> int:
"""The highest score."""
return max(scores)
def personal_top_three(scores: Scores) -> Scores:
"""The three highest scores."""
return nlargest(3, scores)
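# --- Minimal usage sketch (the score list below is made up for illustration) ---
if __name__ == "__main__":
    scores = [30, 50, 20, 70, 70, 40]
    print(latest(scores))              # 40
    print(personal_best(scores))       # 70
    print(personal_top_three(scores))  # [70, 70, 50]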
|
IMAGE_MAX_WH = 300
|
# Exercise 13
# https://learnpythonthehardway.org/book/ex13.html
from sys import argv
script, first, second, third = argv
print "The script is called:", script
print "Your first variable is:", first
print "Your second variable is:", second
print "Your third variable is:", third
opinion = raw_input("What do you think of this script? ")
print "You think %s is %s!" % (script, opinion)
|
import machine, time
import ssd1306, ds18x20, onewire
ENABLE_OLED = True
def main():
while True:
show()
time.sleep_ms(250)
def show():
wtemp = read_ds18(750) # water temperature
print("W=" + wtemp)
if ENABLE_OLED == True:
oled.fill(0)
oled.text("[water]", 0, 0)
oled.text("W=" + wtemp, 0, 10) # water temperature
oled.show()
def read_ds18(t):
    # start a conversion, wait t ms for it to complete, then read the sensor
    ds18.convert_temp()
    time.sleep_ms(t)
    wtemp = ds18.read_temp(rom)
    return "{:3.1f}C".format(wtemp)
if __name__ == "__main__":
ow = onewire.OneWire(machine.Pin(16))
ds18 = ds18x20.DS18X20(ow)
rom = None
roms = ds18.scan()
for rom in roms:
print('Found DS devices: ', rom)
break
if ENABLE_OLED == True:
i2c = machine.I2C(scl=machine.Pin(4), sda=machine.Pin(5))
oled = ssd1306.SSD1306_I2C(width=128, height=64, i2c=i2c)
main()
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from . import models, serializers
class Notifications(APIView):
    def get(self, request, format=None):
        user = request.user
        notifications = models.Notification.objects.filter(to=user)
        serializer = serializers.NotificationSerializer(notifications, many=True, context={"request": request})
        return Response(data=serializer.data, status=status.HTTP_200_OK)
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""preprocess data"""
import argparse
import os
import numpy as np
parser = argparse.ArgumentParser(description='Postprocess of Hypertext Inference')
parser.add_argument('--result_Path', type=str, default='./result_Files',
help='result path')
parser.add_argument('--label_Path', default='./result_Files', type=str,
help='label file path')
parser.add_argument('--batch_size', default=1, type=int, help='batch_size')
args = parser.parse_args()
dirs = os.listdir(args.label_Path)
cur, total = 0, 0
print('---------- start cal acc ----------')
for file in dirs:
label = np.fromfile(os.path.join(args.label_Path, file), dtype=np.int32)
file_name = file.split('.')[0]
idx = file_name.split('_')[-1]
predict_file_name = "hypertext_ids_bs" + str(args.batch_size) + "_" + str(idx) + "_0.bin"
predict_file = os.path.join(args.result_Path, predict_file_name)
predict = np.fromfile(predict_file, dtype=np.int32)
acc = predict == label
acc = np.array(acc, dtype=np.float32)
cur += (np.sum(acc, -1))
total += len(acc)
print('acc:', cur / total)
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from socket import socket, AF_INET, SOCK_DGRAM
from common.Struct import Struct
from dcgm_fluentd import DcgmFluentd
def test_send_to_fluentd():
    # Python 2 has no `nonlocal`, so an inner function can't rebind names in the
    # enclosing scope; use a mutable object as a stand-in for a writable closure
namespace = Struct(message=None, dest=None)
def mysendto(_message, _dest):
namespace.message = _message
namespace.dest = _dest
mysock = Struct(sendto=mysendto)
dr = DcgmFluentd('FAKE_HOST', 101010)
# Assert that we are sending over UDP
assert dr.m_sock.family == AF_INET
assert dr.m_sock.type == SOCK_DGRAM
dr.m_sock = mysock
dr.SendToFluentd('message')
assert(namespace.message == 'message')
assert(namespace.dest == ('FAKE_HOST', 101010))
def test_custom_json_handler():
namespace = Struct(arg=None)
def MySendToFluentd(json):
namespace.arg = json # pylint: disable=no-member
dr = DcgmFluentd('FAKE_HOST', 101010)
dr.SendToFluentd = MySendToFluentd
dr.CustomJsonHandler('value')
assert namespace.arg == 'value' # pylint: disable=no-member
|
import argparse
import json
import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from matplotlib.ticker import FuncFormatter
from rl_baselines.visualize import loadCsv, movingAverage, loadData
from srl_zoo.utils import printGreen, printYellow, printRed
# Init seaborn
sns.set()
# Style for the title
fontstyle = {'fontname': 'DejaVu Sans', 'fontsize': 16}
# Modified Colorbrewer Paired_12, you can use palettable to retrieve it
colors = [[166, 206, 227], [31, 120, 180], [178, 223, 138], [51, 160, 44], [251, 154, 153], [227, 26, 28],
[253, 191, 111], [255, 127, 0], [202, 178, 214], [106, 61, 154], [143, 156, 212], [64, 57, 178], [255, 255, 153], [177, 89, 40],
[10, 10, 10], [0, 0, 0]]
colors = [(r / 255, g / 255, b / 255) for (r, g, b) in colors]
lightcolors = colors[0::2]
darkcolors = colors[1::2]
# Default y-limits for the plot
# Kuka Arm
Y_LIM_SPARSE_REWARD = [0, 5]
# Mobile robot
Y_LIM_SPARSE_REWARD = [-3, 250]
# Relative: [-150, -50]
# Normal: [-70, -35]
Y_LIM_SHAPED_REWARD = [-150, -50]
def loadEpisodesData(folder):
"""
:param folder: (str)
:return: (numpy array, numpy array) or (None, None)
"""
result, _ = loadCsv(folder)
if len(result) == 0:
return None, None
y = np.array(result)[:, 1]
x = np.arange(len(y))
return x, y
def millions(x, pos):
"""
formatter for matplotlib
The two args are the value and tick position
:param x: (float)
    :param pos: (int) tick position (not used here)
:return: (str)
"""
return '{:.1f}M'.format(x * 1e-6)
def plotGatheredExperiments(folders, algo, y_limits, window=40, title="", min_num_x=-1,
timesteps=False, output_file="", no_display=False):
"""
Compute mean and standard error for several experiments and plot the learning curve
:param folders: ([str]) Log folders, where the monitor.csv are stored
:param window: (int) Smoothing window
:param algo: (str) name of the RL algo
:param title: (str) plot title
:param min_num_x: (int) Minimum number of episode/timesteps to keep an experiment (default: -1, no minimum)
:param timesteps: (bool) Plot timesteps instead of episodes
:param y_limits: ([float]) y-limits for the plot
:param output_file: (str) Path to a file where the plot data will be saved
:param no_display: (bool) Set to true, the plot won't be displayed (useful when only saving plot)
"""
y_list = []
x_list = []
ok = False
for folder in folders:
if timesteps:
x, y = loadData(folder, smooth=1, bin_size=100)
if x is not None:
x, y = np.array(x), np.array(y)
else:
x, y = loadEpisodesData(folder)
if x is None or (min_num_x > 0 and y.shape[0] < min_num_x):
printYellow("Skipping {}".format(folder))
continue
if y.shape[0] <= window:
printYellow("Folder {}".format(folder))
printYellow("Not enough episodes for current window size = {}".format(window))
continue
ok = True
y = movingAverage(y, window)
y_list.append(y)
# Truncate x
x = x[len(x) - len(y):]
x_list.append(x)
if not ok:
printRed("Not enough data to plot anything with current config." +
" Consider decreasing --min-x")
return
lengths = list(map(len, x_list))
min_x, max_x = np.min(lengths), np.max(lengths)
print("Min x: {}".format(min_x))
print("Max x: {}".format(max_x))
for i in range(len(x_list)):
x_list[i] = x_list[i][:min_x]
y_list[i] = y_list[i][:min_x]
x = np.array(x_list)[0]
y = np.array(y_list)
printGreen("{} Experiments".format(y.shape[0]))
print("Min, Max rewards:", np.min(y), np.max(y))
fig = plt.figure(title)
# Compute mean for different seeds
m = np.mean(y, axis=0)
# Compute standard error
s = np.squeeze(np.asarray(np.std(y, axis=0)))
n = y.shape[0]
plt.fill_between(x, m - s / np.sqrt(n), m + s / np.sqrt(n), color=lightcolors[0])
plt.plot(x, m, color=darkcolors[0], label=algo, linewidth=1)
if timesteps:
formatter = FuncFormatter(millions)
plt.xlabel('Number of Timesteps')
fig.axes[0].xaxis.set_major_formatter(formatter)
else:
plt.xlabel('Number of Episodes')
plt.ylabel('Rewards')
plt.title(title, **fontstyle)
plt.ylim(y_limits)
plt.legend(framealpha=0.5, labelspacing=0.01, loc='lower right', fontsize=16)
if output_file != "":
printGreen("Saving aggregated data to {}.npz".format(output_file))
np.savez(output_file, x=x, y=y)
if not no_display:
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Plot trained agent")
parser.add_argument('-i', '--log-dir', help='folder with the saved agent model', type=str, required=True)
parser.add_argument('-o', '--output-file', help='Where to save the aggregated data', type=str, default="")
parser.add_argument('--episode_window', type=int, default=40,
help='Episode window for moving average plot (default: 40)')
parser.add_argument('--min-x', type=int, default=-1,
help='Minimum number of x-ticks to keep an experiment (default: -1, no minimum)')
parser.add_argument('--y-lim', nargs=2, type=float, default=[-1, -1], help="limits for the y axis")
parser.add_argument('--shape-reward', action='store_true', default=False,
help='Shape the reward (reward = - distance) instead of a sparse reward')
parser.add_argument('--timesteps', action='store_true', default=False,
help='Plot timesteps instead of episodes')
parser.add_argument('--no-display', action='store_true', default=False, help='Do not display plot')
args = parser.parse_args()
y_limits = args.y_lim
if y_limits[0] == y_limits[1]:
if args.shape_reward:
y_limits = Y_LIM_SHAPED_REWARD
else:
y_limits = Y_LIM_SPARSE_REWARD
print("Using default limits:", y_limits)
# TODO: check that the parameters are the same between Experiments
folders = []
other = []
train_args = {}
for folder in os.listdir(args.log_dir):
path = "{}/{}/".format(args.log_dir, folder)
env_globals = json.load(open(path + "env_globals.json", 'r'))
train_args = json.load(open(path + "args.json", 'r'))
if train_args["shape_reward"] == args.shape_reward:
folders.append(path)
else:
other.append(path)
if len(folders) == 0 and len(other) == 0:
printYellow("No experiment found. Is the folder path {} correct?".format(args.log_dir))
exit()
elif len(folders) == 0:
printYellow("No experiments found with the given criterion. However {} experiments".format(len(other)) +
" where found {} reward shaping. ".format("without" if args.shape_reward else "with") +
"Did you mean {} the flag '--shape-reward'?".format("without" if args.shape_reward else "with"))
exit()
srl_model = train_args['srl_model'] if train_args['srl_model'] != "" else "raw pixels"
if args.timesteps:
title = srl_model + " [Timesteps]"
else:
title = srl_model + " [Episodes]"
plotGatheredExperiments(folders, train_args['algo'], y_limits=y_limits, window=args.episode_window,
title=title, min_num_x=args.min_x, no_display=args.no_display,
timesteps=args.timesteps, output_file=args.output_file)
|
import os
import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize
from util import get_median_inter_mnist, Kernel, load_data, ROOT_PATH,_sqdist,nystrom_decomp,remove_outliers,nystrom_decomp, chol_inv
from scipy.sparse import csr_matrix
import random
import time
Nfeval = 1
seed = 527
np.random.seed(seed)
random.seed(seed)
JITTER = 1e-7
nystr_M = 300
EYE_nystr = np.eye(nystr_M)
__sparse_fmt = csr_matrix
opt_params = None
prev_norm = None
opt_test_err = None
def experiment(sname, seed, nystr=True):
def LMO_err(params,M=2,verbal=False):
global Nfeval
params = np.exp(params)
al,bl = params[:-1],params[-1] # params[:int(n_params/2)], params[int(n_params/2):] # [np.exp(e) for e in params]
if train.x.shape[1]<5:
train_L = bl**2*np.exp(-train_L0/al**2/2)+1e-4*EYEN
else:
train_L,dev_L = 0,0
for i in range(len(al)):
train_L += train_L0[i]/al[i]**2
train_L = bl*bl*np.exp(-train_L/2)+1e-4*EYEN
tmp_mat = train_L@eig_vec_K
C = train_L-tmp_mat@np.linalg.inv(eig_vec_K.T@tmp_mat/N2+inv_eig_val)@tmp_mat.T/N2
c = C @ W_nystr_Y*N2
c_y = c-train.y
lmo_err = 0
N = 0
for ii in range(1):
permutation = np.random.permutation(train.x.shape[0])
for i in range(0,train.x.shape[0],M):
indices = permutation[i:i + M]
K_i = train_W[np.ix_(indices,indices)]*N2
C_i = C[np.ix_(indices,indices)]
c_y_i = c_y[indices]
b_y = np.linalg.inv(np.eye(M)-C_i@K_i)@c_y_i
lmo_err += b_y.T@K_i@b_y
N += 1
return lmo_err[0,0]/M**2
def callback0(params):
global Nfeval, prev_norm, opt_params, opt_test_err
if Nfeval %1 == 0:
params = np.exp(params)
print('params:',params)
al,bl = params[:-1],params[-1]
if train.x.shape[1]<5:
train_L = bl**2*np.exp(-train_L0/al**2/2)+1e-4*EYEN
test_L = bl**2*np.exp(-test_L0/al**2/2)
else:
train_L,test_L = 0,0
for i in range(len(al)):
train_L += train_L0[i]/al[i]**2
test_L += test_L0[i]/al[i]**2
train_L = bl*bl*np.exp(-train_L/2)+1e-4*EYEN
test_L = bl*bl*np.exp(-test_L/2)
if nystr:
tmp_mat = eig_vec_K.T@train_L
alpha = EYEN-eig_vec_K@np.linalg.inv(tmp_mat@eig_vec_K/N2+inv_eig_val)@tmp_mat/N2
alpha = alpha@W_nystr_Y*N2
else:
LWL_inv = chol_inv(train_L@train_W@train_L+train_L/N2+JITTER*EYEN)
alpha = LWL_inv@train_L@train_W@train.y
pred_mean = test_L@alpha
test_err = ((pred_mean-test.g)**2).mean()
norm = alpha.T @ train_L @ alpha
Nfeval += 1
if prev_norm is not None:
if norm[0,0]/prev_norm >=3:
if opt_test_err is None:
opt_test_err = test_err
opt_params = params
print(True,opt_params, opt_test_err,prev_norm, norm[0,0])
raise Exception
if prev_norm is None or norm[0,0]<= prev_norm:
prev_norm = norm[0,0]
opt_test_err = test_err
opt_params = params
print(True,opt_params, opt_test_err, prev_norm, norm[0,0])
train, dev, test = load_data(ROOT_PATH+'/data/'+sname+'/main_orig.npz')
del dev
# avoid same indices when run on the cluster
for _ in range(seed+1):
random_indices = np.sort(np.random.choice(range(train.x.shape[0]),nystr_M,replace=False))
EYEN = np.eye(train.x.shape[0])
N2 = train.x.shape[0]**2
    # precompute to save time on parallelized computation
if train.z.shape[1] < 5:
ak = get_median_inter_mnist(train.z)
else:
ak = np.load(ROOT_PATH+'/mnist_precomp/{}_ak.npy'.format(sname))
train_W = np.load(ROOT_PATH+'/mnist_precomp/{}_train_K0.npy'.format(sname))
train_W = (np.exp(-train_W/ak/ak/2)+np.exp(-train_W/ak/ak/200)+np.exp(-train_W/ak/ak*50))/3/N2
if train.x.shape[1]<5:
train_L0 = _sqdist(train.x,None)
test_L0 = _sqdist(test.x,train.x)
else:
L0s=np.load(ROOT_PATH+'/mnist_precomp/{}_Ls.npz'.format(sname))
train_L0 = L0s['train_L0']
# dev_L0 = L0s['dev_L0']
test_L0 = L0s['test_L0']
del L0s
if train.x.shape[1]<5:
params0 = np.random.randn(2)*0.1
else:
params0 = np.random.randn(len(train_L0)+1)*0.1
bounds = None
eig_val_K,eig_vec_K = nystrom_decomp(train_W*N2, random_indices)
W_nystr_Y = eig_vec_K @ np.diag(eig_val_K) @ eig_vec_K.T@train.y/N2
inv_eig_val = np.diag(1/eig_val_K/N2)
obj_grad = value_and_grad(lambda params: LMO_err(params))
res = minimize(obj_grad, x0=params0,bounds=bounds, method='L-BFGS-B',jac=True,options={'maxiter':5000,'disp':True,'ftol':0},callback=callback0)
PATH = ROOT_PATH + "/MMR_IVs/results/"+ sname + "/"
os.makedirs(PATH, exist_ok=True)
np.save(PATH+'LMO_errs_{}_nystr.npy'.format(seed),[opt_params,prev_norm,opt_test_err])
if __name__ == '__main__':
snames = ['mnist_z','mnist_x','mnist_xz']
for sname in snames:
for seed in range(100):
experiment(sname,seed)
PATH = ROOT_PATH + "/MMR_IVs/results/"+ sname + "/"
ress = []
for seed in range(100):
filename = PATH+'LMO_errs_{}_nystr.npy'.format(seed)
if os.path.exists(filename):
res = np.load(filename,allow_pickle=True)
if res[-1] is not None:
ress += [res[-1]]
ress = np.array(ress)
ress = remove_outliers(ress)
print(np.nanmean(ress),np.nanstd(ress))
|
import pandas as pd
import os,re,gzip
from igf_data.utils.fileutils import check_file_path
def identify_fastq_pair(input_list,sort_output=True,check_count=False):
'''
A method for fastq read pair identification
:param input_list: A list of input fastq files
:param sort_output: Sort output list, default true
:param check_count: Check read count for fastq pair, only available if sort_output is True, default False
:returns: A list for read1 files and another list of read2 files
'''
try:
read1_list=list()
read2_list=list()
read1_pattern=re.compile(r'\S+_R1_\d+\.fastq(\.gz)?')
    read2_pattern=re.compile(r'\S+_R2_\d+\.fastq(\.gz)?')
for file in input_list:
if re.match(read1_pattern,file):
read1_list.append(file)
if re.match(read2_pattern,file):
read2_list.append(file)
if len(read1_list) == 0:
raise ValueError('No fastq file found for read 1')
if len(read1_list) != len(read2_list) and \
len(read2_list) > 0:
      raise ValueError('Number of fastq files is not the same for read1: {0} and read2: {1}'.\
format(len(read1_list),len(read2_list)))
if sort_output and \
len(read1_list)==len(read2_list): # if fastq input list is not properly sorted
sorted_read2_list=list()
for file1 in read1_list:
temp_file2=file1.replace('_R1_','_R2_')
if temp_file2 not in read2_list:
raise ValueError('No read2 found for file {0}'.format(file1))
else:
sorted_read2_list.append(temp_file2)
read2_list=sorted_read2_list # reset read2 list
if check_count and \
len(read1_list) > 0 and \
len(read2_list) > 0 :
fastq_df=pd.DataFrame({'R1_file':read1_list,
'R2_file':read2_list})
for entry in fastq_df.to_dict(orient='records'):
compare_fastq_files_read_counts(r1_file=entry['R1_file'],
r2_file=entry['R2_file'])
return read1_list, read2_list
except:
raise
def detect_non_fastq_in_file_list(input_list):
'''
A method for detecting non fastq file within a list of input fastq
:param input_list: A list of filepath to check
:returns: True in non fastq files are present or else False
'''
try:
fastq_pattern=re.compile(r'\S+\.fastq(\.gz)?')
non_fastq_found=False
for file in input_list:
if not re.match(fastq_pattern,os.path.basename(file)):
non_fastq_found=True
return non_fastq_found
except:
raise
def compare_fastq_files_read_counts(r1_file,r2_file):
'''
A method for comparing read counts for fastq pairs
:param r1_file: Fastq pair R1 file path
:param r2_file: Fastq pair R2 file path
:raises: ValueError if counts are not same
'''
try:
check_file_path(r1_file)
check_file_path(r2_file)
r1_count=count_fastq_lines(r1_file)
r2_count=count_fastq_lines(r2_file)
if r1_count != r2_count:
raise ValueError('Fastq pair does not have same number of reads: {0} {1}'.\
format(r1_file,r2_file))
except:
raise
def count_fastq_lines(fastq_file):
'''
A method for counting fastq lines
:param fastq_file: A gzipped or unzipped fastq file
:returns: Fastq line count
'''
try:
gzipped_pattern=re.compile(r'\S+\.(fastq|fq)\.gz$')
unzipped_pattern=re.compile(r'\S+\.(fastq|fq)$')
lines=0
check_file_path(fastq_file)
if re.match(gzipped_pattern,fastq_file): # read gzipped file
with gzip.open(fastq_file, 'rb') as f:
buf_size = 1024 * 1024
read_f = f.read
buf = read_f(buf_size)
while buf:
lines += buf.count(b'\n')
buf = read_f(buf_size)
elif re.match(unzipped_pattern,fastq_file): # read unzipped file
      with open(fastq_file, 'rb') as f:
buf_size = 1024 * 1024
read_f = f.raw.read
buf = read_f(buf_size)
while buf:
lines += buf.count(b'\n')
buf = read_f(buf_size)
else:
raise ValueError('Failed to detect read mode for fastq file {0}'.\
format(fastq_file))
if lines >= 4 :
if lines % 4 != 0:
        raise ValueError('Fastq file is missing a complete block of 4 lines: {0}'.\
format(fastq_file))
lines = int(lines/4)
return lines
except:
raise
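# --- Minimal usage sketch (hypothetical file names; check_count is left at its
# default of False, so no files are opened) ---
if __name__ == '__main__':
    fastq_files = [
        'sampleA_S1_L001_R1_001.fastq.gz',
        'sampleA_S1_L001_R2_001.fastq.gz',
    ]
    if not detect_non_fastq_in_file_list(fastq_files):
        r1_files, r2_files = identify_fastq_pair(fastq_files)
        print(r1_files, r2_files)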
|
def max_buffed_damage(max_base_damage: float, blood_drinker: float, blood_thirst: float):
return max_base_damage + blood_drinker + blood_thirst
def min_buffed_damage(max_buffed_damage: float, weapon_variance: float):
return max_buffed_damage * (1.0 - weapon_variance)
def starting_weapon_variance(min_base_damage: float, max_base_damage: float):
return (max_base_damage - min_base_damage) / max_base_damage
def starting_average_damage(min_base_damage: float, max_base_damage: float, blood_drinker: float, blood_thirst: float, critical_hit_rate=0.11, critical_hit_mod=2.0, damage_rating_percent=1.05):
max_buffed = max_buffed_damage(max_base_damage, blood_drinker, blood_thirst)
min_buffed = min_buffed_damage(max_buffed, starting_weapon_variance(min_base_damage, max_base_damage))
return round(max_buffed * critical_hit_rate * critical_hit_mod * critical_hit_damage_rating_percent() + (1.0 - critical_hit_rate) * ((max_buffed + min_buffed) / 2.0) * damage_rating_percent, 2)
def critical_hit_damage_rating_percent(damage_rating=5.0, critical_hit_damage_rating=3.0):
return (100.0 + damage_rating + critical_hit_damage_rating) / 100.0
def average_damage(min_base_damage, max_base_damage, blood_thirst, i, critical_hit_mod=2.0, blood_drinker=24.0, damage_rating_percent=1.05, critical_hit_rate=0.11, number_of_tinks=9.0):
    max_buffed = max_base_damage + blood_drinker + blood_thirst + float(i)
    min_buffed = (1.0 - (1.0 - min_base_damage / max_base_damage) * pow(0.8, number_of_tinks - float(i))) * max_buffed
    return round(damage_rating_percent * ((min_buffed + max_buffed) / 2.0) + max_buffed * critical_hit_rate * critical_hit_mod * critical_hit_damage_rating_percent(), 2)
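# --- Worked example (the weapon stats below are hypothetical, chosen only to
# illustrate the formulas; the default 11% crit rate and 2.0 crit modifier apply) ---
if __name__ == "__main__":
    min_base, max_base = 10.0, 20.0
    blood_drinker, blood_thirst = 24.0, 2.0
    print(starting_weapon_variance(min_base, max_base))  # 0.5
    print(starting_average_damage(min_base, max_base, blood_drinker, blood_thirst))
    # Average damage after each tinkering step from 0 to 9
    for tinks in range(10):
        print(tinks, average_damage(min_base, max_base, blood_thirst, tinks))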
|
import math
from typing import List
def mean(target_list: List[int], floor: bool = True) -> int:
"""
Gets the mean value of the list, rounded down if floor is True, else rounded up
"""
if floor:
return mean_floor(target_list)
return mean_ceil(target_list)
def mean_floor(target_list: List[int]) -> int:
"""
Gets the rounded down mean of the list
"""
return sum(target_list) // len(target_list)
def mean_ceil(target_list: List[int]) -> int:
"""
Gets the rounded up mean of the list
"""
return math.ceil(sum(target_list) / len(target_list))
def gaussian_sum(number: int) -> int:
"""
Gets the sum of all numbers up to the provided number.
E.g. gaussian_sum(5) == sum([1, 2, 3, 4, 5])
:param number:
:return:
"""
return number * (1 + number) // 2
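# --- Minimal usage sketch (example values chosen for illustration) ---
if __name__ == "__main__":
    values = [1, 2, 3, 4]
    print(mean(values))               # 2 (floored)
    print(mean(values, floor=False))  # 3 (rounded up)
    print(gaussian_sum(5))            # 15 == sum([1, 2, 3, 4, 5])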
|
# Created by MechAviv
# ID :: [4000021]
# Maple Road : Entrance to Adventurer Training Center
sm.setSpeakerID(12100)
selection = sm.sendNext("This is the perfect place to train your basic skills. Where do you want to train?\r\n#b#L0#Adventurer Training Center 1#l\r\n#b#L1#Adventurer Training Center 2#l\r\n#b#L2#Adventurer Training Center 3#l\r\n#b#L3#Adventurer Training Center 4#l")
if selection == 0:
sm.warp(4000022, 4)
elif selection == 1:
sm.warp(4000023, 4)
elif selection == 2:
sm.warp(4000024, 4)
elif selection == 3:
sm.warp(4000025, 4)
|
"""Window class for Nuke.
TODO: Get callbacks from https://learn.foundry.com/nuke/developers/110/pythonreference/
TODO: Figure out how to launch a floating panel
"""
from __future__ import absolute_import, print_function
import inspect
import uuid
from collections import defaultdict
from functools import partial
from Qt import QtWidgets
import nuke
from nukescripts import panels, utils
from .abstract import AbstractWindow, getWindowSettings
from .standalone import StandaloneWindow
from .utils import hybridmethod, setCoordinatesToScreen, searchGlobals
VERSION = float('{}.{}'.format(nuke.env['NukeVersionMajor'], nuke.env['NukeVersionMinor']))
class RuntimeDraggingError(RuntimeError):
"""Custom error message for when a window is being dragged."""
def __init__(self):
super(RuntimeDraggingError, self).__init__("window is currently in a quantum state (while dragging it technically doesn't exist)")
def getMainWindow():
"""Returns the Nuke main window.
If nothing can be found, None will be returned.
Source: https://github.com/fredrikaverpil/pyvfx-boilerplate/blob/master/boilerplate.py
"""
for obj in QtWidgets.QApplication.topLevelWidgets():
if obj.inherits('QMainWindow') and obj.metaObject().className() == 'Foundry::UI::DockMainWindow':
return obj
def deleteQtWindow(windowId):
"""Delete a window.
Source: https://github.com/fredrikaverpil/pyvfx-boilerplate/blob/master/boilerplate.py
"""
for obj in QtWidgets.QApplication.allWidgets():
if obj.objectName() == windowId:
obj.deleteLater()
def _removeMargins(widget):
"""Remove Nuke margins when docked UI
Source: https://gist.github.com/maty974/4739917
"""
for parent in (widget.parentWidget().parentWidget(), widget.parentWidget().parentWidget().parentWidget().parentWidget()):
parent.layout().setContentsMargins(0, 0, 0, 0)
class Pane(object):
@classmethod
def get(cls, value=None):
if value is not None:
return nuke.getPaneFor(value)
return cls.auto()
@classmethod
def auto(cls):
"""Automatically select a pane to attach to.
If there are somehow no panels that exist then None will be returned.
"""
for pane_func in cls.__PRIORITY:
pane = pane_func.__get__(cls, None)()
if pane is not None:
return pane
@classmethod
def find(cls, windowID):
"""Find which pane the WindowID is docked to."""
current_pane = nuke.getPaneFor(windowID)
if current_pane is None:
return None
for pane_func in cls.__PRIORITY:
index = 1
while True:
pane = pane_func.__get__(cls, None)(index)
if pane is None:
break
if pane == current_pane:
return pane_func.__get__(cls, None)(index, name=True)
index += 1
@classmethod
def Properties(cls, index=1, name=False):
panel_name = 'Properties.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def NodeGraph(cls, index=1, name=False):
panel_name = 'DAG.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def Viewer(cls, index=1, name=False):
panel_name = 'Viewer.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def Progress(cls, index=1, name=False):
panel_name = 'Progress.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def DopeSheet(cls, index=1, name=False):
panel_name = 'DopeSheet.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def Toolbar(cls, index=1, name=False):
panel_name = 'Toolbar.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def CurveEditor(cls, index=1, name=False):
panel_name = 'Curve Editor.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def PixelAnalyzer(cls, index=1, name=False):
panel_name = 'Pixel Analyzer.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def ErrorConsole(cls, index=1, name=False):
panel_name = 'Error Console.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def ScriptEditor(cls, index=1, name=False):
panel_name = 'uk.co.thefoundry.scripteditor.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def Histogram(cls, index=1, name=False):
panel_name = 'uk.co.thefoundry.histogram.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def Waveform(cls, index=1, name=False):
panel_name = 'uk.co.thefoundry.waveformscope.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
@classmethod
def Vectorscope(cls, index=1, name=False):
panel_name = 'uk.co.thefoundry.vectorscope.{}'.format(index)
if name:
return panel_name
return nuke.getPaneFor(panel_name)
__PRIORITY = [
Properties,
NodeGraph,
Viewer,
DopeSheet,
CurveEditor,
PixelAnalyzer,
Progress,
ErrorConsole,
ScriptEditor,
Histogram,
Waveform,
Vectorscope,
Toolbar,
]
class NukeCommon(object):
pass
class NukeWindow(NukeCommon, AbstractWindow):
"""Base class for docking windows in Nuke.
Docked Window Workarounds:
Because Nuke only "hides" docked windows and never closes them,
a few special workarounds need to be done on some features.
Window Position:
This must be run every time the window is hidden, to ensure
the location is as up to date as possible. The process of
dragging a window actually sends a hideEvent, but will
cause errors when trying to query positions as it gets
detached from all parents.
Callbacks:
Since there is no closeEvent to catch, every callback will
unregister when the window is hidden, and be registered
again when the window appears again.
By defining a "checkForChanges" method, code can be run
after re-registering the callbacks.
"""
_CALLBACKS = {
'onUserCreate': ('addOnUserCreate', 'removeOnUserCreate'),
'onCreate': ('addOnCreate', 'removeOnCreate'),
'onScriptLoad': ('addOnScriptLoad', 'removeOnScriptLoad'),
'onScriptSave': ('addOnScriptSave', 'removeOnScriptSave'),
'onScriptClose': ('addOnScriptClose', 'removeOnScriptClose'),
'onDestroy': ('addOnDestroy', 'removeOnDestroy'),
'knobChanged': ('addKnobChanged', 'removeKnobChanged'),
'updateUI': ('addUpdateUI', 'removeUpdateUI'),
}
def __init__(self, parent=None, dockable=True, **kwargs):
"""Create the Nuke window.
By default dockable must be True as Nuke provides no control
over it when creating a panel.
"""
if parent is None:
parent = getMainWindow()
super(NukeWindow, self).__init__(parent, **kwargs)
self.nuke = True
self.__windowHidden = False
self.setDockable(dockable, override=True)
# Fix for parent bug
# See NukeWindow.parent for more information
self.__useNukeTemporaryParent = True
self.windowReady.connect(partial(setattr, self, '__useNukeTemporaryParent', False))
# This line seemed to be recommended, but I'm not sure why
#if not self.dockable():
# self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
def closeEvent(self, event):
"""Special case for closing docked windows."""
super(NukeWindow, self).clearWindowInstance(self.WindowID)
if self.dockable():
if self.exists():
try:
#Delete the pane if it is floating by itself
if self.floating(alternative=True) and self.siblings() == 1:
self.parent().parent().parent().parent().parent().parent().parent().parent().parent().close()
#Remove the tab and pane if by itself
else:
self.parent().parent().parent().parent().parent().parent().parent().close()
deleteQtWindow(self.WindowID)
except RuntimeDraggingError:
pass
else:
self.saveWindowPosition()
return super(NukeWindow, self).closeEvent(event)
def setDefaultSize(self, width, height):
"""Override of setDefaultSize to disable it if window is docked."""
if not self.dockable():
super(NukeWindow, self).setDefaultSize(width, height)
def setDefaultWidth(self, width):
"""Override of setDefaultWidth to disable it if window is docked."""
if not self.dockable():
super(NukeWindow, self).setDefaultWidth(width)
def setDefaultHeight(self, height):
"""Override of setDefaultHeight to disable it if window is docked."""
if not self.dockable():
super(NukeWindow, self).setDefaultHeight(height)
def setDefaultPosition(self, x, y):
"""Override of setDefaultPosition to disable it if window is docked."""
if not self.dockable():
super(NukeWindow, self).setDefaultPosition(x, y)
def setWindowPalette(self, program, version=None, style=True, force=False):
"""Set the palette of the window.
This will change the entire Nuke GUI so it's disabled by default.
The force parameter can be set to override this behaviour.
"""
if force:
super(NukeWindow, self).setWindowPalette(program, version, style)
def windowPalette(self):
"""Get the current window palette."""
currentPalette = super(NukeWindow, self).windowPalette()
if currentPalette is None:
return 'Nuke.{}'.format(VERSION)
return currentPalette
def exists(self, alternative=False):
"""Check if the window still exists.
See if it is attached to any pane, or check the parents up to the QStackedWidget.
"""
if self.dockable():
if alternative:
return self.parent().parent().parent().parent().parent().parent().parent().parent() is not None
return Pane.get(self.WindowID) is not None
return not self.isClosed()
def floating(self, alternative=False):
"""Determine if the window is floating."""
if self.dockable():
if alternative:
try:
return self.parent().parent().parent().parent().parent().parent().parent().parent().parent().parent().parent().parent() is not None
except AttributeError:
raise RuntimeDraggingError
return Pane.find(self.WindowID) is None
return True
def siblings(self):
"""Count the number of siblings in the QStackedWidget."""
if self.dockable():
try:
return self.parent().parent().parent().parent().parent().parent().parent().parent().count()
except AttributeError:
return 0
return None
def resize(self, *args, **kwargs):
"""Resize the window.
Only resize after loading has finished if it's not docked to a panel.
"""
if not self._windowLoaded:
return self.windowReady.connect(partial(self.resize, *args, **kwargs))
try:
floating = self.floating()
except RuntimeDraggingError:
floating = False
if not self.dockable() or floating:
super(NukeWindow, self).resize(*args, **kwargs)
def move(self, *args, **kwargs):
"""Move the window.
Only move after loading has finished if it's not docked to a panel.
"""
if not self._windowLoaded:
return self.windowReady.connect(partial(self.move, *args, **kwargs))
if not self.dockable() or self.floating():
super(NukeWindow, self).move(*args, **kwargs)
def getAttachedPane(self):
"""Find the name of the pane the window is attached to."""
return Pane.find(self.WindowID)
def saveWindowPosition(self):
"""Save the window location."""
if 'nuke' not in self.windowSettings:
self.windowSettings['nuke'] = {}
settings = self.windowSettings['nuke']
settings['docked'] = self.dockable(raw=True)
key = self._getSettingsKey()
if key not in settings:
settings[key] = {}
try:
settings[key]['width'] = self.width()
settings[key]['height'] = self.height()
settings[key]['x'] = self.x()
settings[key]['y'] = self.y()
# Save docked specific settings
if self.dockable():
panel = self.getAttachedPane()
if panel is not None:
settings[key]['panel'] = panel
# Catch error if window is being dragged at this moment
except RuntimeDraggingError as e:
if not self.dockable():
raise
super(NukeWindow, self).saveWindowPosition()
def loadWindowPosition(self):
"""Set the position of the window when loaded."""
if self.dockable():
return
try:
settings = self.windowSettings['nuke']['main']
x = settings['x']
y = settings['y']
width = settings['width']
height = settings['height']
except KeyError:
super(NukeWindow, self).loadWindowPosition()
else:
x, y = setCoordinatesToScreen(x, y, width, height, padding=5)
self.resize(width, height)
self.move(x, y)
def _parentOverride(self, usePane=False):
"""Get the widget that contains the correct size and position on screen."""
try:
if usePane:
pane = Pane.get(self.WindowID)
if pane is None:
raise AttributeError()
return pane
if not self.floating(alternative=True):
return self.parent().parent().parent().parent().parent().parent().parent().parent().parent()
return self.parent().parent().parent().parent().parent().parent().parent().parent().parent().parent().parent()
except AttributeError:
if self.exists():
raise
else:
raise RuntimeDraggingError
def width(self):
"""Override to get docked width."""
if self.dockable():
return self._parentOverride(usePane=True).width()
return super(NukeWindow, self).width()
def height(self):
"""Override to get docked height."""
if self.dockable():
            return self._parentOverride(usePane=True).height()
return super(NukeWindow, self).height()
def _registerNukeCallbacks(self):
"""Register all callbacks."""
numEvents = 0
windowInstance = self.windowInstance()
for group in windowInstance['callback'].keys():
for callbackName, (callbackAdd, callbackRemove) in self._CALLBACKS.items():
for func in windowInstance['callback'][group][callbackName]:
for nodeClass in windowInstance['callback'][group][callbackName][func]:
if nodeClass is None:
getattr(nuke, callbackAdd)(func)
else:
getattr(nuke, callbackAdd)(func, nodeClass=nodeClass)
numEvents += 1
return numEvents
def _unregisterNukeCallbacks(self, group=None):
"""Unregister all callbacks."""
numEvents = 0
windowInstance = self.windowInstance()
for group in windowInstance['callback'].keys():
for callbackName, (callbackAdd, callbackRemove) in self._CALLBACKS.items():
for func in windowInstance['callback'][group][callbackName]:
for nodeClass in windowInstance['callback'][group][callbackName][func]:
if nodeClass is None:
getattr(nuke, callbackRemove)(func)
else:
getattr(nuke, callbackRemove)(func, nodeClass=nodeClass)
numEvents += 1
return numEvents
    def removeCallback(self, func, group=None, nodeClass=None):
        """Remove an individual callback."""
        windowInstance = self.windowInstance()
        if group is None:
            groups = windowInstance['callback'].keys()
        elif group not in windowInstance['callback']:
            groups = []
        else:
            groups = [group]
        numEvents = 0
        for group in groups:
            for callbackName, (callbackAdd, callbackRemove) in self._CALLBACKS.items():
                if func not in windowInstance['callback'][group][callbackName]:
                    continue
                registered = windowInstance['callback'][group][callbackName][func]
                # Iterate over a copy, since matching node classes are removed as we go
                for registeredNodeClass in list(registered):
                    # Only remove the requested node class unless none was given
                    if nodeClass is not None and registeredNodeClass != nodeClass:
                        continue
                    if registeredNodeClass is None:
                        getattr(nuke, callbackRemove)(func)
                    else:
                        getattr(nuke, callbackRemove)(func, nodeClass=registeredNodeClass)
                    registered.discard(registeredNodeClass)
                    numEvents += 1
        return numEvents
@hybridmethod
def removeCallbacks(cls, self, group=None, windowInstance=None, windowID=None):
"""Remove a callback group or all callbacks."""
# Handle classmethod
if self is cls:
if windowInstance is None and windowID is not None:
windowInstance = cls.windowInstance(windowID)
if windowInstance is None:
raise ValueError('windowInstance or windowID parameter is required for classmethod')
# Handle normal method
elif windowInstance is None:
windowInstance = self.windowInstance()
# Select all groups if specific one not provided
if group is None:
groups = windowInstance['callback'].keys()
else:
if group not in windowInstance['callback']:
groups = []
else:
groups = [group]
# Iterate through each callback to remove certain groups
numEvents = 0
for group in groups:
for callbackName, (callbackAdd, callbackRemove) in self._CALLBACKS.items():
for func in windowInstance['callback'][group][callbackName]:
for nodeClass in windowInstance['callback'][group][callbackName][func]:
if nodeClass is None:
getattr(nuke, callbackRemove)(func)
else:
getattr(nuke, callbackRemove)(func, nodeClass=nodeClass)
numEvents += 1
del windowInstance['callback'][group]
return numEvents
def _addNukeCallbackGroup(self, group):
windowInstance = self.windowInstance()
if group in windowInstance['callback']:
return
windowInstance['callback'][group] = defaultdict(lambda: defaultdict(set))
def addCallbackOnUserCreate(self, func, nodeClass=None, group=None):
"""Executed whenever a node is created by the user.
Not called when loading existing scripts, pasting nodes, or undoing a delete.
"""
self._addNukeCallbackGroup(group)
self.windowInstance()['callback'][group]['onUserCreate'][func].add(nodeClass)
if not self.__windowHidden:
if nodeClass is None:
nuke.addOnUserCreate(func)
else:
nuke.addOnUserCreate(func, nodeClass=nodeClass)
def addCallbackOnCreate(self, func, nodeClass=None, group=None):
"""Executed when any node is created.
Examples include loading a script (includes new file), pasting a node, selecting a menu item, or undoing a delete.
"""
self._addNukeCallbackGroup(group)
self.windowInstance()['callback'][group]['onCreate'][func].add(nodeClass)
if not self.__windowHidden:
if nodeClass is None:
nuke.addOnCreate(func)
else:
nuke.addOnCreate(func, nodeClass=nodeClass)
def addCallbackOnScriptLoad(self, func, nodeClass=None, group=None):
"""Executed when a script is loaded.
This will be called by onCreate (for root), and straight after onCreate.
"""
self._addNukeCallbackGroup(group)
self.windowInstance()['callback'][group]['onScriptLoad'][func].add(nodeClass)
if not self.__windowHidden:
if nodeClass is None:
nuke.addOnScriptLoad(func)
else:
nuke.addOnScriptLoad(func, nodeClass=nodeClass)
def addCallbackOnScriptSave(self, func, nodeClass=None, group=None):
"""Executed when the user tries to save a script."""
self._addNukeCallbackGroup(group)
self.windowInstance()['callback'][group]['onScriptSave'][func].add(nodeClass)
if not self.__windowHidden:
if nodeClass is None:
nuke.addOnScriptSave(func)
else:
nuke.addOnScriptSave(func, nodeClass=nodeClass)
def addCallbackOnScriptClose(self, func, nodeClass=None, group=None):
"""Executed when Nuke is exited or the script is closed."""
self._addNukeCallbackGroup(group)
self.windowInstance()['callback'][group]['onScriptClose'][func].add(nodeClass)
if not self.__windowHidden:
if nodeClass is None:
nuke.addOnScriptClose(func)
else:
nuke.addOnScriptClose(func, nodeClass=nodeClass)
def addCallbackOnDestroy(self, func, nodeClass=None, group=None):
self._addNukeCallbackGroup(group)
self.windowInstance()['callback'][group]['onDestroy'][func].add(nodeClass)
if not self.__windowHidden:
if nodeClass is None:
nuke.addOnDestroy(func)
else:
nuke.addOnDestroy(func, nodeClass=nodeClass)
def addCallbackKnobChanged(self, func, nodeClass=None, group=None):
self._addNukeCallbackGroup(group)
self.windowInstance()['callback'][group]['knobChanged'][func].add(nodeClass)
if not self.__windowHidden:
if nodeClass is None:
nuke.addKnobChanged(func)
else:
nuke.addKnobChanged(func, nodeClass=nodeClass)
def addCallbackUpdateUI(self, func, nodeClass=None, group=None):
self._addNukeCallbackGroup(group)
self.windowInstance()['callback'][group]['updateUI'][func].add(nodeClass)
if not self.__windowHidden:
if nodeClass is None:
nuke.addUpdateUI(func)
else:
nuke.addUpdateUI(func, nodeClass=nodeClass)
@classmethod
def clearWindowInstance(cls, windowID):
"""Close the last class instance."""
try:
previousInstance = super(NukeWindow, cls).clearWindowInstance(windowID)
except TypeError:
return
if previousInstance is None:
return
cls.removeCallbacks(windowInstance=previousInstance)
#Shut down the window
if not previousInstance['window'].isClosed():
try:
previousInstance['window'].close()
except (RuntimeError, ReferenceError):
pass
def deferred(self, func, *args, **kwargs):
"""Execute a deferred command."""
utils.executeDeferred(func, *args, **kwargs)
def parent(self, *args, **kwargs):
"""Fix a weird Nuke crash.
It seems to be under a specific set of circumstances, so I'm
not sure how to deal with it other than with this workaround.
Details specific to my issue:
Non-dockable window
Location data doesn't exist, causing centreWindow to run
Requesting self.parent() inside centreWindow crashes Nuke.
This fix runs getMainWindow if loading isn't complete.
"""
if not self.__useNukeTemporaryParent or self.dockable():
return super(NukeWindow, self).parent(*args, **kwargs)
return getMainWindow()
    def hideEvent(self, event):
        """Unregister callbacks and save window location."""
        if not event.spontaneous() and not self.isClosed():
            try:
                self._unregisterNukeCallbacks()
            except TypeError:
                pass
            self.__windowHidden = True
            self.saveWindowPosition()
        return super(NukeWindow, self).hideEvent(event)
def showEvent(self, event):
"""Register callbacks and update UI (if checkForChanges is defined)."""
if not event.spontaneous():
self._registerNukeCallbacks()
self.__windowHidden = False
if hasattr(self, 'checkForChanges'):
self.checkForChanges()
return super(NukeWindow, self).showEvent(event)
def hide(self):
"""Hide the Nuke window."""
if not self.dockable():
return super(NukeWindow, self).hide()
@hybridmethod
def show(cls, self, *args, **kwargs):
"""Show the Nuke window."""
# Window is already initialised
if self is not cls:
if self.dockable():
return None
return super(NukeWindow, self).show()
#Close down any instances of the window
try:
cls.clearWindowInstance(cls.WindowID)
except AttributeError:
settings = {}
else:
settings = getWindowSettings(cls.WindowID)
#Load settings
try:
nukeSettings = settings['nuke']
except KeyError:
nukeSettings = settings['nuke'] = {}
if hasattr(cls, 'WindowDockable'):
docked = cls.WindowDockable
else:
try:
docked = nukeSettings['docked']
except KeyError:
try:
docked = cls.WindowDefaults['docked']
except (AttributeError, KeyError):
docked = True
dockOverride = False
if docked:
# Attempt to find the module in the global scope
# If it can't be found, then it can't be docked
namespace = searchGlobals(cls)
if namespace is None:
docked = cls.WindowDockable = False
dockOverride = True
#Return new class instance and show window
if docked:
try:
pane = Pane.get(nukeSettings['dock']['panel']) or Pane.auto()
except KeyError:
pane = Pane.auto()
# Set WindowID if needed but disable saving
class WindowClass(cls):
if not hasattr(cls, 'WindowID'):
WindowID = uuid.uuid4().hex
def enableSaveWindowPosition(self, enable):
return super(WindowClass, self).enableSaveWindowPosition(False)
panel = panels.registerWidgetAsPanel(
widget=namespace,
name=getattr(WindowClass, 'WindowName', 'New Window'),
id=WindowClass.WindowID,
create=True,
)
panel.addToPane(pane)
panelObject = panel.customKnob.getObject()
if panelObject is not None:
widget = panelObject.widget
_removeMargins(widget)
widget.deferred(widget.windowReady.emit)
return widget
kwargs['dockable'] = False
win = super(NukeWindow, cls).show(*args, **kwargs)
if dockOverride:
cls.WindowDockable = True
win.setDockable(True, override=True)
return win
@classmethod
def dialog(cls, parent=None, *args, **kwargs):
"""Create the window as a dialog."""
if parent is None:
parent = getMainWindow()
return super(NukeWindow, cls).dialog(parent=parent, *args, **kwargs)
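# Illustrative sketch (not part of the original module): a window subclass might
# register grouped callbacks through the helpers defined above. The class, node
# class and group names here are hypothetical, and the removeCallbacks signature
# is assumed from the group-removal logic above.
#
#     class MyNukePanel(NukeWindow):
#         WindowID = 'my_nuke_panel'
#         WindowName = 'My Nuke Panel'
#
#         def __init__(self, parent=None):
#             super(MyNukePanel, self).__init__(parent)
#             # Re-registered automatically on showEvent, removed on hideEvent
#             self.addCallbackKnobChanged(self.onKnobChanged, nodeClass='Write', group='writes')
#
#         def onKnobChanged(self):
#             print('knob changed: {}'.format(nuke.thisKnob().name()))
#
# Removing only that group would then be something like self.removeCallbacks(group='writes').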
class NukeBatchWindow(NukeCommon, StandaloneWindow):
"""Variant of the Standalone window for Nuke in batch mode.
Warning: This does not yet work properly. It is able to launch a
process to run the GUI in (since batch mode uses a QCoreApplication
which does not allow windows), but that process is not able to
correctly import the "_nuke" library.
"""
def __init__(self, parent=None, **kwargs):
super(NukeBatchWindow, self).__init__(parent, **kwargs)
self.nuke = False
self.batch = True
self.standalone = False
def setWindowPalette(self, program, version=None, style=True, force=False):
if force:
super(NukeBatchWindow, self).setWindowPalette(program, version, style)
def saveWindowPosition(self):
"""Save the window location."""
if 'nuke' not in self.windowSettings:
self.windowSettings['nuke'] = {}
settings = self.windowSettings['nuke']
key = self._getSettingsKey()
if key not in settings:
settings[key] = {}
try:
settings[key]['width'] = self.width()
settings[key]['height'] = self.height()
settings[key]['x'] = self.x()
settings[key]['y'] = self.y()
except RuntimeDraggingError:
if not self.dockable():
raise
else:
super(NukeBatchWindow, self).saveWindowPosition()
def loadWindowPosition(self):
"""Set the position of the window when loaded."""
key = self._getSettingsKey()
try:
width = self.windowSettings['nuke'][key]['width']
height = self.windowSettings['nuke'][key]['height']
x = self.windowSettings['nuke'][key]['x']
y = self.windowSettings['nuke'][key]['y']
except KeyError:
super(NukeBatchWindow, self).loadWindowPosition()
else:
x, y = setCoordinatesToScreen(x, y, width, height, padding=5)
self.resize(width, height)
self.move(x, y)
@hybridmethod
def show(cls, self, *args, **kwargs):
"""Load the window in Nuke batch mode."""
# Window is already initialised
if self is not cls:
return super(NukeBatchWindow, self).show()
# Close down window if it exists and open a new one
try:
cls.clearWindowInstance(cls.WindowID)
except AttributeError:
pass
kwargs['instance'] = False
kwargs['exec_'] = True
return super(NukeBatchWindow, cls).show(*args, **kwargs)
@classmethod
def dialog(cls, parent=None, *args, **kwargs):
"""Create the window as a dialog."""
if parent is None:
parent = getMainWindow()
        return super(NukeBatchWindow, cls).dialog(parent=parent, *args, **kwargs)
|
import pytest
from obsei.preprocessor.text_splitter import TextSplitterConfig
from obsei.payload import TextPayload
DOCUMENT_1 = """I love playing console games."""
DOCUMENT_2 = """Beyoncรฉ Giselle Knowles-Carter (/biหหjษnseษช/ bee-YON-say; born September 4, 1981)[6] is an American singer, songwriter, record producer, and actress. Born and raised in Houston, Texas, Beyoncรฉ performed in various singing and dancing competitions as a child. She rose to fame in the late 1990s as the lead singer of Destiny's Child, one of the best-selling girl groups of all time. Their hiatus saw the release of her first solo album, Dangerously in Love (2003), which featured the US Billboard Hot 100 number-one singles "Crazy in Love" and "Baby Boy". Following the 2006 disbandment of Destiny's Child, she released her second solo album, B'Day, which contained hit singles "Irreplaceable" and "Beautiful Liar". Beyoncรฉ also starred in multiple films such as The Pink Panther (2006), Dreamgirls (2006), Obsessed (2009), and The Lion King (2019). Her marriage to Jay-Z and her portrayal of Etta James in Cadillac Records (2008) influenced her third album, I Am... Sasha Fierce (2008), which earned a record-setting six Grammy Awards in 2010. It spawned the successful singles "If I Were a Boy", "Single Ladies (Put a Ring on It)", and "Halo". After splitting from her manager and father Mathew Knowles in 2010, Beyoncรฉ released her musically diverse fourth album 4 in 2011. She later achieved universal acclaim for her sonically experimental visual albums, Beyoncรฉ (2013) and Lemonade (2016), the latter of which was the world's best-selling album of 2016 and the most acclaimed album of her career, exploring themes of infidelity and womanism. In 2018, she released Everything Is Love, a collaborative album with her husband, Jay-Z, as the Carters. As a featured artist, Beyoncรฉ topped the Billboard Hot 100 with the remixes of "Perfect" by Ed Sheeran in 2017 and "Savage" by Megan Thee Stallion in 2020. The same year, she released the musical film and visual album Black Is King to widespread acclaim."""
DOC1_VAL = [29]
DOC2_VAL1 = [503, 512, 504, 384]
DOC2_VAL2 = [503, 512, 507, 505, 394]
MAX_SPLIT_LENGTH = 512
SPLIT_STRIDE = 128
@pytest.mark.parametrize(
"doc, expected_lengths, stride",
[(DOCUMENT_1, DOC1_VAL, 0), (DOCUMENT_1, DOC1_VAL, 128), (DOCUMENT_2, DOC2_VAL1, 0), (DOCUMENT_2, DOC2_VAL2, 128)]
)
def test_splits(doc, expected_lengths, stride, text_splitter):
doc_splits = text_splitter.preprocess_input(
input_list=[TextPayload(processed_text=doc)],
config=TextSplitterConfig(max_split_length=MAX_SPLIT_LENGTH, split_stride=stride),
)
assert len(expected_lengths) == len(doc_splits)
for text_payload, expected_length in zip(doc_splits, expected_lengths):
assert "splitter" in text_payload.meta
splitter_payload = text_payload.meta["splitter"]
assert splitter_payload.chunk_length == expected_length
|
#!/usr/bin/env python
# encoding: utf-8
import sys
from setuptools import setup
version = "1.0.6"
description = (
"A pure Python implementation for the famous DES algorithm"
)
if sys.version_info < (3, ):
kw = {}
else:
kw = {"encoding": "utf-8"}
long_description = open("README.md", **kw).read()
classifiers = [
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Security",
"Topic :: Security :: Cryptography",
]
setup(
name="des",
version=version,
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
license="MIT",
author="Eric Wong",
author_email="ericwong@zju.edu.cn",
url="https://github.com/littlefisher/des",
packages=["des"],
classifiers=classifiers,
)
|
from typing import Tuple
from numpy import dtype
from nn_trainer.utils import *
import torch
import torch.optim
from nn_trainer.data import *
from nn_trainer.models import *
from nn_trainer.trainers import *
from nn_trainer.plotters import *
from nn_trainer.metrics import *
from nn_trainer.callbacks import *
from nn_trainer.trainers import BasicNnTrainer
args = parse_arguments()
MODEL_SIZE = 16
INPUT_SIZE = 64
LOGGER = Logger("nn_trainer")
args['training_subset_ratio'] = .5
args['model_size'] = MODEL_SIZE
args['input_size'] = INPUT_SIZE
args['num_epochs'] = 700
args['batch_size'] = 128
args['latent_dim'] = 0
args['learning_rate'] = 0.0005
args['feature_range_min'] = -1
args['feature_range_max'] = 1
class LinearGenerator(nn.Module):
def __init__(self, in_samples:int, out_samples:int, hidden_layer_count:int = 1, hidden_sample_count:int = 15):
super(LinearGenerator, self).__init__()
self.fc_l0 = nn.Linear(in_samples, hidden_sample_count)
self.bn0 = nn.BatchNorm1d(hidden_sample_count)
self.hidden_layers = nn.ModuleList()
self.hidden_bn_layers = nn.ModuleList()
for hli in range(hidden_layer_count):
self.hidden_layers.append(nn.Linear(hidden_sample_count, hidden_sample_count))
self.hidden_bn_layers.append(nn.BatchNorm1d(hidden_sample_count))
self.fc_l2 = nn.Linear(hidden_sample_count, out_samples)
self.bn2 = nn.BatchNorm1d(out_samples)
for m in self.modules():
if isinstance(m, nn.Conv1d) or isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight.data)
def forward(self, input):
if len(input.shape) == 3 and input.shape[1] == 1:
input = input.view(-1, input.shape[2])
elif len(input.shape) != 2:
            raise ValueError("dimensionality of data array is incorrect")
x = F.leaky_relu(self.bn0(self.fc_l0(input)))
for index, layer in enumerate(self.hidden_layers):
bn_func = self.hidden_bn_layers[index]
x = F.leaky_relu(bn_func(layer(x)))
x = F.leaky_relu(self.bn2(self.fc_l2(x)))
output = torch.tanh(x)
return output
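# Illustrative shape check (not in the original script): with in_samples=2 and
# out_samples=2, a batch of 8 two-feature rows maps to an (8, 2) tensor with
# values in (-1, 1) because of the final tanh, e.g.:
#     gen = LinearGenerator(2, 2)
#     gen.eval()                      # put BatchNorm layers in eval mode for a quick check
#     out = gen(torch.randn(8, 2))    # -> torch.Size([8, 2])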
data_set = CircleFullDataSet()
# Generator factory so the trainer can construct the generator lazily
# (only when needed); it can easily be swapped for a different architecture.
def generator_ctor() -> nn.Module:
gNet = LinearGenerator(data_set.input_shape[1], data_set.output_shape[1])
init_weights(gNet)
return gNet
netG = generator_ctor()
trainer = BasicNnTrainer(neural_network=netG, verbose=True, max_epoch_count=args["num_epochs"],batch_size=args["batch_size"], dtype=torch.float64, output_dir=OUTPUT_PATH, logger=LOGGER)
# Create callbacks for logging and plotting state
callbacks = [
DataPlotterCallback(data_set.validation_set, args["sample_size"], netG, ScatterDataPlotter(), trainer.output_directory_path, logger=LOGGER),
]
# Run training with the configured callbacks and regression metrics
trainer.train(data_set.training_set, data_set.training_set, callbacks=callbacks, metrics=[RMSE, MSE, MAE, AverageCosineSimilarity])
|
#!/usr/bin/env python3
"""
Top 100 most starred GitHub projects grouped by topic description.
Visualized as an interactive 3D pie chart in HTML5, hosted on GitHub Pages,
using the Google Charts JavaScript library.
"""
from datetime import datetime, timedelta
import time
from lxml import html
import requests
from pandas_datareader.data import DataReader
import pandas as pd
print("sleep for 45 seconds")
time.sleep(45)
# most starred repositories first page
page = requests.get('https://github.com/search?utf8=%E2%9C%93&q=\
stars%3A10000..1000000&type=Repositories')
tree = html.fromstring(page.content)
data = {}
topic_xpath = '//div/a[contains(@class,"topic-tag")]/text()'
# start code for line chart
now = datetime.now()
start = (now - timedelta(days=365)).date()
end = now.date()
# debug
print(start)
print(end)
# Set the ticker
ticker = 'AAPL' # Apple
# Set the data source
data_source = 'google' # use google finance
# Import the stock prices
stock_prices = DataReader(ticker, data_source, start, end)
#
apple_data = []
day_endings = {
1: 'st',
2: 'nd',
3: 'rd',
21: 'st',
22: 'nd',
23: 'rd',
31: 'st'
}
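# e.g. day 3 -> 'rd', day 17 -> 'th' (the lookup below falls back to 'th'),
# giving tooltip dates such as "Monday the 3rd of July 2017".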
# build the 3 element sub arrays for the line chart
for k, v in stock_prices['Close'].to_dict().items():
k = int(time.mktime(k.timetuple()))
t = datetime.fromtimestamp(k)
# [date, price, tooltip]
apple_data.append([t.strftime('%Y-%m-%d'),
v,
'{d}\nPrice: {p}'.format(
d=t.strftime(
'%A the %-d' +
day_endings.get(int(t.strftime('%-d')), 'th') + ' of %B %Y'),
p=v)])
# third chart
series = 'DCOILWTICO' # West Texas Intermediate Oil Price
oil = DataReader(series, 'fred', start)
ticker = 'XOM'  # Exxon Mobil Corporation
stock = DataReader(ticker, 'google', start)
exxon = pd.concat([stock[['Close']], oil], axis=1)
exxon.columns = ['Exxon', 'Oil Price']
exxon_data = []
for k, v in exxon.to_dict().items():
i = 0
for x, y in v.items():
z = int(time.mktime(x.timetuple()))
t = datetime.fromtimestamp(z)
# [date, exxon, oil]
j = y
if pd.isna(j):
j = 'None'
if k == 'Exxon':
exxon_data.append([t.strftime('%Y-%m-%d'), j, 0])
else:
exxon_data[i][2] = j
i += 1
def get_topics():
"""Build topic 2D array."""
topics = tree.xpath(topic_xpath)
for topic in topics:
top = topic.strip()
if top in data:
data[top] += 1
else:
data[top] = 1
# get first page of results
get_topics()
# retrieve the remaining result pages (GitHub search returns at most 10 pages)
while tree.xpath('//div[@class="pagination"]/a[@class="next_page"]'):
page = requests.get('https://github.com' +
tree.xpath('//div[@class="pagination"]/a[@class="next_page"]/@href')[0])
tree = html.fromstring(page.content)
get_topics()
# sort first by value descending and then by topic alphabetically
data = sorted(([k, v] for k, v in data.items()), key=lambda x: (-x[1], x[0]))
# debug
print(data)
# sort by date ascending
apple_data = sorted(([i, j, k] for i, j, k in apple_data), key=lambda x: x[0])
# debug
print(apple_data)
page = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
<title>Python Charts</title>
<link rel="stylesheet" href="assets/bootstrap-4.0.0-beta.2-dist/css/bootstrap.min.css">
<style>
.chart { width: 100%; min-height: 450px; }
p { font-size: 2.5rem; }
.logo { float: left; }
</style>
</head>
<body>
<div class="row">
<div class="col-md-12">
<a href="./" class="logo">
<img src="images/other/python-powered.png" alt="Python Powered Logo" title="Python Powered">
</a>
<p class="text-center">Python interactive charting demo</p>
</div>
<div class="col-md-12">
<div id="topic_chart" class="chart"></div>
<div id="apple_chart" class="chart"></div>
<div id="exxon_chart" class="chart"></div>
</div>
<div class="col-md-12">
<a target="_blank" href="https://info.flagcounter.com/a7We" rel="noopener">
<img src="https://s05.flagcounter.com/count2/a7We/bg_FFFFFF/txt_000000/border_CCCCCC/columns_3/maxflags_200/viewers_0/labels_1/pageviews_0/flags_0/percent_0/"
alt="Flag Counter">
</a>
</div>
</div>
"""
page += """<footer>Last built: {t}</footer>""".format(t=datetime.now())
page += """
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load("current", {packages:["corechart"]});
google.charts.setOnLoadCallback(drawChart);
google.charts.setOnLoadCallback(drawChartApple);
google.charts.setOnLoadCallback(drawChartExxon);
function drawChart() {
var data = new google.visualization.DataTable();
data.addColumn('string', 'Topic');
data.addColumn('number', 'Amount');"""
page += """
data.addRows({data});""".format(data=data)
page += """
var options = {
title: 'Top 100 most starred GitHub repositories grouped by topic',
is3D: true
};
var chart = new google.visualization.PieChart(document.getElementById('topic_chart'));
chart.draw(data, options);
}
function drawChartApple() {
var data = new google.visualization.DataTable();
data.addColumn('date', 'Date');
data.addColumn('number', 'Price');
data.addColumn({type: 'string', role: 'tooltip'});"""
page += """
var arr = {data}""".format(data=apple_data)
page += """
for(var i=0; i<arr.length; i++){
var arr_date = arr[i][0].split('-');
arr[i][0] = new Date(arr_date[0], parseInt(arr_date[1])-1, arr_date[2]);
}
data.addRows(arr);
var options = {
title: 'Apple share price over the last year.',
is3D: true,
legend: {position: 'top', alignment: 'start'},
selectionMode: 'multiple',
trendlines: {
0: {
type: 'linear',
color: 'green',
lineWidth: 3,
opacity: 0.3,
tooltip: false,
labelInLegend: 'Trend Line',
visibleInLegend: true
}
}
};
var chart = new google.visualization.LineChart(document.getElementById('apple_chart'));
chart.draw(data, options);
}
function drawChartExxon() {
var data = new google.visualization.DataTable();
data.addColumn('date', 'Date');
data.addColumn('number', 'Exxon');
data.addColumn('number', 'Oil');"""
page += """
var arr = {data}""".format(data=exxon_data)
page += """
for(var i=0; i<arr.length; i++){
var arr_date = arr[i][0].split('-');
arr[i][0] = new Date(arr_date[0], parseInt(arr_date[1])-1, arr_date[2]);
if(arr[i][1] === 'None'){
arr[i][1] = null;
}
if(arr[i][2] === 'None'){
arr[i][2] = null;
}
}
data.addRows(arr);
var options = {
title: 'Exxon share price versus oil price over the last year.',
is3D: true,
interpolateNulls: true,
legend: {position: 'top', alignment: 'start'},
selectionMode: 'multiple',
trendlines: {
0: {
type: 'linear',
color: 'green',
lineWidth: 3,
opacity: 0.3,
tooltip: false,
labelInLegend: 'Exxon Trend Line',
visibleInLegend: true
},
1: {
type: 'linear',
color: 'green',
lineWidth: 3,
opacity: 0.3,
tooltip: false,
labelInLegend: 'Oil Trend Line',
visibleInLegend: true
}
}
};
var chart = new google.visualization.LineChart(document.getElementById('exxon_chart'));
chart.draw(data, options);
}
</script>
<!-- Latest compiled and minified JavaScript -->
<script src="assets/js/jquery-3.2.1.min.js"></script>
<script src="assets/js/popper-1.12.6.min.js"></script>
<script src="assets/bootstrap-4.0.0-beta.2-dist/js/bootstrap.min.js"></script>
<script>
$(window).resize(function(){
drawChart();
drawChartApple();
drawChartExxon();
});
</script>
</body>
</html>"""
with open('site/index.html', 'w') as f:
f.write(page)
|
#!/bin/python
EMISSION_SPEED_FACTOR = 19
DIFFICULTY_TARGET = 240 # seconds
MONEY_SUPPLY = 88888888000000000
GENESIS_BLOCK_REWARD = 8800000000000000
FINAL_SUBSIDY = 4000000000
COIN_EMISSION_MONTH_INTERVAL = 6 #months
COIN_EMISSION_HEIGHT_INTERVAL = int(COIN_EMISSION_MONTH_INTERVAL * 30.4375 * 24 * 3600 / DIFFICULTY_TARGET)
HEIGHT_PER_YEAR = int((12*30.4375*24*3600)/DIFFICULTY_TARGET)
PEAK_COIN_EMISSION_YEAR = 4
PEAK_COIN_EMISSION_HEIGHT = HEIGHT_PER_YEAR * PEAK_COIN_EMISSION_YEAR
def get_block_reward(height, coins_already_generated):
if height < (PEAK_COIN_EMISSION_HEIGHT + COIN_EMISSION_HEIGHT_INTERVAL):
interval_num = height/COIN_EMISSION_HEIGHT_INTERVAL
money_supply_pct = 0.1888 + interval_num*(0.023 + interval_num*0.0032)
cal_block_reward = (MONEY_SUPPLY * money_supply_pct)/(2**EMISSION_SPEED_FACTOR)
else:
cal_block_reward = (MONEY_SUPPLY - coins_already_generated)/(2**EMISSION_SPEED_FACTOR)
return cal_block_reward
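# Rough worked example (not in the original script): at height 0, interval_num is 0,
# so money_supply_pct = 0.1888 and the calculated reward is about
# 88888888000000000 * 0.1888 / 2**19 ~= 3.2e10 atomic units (about 32 coins),
# before rounding and before the genesis-block override in the loop below.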
def calculate_emssion_speed(print_by_year = False):
coins_already_generated = 0
height = 0
total_time = 0
block_reward = 0
cal_block_reward = 0
count = 0
round_factor = 10000000
print "Height\t\tB.Reward\tCoin Emitted\tEmission(%)\tDays\tYears"
f.write("Height\tB.Reward\tCoin Emitted\tEmission(%)\tDays\tYears\n")
while coins_already_generated < MONEY_SUPPLY - FINAL_SUBSIDY:
emission_speed_change_happened = False
if height % COIN_EMISSION_HEIGHT_INTERVAL == 0:
cal_block_reward = get_block_reward(height, coins_already_generated)
emission_speed_change_happened = True
count += 1
if height == 0:
block_reward = GENESIS_BLOCK_REWARD
else:
block_reward = int(cal_block_reward) / round_factor * round_factor
if block_reward < FINAL_SUBSIDY:
if MONEY_SUPPLY > coins_already_generated:
block_reward = FINAL_SUBSIDY
else:
block_reward = FINAL_SUBSIDY/2
coins_already_generated += block_reward
total_time += DIFFICULTY_TARGET
if emission_speed_change_happened and (count % 2 if print_by_year else True):
print format(height, '07'), "\t", '{0:.10f}'.format(block_reward/1000000000.0), "\t", coins_already_generated/1000000000.0, "\t", str(round(coins_already_generated*100.0/MONEY_SUPPLY, 2)), "\t\t", format(int(total_time/(60*60*24.0)), '04'), "\t", total_time/(60*60*24)/365.25
f.write(format(height, '07') + "\t" + '{0:.8f}'.format(block_reward/1000000000.0) + "\t" + str(coins_already_generated/1000000000.0) + "\t" + '%05.2f'%(coins_already_generated*100.0/MONEY_SUPPLY) + "\t" + format(int(total_time/(60*60*24.0)), '04') + "\t" + str(round(total_time/(60*60*24)/365.25, 2)) + "\n")
height += 1
    print format(height, '07'), "\t", '{0:.10f}'.format(block_reward/1000000000.0), "\t", coins_already_generated/1000000000.0, "\t", str(round(coins_already_generated*100.0/MONEY_SUPPLY, 2)), "\t\t", format(int(total_time/(60*60*24.0)), '04'), "\t", total_time/(60*60*24)/365.25
f.write(format(height, '07') + "\t" + '{0:.8f}'.format(block_reward/1000000000.0) + "\t" + str(coins_already_generated/1000000000.0) + "\t" + '{0:.2f}'.format(round(coins_already_generated*100.0/MONEY_SUPPLY, 2)) + "\t" + format(int(total_time/(60*60*24.0)), '04') + "\t" + str(round(total_time/(60*60*24)/365.25, 2)) + "\n")
if __name__ == "__main__":
f = open("sumokoin_camel_emmission.txt", "w")
calculate_emssion_speed()
if COIN_EMISSION_MONTH_INTERVAL == 6:
print "\n\n\n"
f.write("\n\n\n")
calculate_emssion_speed(True)
f.close()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mig_main.location_field
class Migration(migrations.Migration):
dependencies = [
('mig_main', '0007_memberprofile_location'),
]
operations = [
migrations.AlterField(
model_name='memberprofile',
name='location',
field=mig_main.location_field.LocationField(blank=True),
),
]
|
from typing import List
class FindAllAnagrams:
def find(self, text: str, pattern: str) -> List[int]:
current_pointer_character_map = {}
pattern_character_map = {}
for i in range(len(pattern)):
pattern_character_map[pattern[i]] = pattern_character_map.get(pattern[i], 0) + 1
current_pointer_character_map[text[i]] = current_pointer_character_map.get(text[i], 0) + 1
print("")
result = [0] if current_pointer_character_map == pattern_character_map else []
left = 0
for c in range(len(pattern), len(text)):
current_pointer_character_map[text[c]] = current_pointer_character_map.get(text[c], 0) + 1
current_pointer_character_map[text[left]] -= 1
if current_pointer_character_map[text[left]] == 0:
current_pointer_character_map.pop(text[left])
left += 1
if current_pointer_character_map == pattern_character_map:
result.append(left)
return result
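# Example usage (illustrative, not part of the original class): the sliding window
# reports every start index where an anagram of the pattern occurs.
if __name__ == "__main__":
    finder = FindAllAnagrams()
    print(finder.find("cbaebabacd", "abc"))  # -> [0, 6]
    print(finder.find("abab", "ab"))         # -> [0, 1, 2]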
|
##### 3. Unsupervised learning and data preprocessing
#### 3.1 Types of unsupervised learning
### 1) Unsupervised transformations
## Re-express the data so that people or other ML algorithms can understand it more easily than the original representation
# ex) dimensionality reduction: reduce the number of dimensions of high-dimensional data while keeping what is needed
# ex) extracting topics from text documents
### 2) Clustering
## Group the data into clusters of similar items
#### 3.2 Challenges of unsupervised learning
### Evaluating whether the algorithm learned anything useful
### In many cases we do not know what the correct output should be
## There is no way to tell the algorithm what we want
## Sometimes inspecting the results by hand is the only option.
### Used a lot in the exploratory analysis stage
### Unsupervised learning is also used as a preprocessing step for supervised learning
#### 3.3 Data preprocessing and scaling
### Algorithms such as deep learning and SVMs are very sensitive to the scale of the data.
### 3.3.1 Different kinds of preprocessing
## 1) StandardScaler: shift each feature to mean 0 and variance 1 (standardization)
# no bound on the min and max values -> vulnerable to outliers
## 2) RobustScaler: standardization using the median and quartiles instead of mean and variance
# not sensitive to outliers
## 3) MinMaxScaler: shift the data so that every feature lies between 0 and 1
# for a 2D dataset, all the data ends up inside a unit square.
## 4) Normalizer: scale each data point so that its feature vector has Euclidean length 1
# i.e. project each data point onto a circle (or sphere) of radius 1
# each data point is scaled by a different factor (inversely proportional to its length).
# mostly used when only the direction (or angle) of the data matters, not the length of the feature vector.
### 3.3.2 Applying data transformations
## These scaling methods are usually applied before a supervised learning algorithm.
## To evaluate the supervised model built after preprocessing, split the data into a training set and a test set.
# The training set and the test set must be transformed in exactly the same way.
#### 3.4 Dimensionality reduction, feature extraction, manifold learning
### 3.4.1 Principal component analysis (PCA)
## A technique that rotates the dataset so that the features are statistically uncorrelated.
# After the first direction, it looks among the orthogonal directions for the one carrying the most information.
# In 2D there is only one possible orthogonal direction, but in higher dimensions there can be infinitely many.
# The directions found this way are the main directions of variance in the data,
# called principal components. In general there are as many principal components as original features.
## The axes rotated by PCA are uncorrelated.
# The correlation matrix of the transformed data is diagonal.
# correlation matrix: the normalized covariance matrix.
## Subtract the mean from the data to center it at the origin, then
## rotate so that the principal axes are aligned with the coordinate axes. Afterwards,
## add the mean back and rotate in the opposite direction.
## When a dataset has many features, draw histograms instead of a scatter matrix.
# Histograms show the distribution of each feature but carry no information about feature interactions.
## Create a PCA object, find the principal components with fit, then rotate and reduce the data with transform.
# To reduce the data, specify how many components to keep when creating the PCA object.
## PCA is unsupervised -> it uses no class information when finding the rotated axes.
## Drawback: the two axes of the plot are hard to interpret.
## PCA is also used for feature extraction
# feature extraction -> find a representation of the data better suited to analysis than the raw representation
# as a classifier there is too little training data per class, and it must be retrained for every new face
# computing distances in the raw pixel space works very poorly
# PCA with the whiten option -> rescales the principal components to the same scale.
# This is the same as applying StandardScaler after the transformation.
## PCA rotates the data and drops the components with small variance.
## Another way to understand PCA is to reconstruct the original data using only some of the components.
# The more components are used, the closer the reconstruction gets to the original.
### 3.4.2 Non-negative matrix factorization (NMF)
## Usable for extracting useful features and for dimensionality reduction.
## NMF looks for non-negative components and coefficients -> cannot be used on data with negative values
# PCA looked for the components with the largest variance
## Especially useful for data built from several overlapping sources: separating several voices, instruments, text data, etc.
## The components found by NMF are usually easier to interpret
# than PCA components, whose negative values can cancel each other in ways that are hard to understand.
## Check that the given data is non-negative
# i.e. where the data lies relative to the origin matters.
## If as many components as features are used,
# the algorithm picks directions pointing toward the extreme points of the data for each feature.
## If only a single component is used,
# it creates a component pointing toward the mean.
## Reducing the number of components does not just remove some directions: the whole set of components changes!
# Components in NMF are also not ordered in any particular way (there is no "first" component).
# All components are treated equally.
## The result depends on the random initial state.
# For complex data the random seed can make a large difference.
## NMF is more useful for finding patterns in the data than for encoding or reconstructing it.
# PCA finds the optimal directions in terms of reconstruction.
## Explaining the constraints on the components and coefficients requires probability theory.
### 3.4.3 Manifold learning with t-SNE
## Builds a much more complex mapping and gives better visualizations.
## Cannot be applied to new data.
# Useful for exploratory analysis, but not usable for supervised learning.
## t-SNE offers no way to transform new data
# A TSNE model has no transform method
# Instead use fit_transform, which builds the model and transforms the data in one step
## Uses no class label information -> completely unsupervised
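# Minimal sketch (not from the original notes): the four scalers described above
# applied to the same small array, to show how each one rescales a feature that
# contains an outlier. The toy data X_demo is made up for illustration.
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler, Normalizer
import numpy as np
X_demo = np.array([[1.0, -2.0], [2.0, 0.0], [3.0, 2.0], [100.0, 4.0]])  # first feature has an outlier
for scaler_cls in (StandardScaler, RobustScaler, MinMaxScaler, Normalizer):
    print(scaler_cls.__name__)
    print(scaler_cls().fit_transform(X_demo))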
import sys
print("Python ๋ฒ์ :", sys.version)
import pandas as pd
print("pandas ๋ฒ์ :", pd.__version__)
import matplotlib
from matplotlib import pyplot as plt
plt.show()
print("matplotlib ๋ฒ์ :", matplotlib.__version__)
import numpy as np
print("NumPy ๋ฒ์ :", np.__version__)
import scipy as sp
print("SciPy ๋ฒ์ :", sp.__version__)
import IPython
print("IPython ๋ฒ์ :", IPython.__version__)
import sklearn
print("scikit-learn ๋ฒ์ :", sklearn.__version__)
from sklearn.datasets import load_iris
iris_dataset = load_iris()
import mglearn
mglearn.plots.plot_scaling()
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
cancer = load_breast_cancer()
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target,
random_state=1)
print(X_train.shape)
print(X_test.shape)
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
# Transform the data
# To apply the transformation learned with fit, i.e. to actually rescale
# the training data, use the scaler object's transform method
X_train_scaled = scaler.transform(X_train)
# print the dataset properties after scaling
print("transformed shape:", X_train_scaled.shape)
print("per-feature minimum before scaling:\n", X_train.min(axis=0))
print("per-feature maximum before scaling:\n", X_train.max(axis=0))
print("per-feature minimum after scaling:\n", X_train_scaled.min(axis=0))
print("per-feature maximum after scaling:\n", X_train_scaled.max(axis=0))
# Transform the test data
# To apply an SVM to this data, the test set must be transformed as well.
X_test_scaled = scaler.transform(X_test)  # the same transformation!
# print the test data properties after scaling
print("per-feature minimum after scaling:\n", X_test_scaled.min(axis=0))
print("per-feature maximum after scaling:\n", X_test_scaled.max(axis=0))
# Scaling training and test data the same way
# In matplotlib 3.0, colors passed to the scatter function must be a single
# RGB-format string or a list of Colormap entries.
# To avoid the warning, we index the colors attribute of the ListedColormap
# object provided by mglearn directly to get RGB-format strings.
from sklearn.datasets import make_blobs
# generate a synthetic dataset
X, _ = make_blobs(n_samples=50, centers=5, random_state=4, cluster_std=2)
# split it into training and test sets
X_train, X_test = train_test_split(X, random_state=5, test_size=.1)
# plot the training and test sets
fig, axes = plt.subplots(1, 3, figsize=(13, 4))
axes[0].scatter(X_train[:, 0], X_train[:, 1],
                c=mglearn.cm2.colors[0], label="training set", s=60)
axes[0].scatter(X_test[:, 0], X_test[:, 1], marker='^',
                c=mglearn.cm2.colors[1], label="test set", s=60)
axes[0].legend(loc='upper left')
axes[0].set_title("original data")
# rescale the data with MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# plot the properly scaled data
axes[1].scatter(X_train_scaled[:, 0], X_train_scaled[:, 1],
                c=mglearn.cm2.colors[0], label="training set", s=60)
axes[1].scatter(X_test_scaled[:, 0], X_test_scaled[:, 1], marker='^',
                c=mglearn.cm2.colors[1], label="test set", s=60)
axes[1].set_title("scaled data")
# rescale the test set separately,
# so that its minimum becomes 0 and its maximum becomes 1
# this is only for illustration; never do this in practice
test_scaler = MinMaxScaler()
test_scaler.fit(X_test)
X_test_scaled_badly = test_scaler.transform(X_test)  # both sets end up spread over 0 to 1
# plot the badly scaled data
axes[2].scatter(X_train_scaled[:, 0], X_train_scaled[:, 1],
                c=mglearn.cm2.colors[0], label="training set", s=60)
axes[2].scatter(X_test_scaled_badly[:, 0], X_test_scaled_badly[:, 1],
                marker='^', c=mglearn.cm2.colors[1], label="test set", s=60)
axes[2].set_title("improperly scaled data")
for ax in axes:
    ax.set_xlabel("feature 0")
    ax.set_ylabel("feature 1")
fig.tight_layout()
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# use method chaining to call fit and transform in one line
X_scaled = scaler.fit(X_train).transform(X_train)
# same result as above, but more efficient
X_scaled_d = scaler.fit_transform(X_train)
# The effect of preprocessing on supervised learning
# scikit-learn 0.20 added a 'scale' option for the SVC gamma parameter in addition to 'auto'.
# 'auto' is 1/n_features, i.e. the reciprocal of the number of features.
# 'scale' is 1/(n_features * X.std()) and gives better results on unscaled features.
# From scikit-learn 0.22 the default value of gamma changes from 'auto' to 'scale'.
# If the features are standardized before using the SVM, 'scale' and 'auto' make no difference.
from sklearn.svm import SVC
X_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target,
random_state=0)
svm = SVC(C=100)
svm.fit(X_train, y_train)
print("ํ
์คํธ ์ธํธ ์ ํ๋: {:.2f}".format(svm.score(X_test, y_test)))
# 0~1 ์ฌ์ด๋ก ์ค์ผ์ผ ์กฐ์
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# ์กฐ์ ๋ ๋ฐ์ดํฐ๋ก SVM ํ์ต
svm.fit(X_train_scaled, y_train)
# ์ค์ผ์ผ ์กฐ์ ๋ ํ
์คํธ ์ธํธ์ ์ ํ๋
print("์ค์ผ์ผ ์กฐ์ ๋ ํ
์คํธ ์ธํธ์ ์ ํ๋: {:.2f}".format(svm.score(X_test_scaled, y_test)))
# ํ๊ท 0, ๋ถ์ฐ 1์ ๊ฐ๋๋ก ์ค์ผ์ผ ์กฐ์
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train)
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# ์กฐ์ ๋ ๋ฐ์ดํฐ๋ก SVM ํ์ต
svm.fit(X_train_scaled, y_train)
# ์ค์ผ์ผ ์กฐ์ ๋ ํ
์คํธ ์ธํธ์ ์ ํ๋
print("SVM test accuracy: {:.2f}".format(svm.score(X_test_scaled, y_test)))
#### 3.4
### 3.4.1
mglearn.plots.plot_pca_illustration()
## Applying PCA to the breast cancer dataset for visualization
fig, axes = plt.subplots(15, 2, figsize=(10, 20))
malignant = cancer.data[cancer.target == 0]
benign = cancer.data[cancer.target == 1]
ax = axes.ravel()
# a scatter matrix is impractical here (435 feature pairs), so draw histograms instead.
for i in range(30):
_, bins = np.histogram(cancer.data[:, i], bins=50)
ax[i].hist(malignant[:, i], bins=bins, color=mglearn.cm3(0), alpha=.5)
ax[i].hist(benign[:, i], bins=bins, color=mglearn.cm3(2), alpha=.5)
ax[i].set_title(cancer.feature_names[i])
ax[i].set_yticks(())
ax[0].set_xlabel("feature magnitude")
ax[0].set_ylabel("frequency")
ax[0].legend(["malignant", "benign"], loc="best")
fig.tight_layout()
from sklearn.datasets import load_breast_cancer
cancer = load_breast_cancer()
scaler = StandardScaler()
scaler.fit(cancer.data)
X_scaled = scaler.transform(cancer.data)
from sklearn.decomposition import PCA
# keep only the first two principal components of the data
pca = PCA(n_components=2)
# fit the PCA model to the breast cancer data
pca.fit(X_scaled)
# transform the data onto the first two principal components
X_pca = pca.transform(X_scaled)
print("original shape:", str(X_scaled.shape))
print("reduced shape:", str(X_pca.shape))
# plot the first two principal components, colored by class
plt.figure(figsize=(8, 8))
mglearn.discrete_scatter(X_pca[:, 0], X_pca[:, 1], cancer.target)
plt.legend(["malignant", "benign"], loc="best")
plt.gca().set_aspect("equal")
plt.xlabel("first principal component")
plt.ylabel("second principal component")
print("PCA components shape:", pca.components_.shape)
print("PCA components:", pca.components_)
plt.matshow(pca.components_, cmap='viridis')
plt.yticks([0, 1], ["first component", "second component"])
plt.colorbar()
plt.xticks(range(len(cancer.feature_names)),
           cancer.feature_names, rotation=60, ha='left')
plt.xlabel("feature")
plt.ylabel("principal components")
## Feature extraction with eigenfaces
from sklearn.datasets import fetch_lfw_people
people = fetch_lfw_people(min_faces_per_person=20, resize=0.7)
image_shape = people.images[0].shape
fig, axes = plt.subplots(2, 5, figsize=(15, 8),
subplot_kw={'xticks': (), 'yticks': ()})
for target, image, ax in zip(people.target, people.images, axes.ravel()):
ax.imshow(image)
ax.set_title(people.target_names[target])
people.target[0:10], people.target_names[people.target[0:10]]
print("people.images.shape:", people.images.shape)
print("ํด๋์ค ๊ฐ์:", len(people.target_names))
# ๊ฐ ํ๊น์ด ๋ํ๋ ํ์ ๊ณ์ฐ
counts = np.bincount(people.target)
# ํ๊น๋ณ ์ด๋ฆ๊ณผ ํ์ ์ถ๋ ฅ
for i, (count, name) in enumerate(zip(counts, people.target_names)):
print("{0:25} {1:3}".format(name, count), end=' ')
if (i + 1) % 3 == 0:
print()
mask = np.zeros(people.target.shape, dtype=bool)
for target in np.unique(people.target):
mask[np.where(people.target == target)[0][:50]] = 1
X_people = people.data[mask]
y_people = people.target[mask]
# scale the grayscale pixel values from the 0~255 range to 0~1.
# (translator's note) this is almost the same as applying MinMaxScaler.
X_people = X_people / 255.
from sklearn.neighbors import KNeighborsClassifier
# split the data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(
    X_people, y_people, stratify=y_people, random_state=0)
# build a KNeighborsClassifier using one neighbor
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)
print("1-nn test set score: {:.2f}".format(knn.score(X_test, y_test)))
mglearn.plots.plot_pca_whitening()
pca = PCA(n_components=100, whiten=True, random_state=0).fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("X_train_pca.shape:", X_train_pca.shape)
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train_pca, y_train)
print("ํ
์คํธ ์ธํธ ์ ํ๋: {:.2f}".format(knn.score(X_test_pca, y_test)))
print("pca.components_.shape:", pca.components_.shape)
fig, axes = plt.subplots(3, 5, figsize=(15, 12),
subplot_kw={'xticks': (), 'yticks': ()})
for i, (component, ax) in enumerate(zip(pca.components_, axes.ravel())):
ax.imshow(component.reshape(image_shape), cmap='viridis')
    ax.set_title("component {}".format(i + 1))
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
image_shape = people.images[0].shape
plt.figure(figsize=(20, 3))
ax = plt.gca()
imagebox = OffsetImage(people.images[0], zoom=2, cmap="gray")
ab = AnnotationBbox(imagebox, (.05, 0.4), pad=0.0, xycoords='data')
ax.add_artist(ab)
for i in range(4):
imagebox = OffsetImage(pca.components_[i].reshape(image_shape), zoom=2,
cmap="viridis")
ab = AnnotationBbox(imagebox, (.285 + .2 * i, 0.4),
pad=0.0, xycoords='data')
ax.add_artist(ab)
if i == 0:
plt.text(.155, .3, 'x_{} *'.format(i), fontdict={'fontsize': 30})
else:
plt.text(.145 + .2 * i, .3, '+ x_{} *'.format(i),
fontdict={'fontsize': 30})
plt.text(.95, .3, '+ ...', fontdict={'fontsize': 30})
plt.rc('text')
plt.text(.12, .3, '=', fontdict={'fontsize': 30})
plt.axis("off")
plt.show()
plt.close()
plt.rc('text')
mglearn.plots.plot_pca_faces(X_train, X_test, image_shape)
mglearn.discrete_scatter(X_train_pca[:, 0], X_train_pca[:, 1], y_train)
plt.xlabel("first principal component")
plt.ylabel("second principal component")
### 3.4.2
## Applying NMF to synthetic data
mglearn.plots.plot_nmf_illustration()
## Applying NMF to face images
mglearn.plots.plot_nmf_faces(X_train, X_test, image_shape)
from sklearn.decomposition import NMF
nmf = NMF(n_components=15, random_state=0)
nmf.fit(X_train)
X_train_nmf = nmf.transform(X_train)
X_test_nmf = nmf.transform(X_test)
fig, axes = plt.subplots(3, 5, figsize=(15, 12),
subplot_kw={'xticks': (), 'yticks': ()})
for i, (component, ax) in enumerate(zip(nmf.components_, axes.ravel())):
ax.imshow(component.reshape(image_shape))
ax.set_title("์ฑ๋ถ {}".format(i))
compn = 3
# 4๋ฒ์งธ ์ฑ๋ถ์ผ๋ก ์ ๋ ฌํ์ฌ ์ฒ์ 10๊ฐ ์ด๋ฏธ์ง๋ฅผ ์ถ๋ ฅํฉ๋๋ค
inds = np.argsort(X_train_nmf[:, compn])[::-1]
fig, axes = plt.subplots(2, 5, figsize=(15, 8),
subplot_kw={'xticks': (), 'yticks': ()})
for i, (ind, ax) in enumerate(zip(inds, axes.ravel())):
ax.imshow(X_train[ind].reshape(image_shape))
compn = 7
# 8๋ฒ์งธ ์ฑ๋ถ์ผ๋ก ์ ๋ ฌํ์ฌ ์ฒ์ 10๊ฐ ์ด๋ฏธ์ง๋ฅผ ์ถ๋ ฅํฉ๋๋ค
inds = np.argsort(X_train_nmf[:, compn])[::-1]
fig, axes = plt.subplots(2, 5, figsize=(15, 8),
subplot_kw={'xticks': (), 'yticks': ()})
for i, (ind, ax) in enumerate(zip(inds, axes.ravel())):
ax.imshow(X_train[ind].reshape(image_shape))
### 3.4.3
from sklearn.datasets import load_digits
digits = load_digits()
fig, axes = plt.subplots(2, 5, figsize=(10, 5),
subplot_kw={'xticks':(), 'yticks': ()})
for ax, img in zip(axes.ravel(), digits.images):
ax.imshow(img)
# PCA ๋ชจ๋ธ์ ์์ฑํฉ๋๋ค
pca = PCA(n_components=2)
pca.fit(digits.data)
# ์ฒ์ ๋ ๊ฐ์ ์ฃผ์ฑ๋ถ์ผ๋ก ์ซ์ ๋ฐ์ดํฐ๋ฅผ ๋ณํํฉ๋๋ค
digits_pca = pca.transform(digits.data)
colors = ["#476A2A", "#7851B8", "#BD3430", "#4A2D4E", "#875525",
"#A83683", "#4E655E", "#853541", "#3A3120","#535D8E"]
plt.figure(figsize=(10, 10))
plt.xlim(digits_pca[:, 0].min(), digits_pca[:, 0].max())
plt.ylim(digits_pca[:, 1].min(), digits_pca[:, 1].max())
for i in range(len(digits.data)):
    # plot the digits as text instead of scatter markers
    plt.text(digits_pca[i, 0], digits_pca[i, 1], str(digits.target[i]),
             color=colors[digits.target[i]],
             fontdict={'weight': 'bold', 'size': 9})
plt.xlabel("first principal component")
plt.ylabel("second principal component")
from sklearn.manifold import TSNE
tsne = TSNE(random_state=42)
# TSNE has no transform method, so we use fit_transform instead
digits_tsne = tsne.fit_transform(digits.data)
plt.figure(figsize=(10, 10))
plt.xlim(digits_tsne[:, 0].min(), digits_tsne[:, 0].max() + 1)
plt.ylim(digits_tsne[:, 1].min(), digits_tsne[:, 1].max() + 1)
for i in range(len(digits.data)):
    # plot the digits as text instead of scatter markers
    plt.text(digits_tsne[i, 0], digits_tsne[i, 1], str(digits.target[i]),
             color=colors[digits.target[i]],
             fontdict={'weight': 'bold', 'size': 9})
plt.xlabel("t-SNE feature 0")
plt.ylabel("t-SNE feature 1")
#### 3.5 Clustering
### 3.5.1 k-means clustering
## The algorithm looks for cluster centers that are representative of regions of the data.
# First, each data point is assigned to the closest cluster center
# Then each cluster center is reset to the mean of the data points assigned to it
## Even when the number of clusters in the dataset is known exactly, k-means may fail to separate them.
# k-means assumes all clusters have the same "radius".
# For clusters with complex shapes, k-means performs worse.
## Representing each data point by a cluster center
# Each data point is viewed as being represented by a single cluster center, i.e. a single component.
# Seeing each point as decomposed into one component is called vector quantization.
# An interesting aspect of vector quantization: data can be encoded with more clusters than input dimensions,
# so even data with complex shapes can be clustered once the dimensionality is "raised".
## Drawbacks
# Random initialization is used -> the output of the algorithm depends on the initial seed.
# The cluster shape is assumed in advance -> restricts where it can be applied
# The number of clusters to find must be specified
### Clustering resembles classification in that each data point gets a label.
## However, there is no ground truth, and the labels themselves carry no meaning.
# Cluster n might contain only photos of one person's face,
# but you only know that after inspecting the photos; the number n means nothing by itself.
# It only says that the faces labeled n are all similar in some sense.
### 3.5.2 Agglomerative clustering
## Agglomerative clustering starts with each point as its own cluster
## and keeps merging the two most similar clusters until some stopping criterion is met.
## scikit-learn linkage options
# ward: merge the two clusters that increase the within-cluster variance the least
# average: merge the two clusters with the smallest average distance between their points
# complete: merge the two clusters with the smallest maximum distance between their points
## Because of how the algorithm works, it cannot make predictions for new data points
# Agglomerative clustering uses fit_predict instead of a predict method
## Hierarchical clustering and dendrograms
# The clustering is built up iteratively, from single-point clusters to the final clusters
# Dendrograms can visualize hierarchical clustering even for datasets with more than two dimensions
### 3.5.3 DBSCAN
## No need to specify the number of clusters in advance
## Somewhat slower, but applicable to large datasets
## Finds points that lie in crowded regions of feature space, where many points are close together.
# These are called dense regions of feature space.
# Clusters are separated from each other by regions that are relatively empty.
## Pick a point at random
# Find all points within distance eps of it.
# If fewer than min_samples points lie within eps -> label the point as noise
# If more -> label it a core sample -> visit all of its neighbors within eps
# Points within eps not yet assigned to any cluster -> give them the label just created
## Running DBSCAN repeatedly on the same dataset -> the core-point clusters and the noise points are always the same.
## It is not very sensitive to the order in which points are visited.
## The min_samples setting mainly decides whether points in less dense regions
# end up as noise or form their own clusters.
## The number of clusters is not specified directly, but eps controls it indirectly.
# To find an appropriate eps, rescale the data with StandardScaler or MinMaxScaler first
# Lowering eps can create too many clusters.
# Handle the cluster assignments with care!
### 3.5.4 Comparing and evaluating clustering algorithms
## It is hard to tell whether an algorithm worked well, or to compare two algorithms.
## Evaluating clustering with ground truth
# ARI: adjusted rand index (it can be negative)
# NMI: normalized mutual information
## Use clustering metrics such as ARI or NMI; never measure accuracy!
# Accuracy checks whether the assigned cluster labels match the true labels.
# But cluster labels are meaningless in themselves;
# all that matters is which points end up in the same cluster.
## Evaluating clustering without ground truth
# If ground-truth labels were available, you could build a classifier instead.
# Silhouette coefficient: not very effective in practice.
# Robustness-based metrics can be used instead
### Create a KMeans object and specify the number of clusters to find, then call the fit method
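# Minimal sketch (not from the original notes): comparing the three linkage options
# described above on the same synthetic blobs; only ward is used further below.
from sklearn.datasets import make_blobs
from sklearn.cluster import AgglomerativeClustering
X_link, _ = make_blobs(random_state=1)
for linkage in ("ward", "average", "complete"):
    agg_demo = AgglomerativeClustering(n_clusters=3, linkage=linkage)
    print(linkage, np.bincount(agg_demo.fit_predict(X_link)))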
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
# ์ธ์์ ์ผ๋ก 2์ฐจ์ ๋ฐ์ดํฐ๋ฅผ ์์ฑํฉ๋๋ค
X, y = make_blobs(random_state=1)
# ๊ตฐ์ง ๋ชจ๋ธ์ ๋ง๋ญ๋๋ค
kmeans = KMeans(n_clusters=3)
kmeans.fit(X)
print(kmeans.labels_)
print(kmeans.predict(X))
fig, axes = plt.subplots(1, 2, figsize=(10, 5))
# ๋ ๊ฐ์ ํด๋ฌ์คํฐ ์ค์ฌ์ ์ฌ์ฉํฉ๋๋ค
kmeans = KMeans(n_clusters=2)
kmeans.fit(X)
assignments = kmeans.labels_
# ๋ค์ฏ ๊ฐ์ ํด๋ฌ์คํฐ ์ค์ฌ์ ์ฌ์ฉํฉ๋๋ค
kmeans = KMeans(n_clusters=5)
kmeans.fit(X)
assignments = kmeans.labels_
X_varied, y_varied = make_blobs(n_samples=200,
cluster_std=[1.0, 2.5, 0.5],
random_state=170)
y_pred = KMeans(n_clusters=3, random_state=0).fit_predict(X_varied)
mglearn.discrete_scatter(X_varied[:, 0], X_varied[:, 1], y_pred)
plt.legend(["ํด๋ฌ์คํฐ 0", "ํด๋ฌ์คํฐ 1", "ํด๋ฌ์คํฐ 2"], loc='best')
plt.xlabel("ํน์ฑ 0")
plt.ylabel("ํน์ฑ 1")
# ๋ฌด์์๋ก ํด๋ฌ์คํฐ ๋ฐ์ดํฐ ์์ฑํฉ๋๋ค
X, y = make_blobs(random_state=170, n_samples=600)
rng = np.random.RandomState(74)
# ๋ฐ์ดํฐ๊ฐ ๊ธธ๊ฒ ๋์ด์ง๋๋ก ๋ณ๊ฒฝํฉ๋๋ค
transformation = rng.normal(size=(2, 2))
X = np.dot(X, transformation)
# ์ธ ๊ฐ์ ํด๋ฌ์คํฐ๋ก ๋ฐ์ดํฐ์ KMeans ์๊ณ ๋ฆฌ์ฆ์ ์ ์ฉํฉ๋๋ค
kmeans = KMeans(n_clusters=3)
kmeans.fit(X)
y_pred = kmeans.predict(X)
# plot the cluster assignments and the cluster centers
mglearn.discrete_scatter(X[:, 0], X[:, 1], kmeans.labels_, markers='o')
mglearn.discrete_scatter(
kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], [0, 1, 2],
markers='^', markeredgewidth=2)
plt.xlabel("ํน์ฑ 0")
plt.ylabel("ํน์ฑ 1")
# generate the two_moons data (with a little noise this time)
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
# ๋ ๊ฐ์ ํด๋ฌ์คํฐ๋ก ๋ฐ์ดํฐ์ KMeans ์๊ณ ๋ฆฌ์ฆ์ ์ ์ฉํฉ๋๋ค
kmeans = KMeans(n_clusters=2)
kmeans.fit(X)
y_pred = kmeans.predict(X)
# ํด๋ฌ์คํฐ ํ ๋น๊ณผ ํด๋ฌ์คํฐ ์ค์ฌ์ ํ์ํฉ๋๋ค
plt.scatter(X[:, 0], X[:, 1], c=y_pred, cmap=mglearn.cm2, s=60, edgecolors='k')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1],
marker='^', c=[mglearn.cm2(0), mglearn.cm2(1)], s=100, linewidth=2, edgecolors='k')
plt.xlabel("ํน์ฑ 0")
plt.ylabel("ํน์ฑ 1")
X_train, X_test, y_train, y_test = train_test_split(
X_people, y_people, stratify=y_people, random_state=42)
nmf = NMF(n_components=100, random_state=0)
nmf.fit(X_train)
pca = PCA(n_components=100, random_state=0)
pca.fit(X_train)
kmeans = KMeans(n_clusters=100, random_state=0)
kmeans.fit(X_train)
X_reconstructed_pca = pca.inverse_transform(pca.transform(X_test))
X_reconstructed_kmeans = kmeans.cluster_centers_[kmeans.predict(X_test)]
X_reconstructed_nmf = np.dot(nmf.transform(X_test), nmf.components_)
fig, axes = plt.subplots(3, 5, figsize=(8, 8), subplot_kw={'xticks': (), 'yticks': ()})
fig.suptitle("์ถ์ถํ ์ฑ๋ถ")
for ax, comp_kmeans, comp_pca, comp_nmf in zip(
axes.T, kmeans.cluster_centers_, pca.components_, nmf.components_):
ax[0].imshow(comp_kmeans.reshape(image_shape))
ax[1].imshow(comp_pca.reshape(image_shape), cmap='viridis')
ax[2].imshow(comp_nmf.reshape(image_shape))
axes[0, 0].set_ylabel("kmeans")
axes[1, 0].set_ylabel("pca")
axes[2, 0].set_ylabel("nmf")
fig, axes = plt.subplots(4, 5, subplot_kw={'xticks': (), 'yticks': ()},
figsize=(8, 8))
fig.suptitle("์ฌ๊ตฌ์ฑ")
for ax, orig, rec_kmeans, rec_pca, rec_nmf in zip(
axes.T, X_test, X_reconstructed_kmeans, X_reconstructed_pca,
X_reconstructed_nmf):
ax[0].imshow(orig.reshape(image_shape))
ax[1].imshow(rec_kmeans.reshape(image_shape))
ax[2].imshow(rec_pca.reshape(image_shape))
ax[3].imshow(rec_nmf.reshape(image_shape))
axes[0, 0].set_ylabel("์๋ณธ")
axes[1, 0].set_ylabel("kmeans")
axes[2, 0].set_ylabel("pca")
axes[3, 0].set_ylabel("nmf")
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
kmeans = KMeans(n_clusters=10, random_state=0)
kmeans.fit(X)
y_pred = kmeans.predict(X)
plt.scatter(X[:, 0], X[:, 1], c=y_pred, s=60, cmap='Paired', edgecolors='black')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=60,
marker='^', c=range(kmeans.n_clusters), linewidth=2, cmap='Paired', edgecolors='black')
plt.xlabel("ํน์ฑ 0")
plt.ylabel("ํน์ฑ 1")
print("ํด๋ฌ์คํฐ ๋ ์ด๋ธ:\n", y_pred)
distance_features = kmeans.transform(X)
print("ํด๋ฌ์คํฐ ๊ฑฐ๋ฆฌ ๋ฐ์ดํฐ์ ํํ:", distance_features.shape)
print("ํด๋ฌ์คํฐ ๊ฑฐ๋ฆฌ:\n", distance_features)
### 3.5.2 Agglomerative clustering
from sklearn.cluster import AgglomerativeClustering
X, y = make_blobs(random_state=1)
agg = AgglomerativeClustering(n_clusters=3)
assignment = agg.fit_predict(X)
mglearn.discrete_scatter(X[:, 0], X[:, 1], assignment)
plt.legend(["ํด๋ฌ์คํฐ 0", "ํด๋ฌ์คํฐ 1", "ํด๋ฌ์คํฐ 2"], loc="best")
plt.xlabel("ํน์ฑ 0")
plt.ylabel("ํน์ฑ 1")
# SciPy์์ ward ๊ตฐ์ง ํจ์์ ๋ด๋๋ก๊ทธ๋จ ํจ์๋ฅผ ์ํฌํธํฉ๋๋ค
from scipy.cluster.hierarchy import dendrogram, ward
X, y = make_blobs(random_state=0, n_samples=12)
# ๋ฐ์ดํฐ ๋ฐฐ์ด X ์ ward ํจ์๋ฅผ ์ ์ฉํฉ๋๋ค
# SciPy์ ward ํจ์๋ ๋ณํฉ ๊ตฐ์ง์ ์ํํ ๋ ์์ฑ๋
# ๊ฑฐ๋ฆฌ ์ ๋ณด๊ฐ ๋ด๊ธด ๋ฐฐ์ด์ ๋ฆฌํดํฉ๋๋ค
linkage_array = ward(X)
# ํด๋ฌ์คํฐ ๊ฐ์ ๊ฑฐ๋ฆฌ ์ ๋ณด๊ฐ ๋ด๊ธด linkage_array๋ฅผ ์ฌ์ฉํด ๋ด๋๋ก๊ทธ๋จ์ ๊ทธ๋ฆฝ๋๋ค
dendrogram(linkage_array)
# ๋ ๊ฐ์ ์ธ ๊ฐ์ ํด๋ฌ์คํฐ๋ฅผ ๊ตฌ๋ถํ๋ ์ปคํธ๋ผ์ธ์ ํ์ํฉ๋๋ค
ax = plt.gca()
bounds = ax.get_xbound()
ax.plot(bounds, [7.25, 7.25], '--', c='k')
ax.plot(bounds, [4, 4], '--', c='k')
ax.text(bounds[1], 7.25, ' ๋ ๊ฐ ํด๋ฌ์คํฐ', va='center', fontdict={'size': 15})
ax.text(bounds[1], 4, ' ์ธ ๊ฐ ํด๋ฌ์คํฐ', va='center', fontdict={'size': 15})
plt.xlabel("์ํ ๋ฒํธ")
plt.ylabel("ํด๋ฌ์คํฐ ๊ฑฐ๋ฆฌ")
### 3.5.3 DBSCAN
### Evaluating clustering with ground truth
from sklearn.metrics.cluster import adjusted_rand_score
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
# ํ๊ท ์ด 0, ๋ถ์ฐ์ด 1์ด ๋๋๋ก ๋ฐ์ดํฐ์ ์ค์ผ์ผ์ ์กฐ์ ํฉ๋๋ค
scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
fig, axes = plt.subplots(1, 4, figsize=(15, 3),
subplot_kw={'xticks': (), 'yticks': ()})
# make a list of the clustering algorithms to compare
from sklearn.cluster import DBSCAN
algorithms = [KMeans(n_clusters=2), AgglomerativeClustering(n_clusters=2),
              DBSCAN()]
# ๋น๊ต๋ฅผ ์ํด ๋ฌด์์๋ก ํด๋ฌ์คํฐ ํ ๋น์ ํฉ๋๋ค
random_state = np.random.RandomState(seed=0)
random_clusters = random_state.randint(low=0, high=2, size=len(X))
# ๋ฌด์์ ํ ๋นํ ํด๋ฌ์คํฐ๋ฅผ ๊ทธ๋ฆฝ๋๋ค
axes[0].scatter(X_scaled[:, 0], X_scaled[:, 1], c=random_clusters,
cmap=mglearn.cm3, s=60, edgecolors='black')
axes[0].set_title("๋ฌด์์ ํ ๋น - ARI: {:.2f}".format(
adjusted_rand_score(y, random_clusters)))
for ax, algorithm in zip(axes[1:], algorithms):
# ํด๋ฌ์คํฐ ํ ๋น๊ณผ ํด๋ฌ์คํฐ ์ค์ฌ์ ๊ทธ๋ฆฝ๋๋ค
clusters = algorithm.fit_predict(X_scaled)
ax.scatter(X_scaled[:, 0], X_scaled[:, 1], c=clusters,
cmap=mglearn.cm3, s=60, edgecolors='black')
ax.set_title("{} - ARI: {:.2f}".format(algorithm.__class__.__name__,
adjusted_rand_score(y, clusters)))
from sklearn.metrics import accuracy_score
# two different cluster assignments of the same points
clusters1 = [0, 0, 1, 1, 0]
clusters2 = [1, 1, 0, 0, 1]
# all labels are swapped, so the accuracy is 0
print("accuracy: {:.2f}".format(accuracy_score(clusters1, clusters2)))
# the same points are grouped together, so the ARI is 1
print("ARI: {:.2f}".format(adjusted_rand_score(clusters1, clusters2)))
### Evaluating clustering without ground truth
from sklearn.metrics.cluster import silhouette_score
X, y = make_moons(n_samples=200, noise=0.05, random_state=0)
# ํ๊ท ์ด 0, ๋ถ์ฐ์ด 1์ด ๋๋๋ก ๋ฐ์ดํฐ์ ์ค์ผ์ผ์ ์กฐ์ ํฉ๋๋ค
scaler = StandardScaler()
scaler.fit(X)
X_scaled = scaler.transform(X)
fig, axes = plt.subplots(1, 4, figsize=(15, 3),
subplot_kw={'xticks': (), 'yticks': ()})
# ๋น๊ต๋ฅผ ์ํด ๋ฌด์์๋ก ํด๋ฌ์คํฐ ํ ๋น์ ํฉ๋๋ค
random_state = np.random.RandomState(seed=0)
random_clusters = random_state.randint(low=0, high=2, size=len(X))
# ๋ฌด์์ ํ ๋นํ ํด๋ฌ์คํฐ๋ฅผ ๊ทธ๋ฆฝ๋๋ค
axes[0].scatter(X_scaled[:, 0], X_scaled[:, 1], c=random_clusters,
cmap=mglearn.cm3, s=60, edgecolors='black')
axes[0].set_title("๋ฌด์์ ํ ๋น: {:.2f}".format(
silhouette_score(X_scaled, random_clusters)))
algorithms = [KMeans(n_clusters=2), AgglomerativeClustering(n_clusters=2),
DBSCAN()]
for ax, algorithm in zip(axes[1:], algorithms):
clusters = algorithm.fit_predict(X_scaled)
# ํด๋ฌ์คํฐ ํ ๋น๊ณผ ํด๋ฌ์คํฐ ์ค์ฌ์ ๊ทธ๋ฆฝ๋๋ค
ax.scatter(X_scaled[:, 0], X_scaled[:, 1], c=clusters, cmap=mglearn.cm3,
s=60, edgecolors='black')
ax.set_title("{} : {:.2f}".format(algorithm.__class__.__name__,
silhouette_score(X_scaled, clusters)))
### Comparing clustering algorithms on the face dataset
# extract eigenfaces from the LFW data and transform the data with them
from sklearn.decomposition import PCA
pca = PCA(n_components=100, whiten=True, random_state=0)
X_pca = pca.fit_transform(X_people)
## Analyzing the face dataset with DBSCAN
# apply DBSCAN with the default parameters
dbscan = DBSCAN()
labels = dbscan.fit_predict(X_pca)
print("๊ณ ์ ํ ๋ ์ด๋ธ:", np.unique(labels))
dbscan = DBSCAN(min_samples=3)
labels = dbscan.fit_predict(X_pca)
print("๊ณ ์ ํ ๋ ์ด๋ธ:", np.unique(labels))
dbscan = DBSCAN(min_samples=3, eps=15)
labels = dbscan.fit_predict(X_pca)
print("๊ณ ์ ํ ๋ ์ด๋ธ:", np.unique(labels))
# count the number of noise points and points inside each cluster.
# bincount cannot take negative numbers, so we add 1 to labels.
# the first element of the result is the number of noise points.
print("points per cluster:", np.bincount(labels + 1))
noise = X_people[labels==-1]
fig, axes = plt.subplots(3, 9, subplot_kw={'xticks': (), 'yticks': ()},
figsize=(12, 4))
for image, ax in zip(noise, axes.ravel()):
ax.imshow(image.reshape(image_shape), vmin=0, vmax=1)
for eps in [1, 3, 5, 7, 9, 11, 13]:
print("\neps=", eps)
dbscan = DBSCAN(eps=eps, min_samples=3)
labels = dbscan.fit_predict(X_pca)
print("ํด๋ฌ์คํฐ ์:", len(np.unique(labels)))
print("ํด๋ฌ์คํฐ ํฌ๊ธฐ:", np.bincount(labels + 1))
dbscan = DBSCAN(min_samples=3, eps=7)
labels = dbscan.fit_predict(X_pca)
for cluster in range(max(labels) + 1):
mask = labels == cluster
n_images = np.sum(mask)
fig, axes = plt.subplots(1, 14, figsize=(14*1.5, 4),
subplot_kw={'xticks': (), 'yticks': ()})
i = 0
for image, label, ax in zip(X_people[mask], y_people[mask], axes):
ax.imshow(image.reshape(image_shape), vmin=0, vmax=1)
ax.set_title(people.target_names[label].split()[-1])
i += 1
for j in range(len(axes) - i):
axes[j+i].imshow(np.array([[1]*65]*87), vmin=0, vmax=1)
axes[j+i].axis('off')
## k-ํ๊ท ์ผ๋ก ์ผ๊ตด ๋ฐ์ดํฐ์
๋ถ์ํ๊ธฐ
n_clusters = 10
# k-ํ๊ท ์ผ๋ก ํด๋ฌ์คํฐ๋ฅผ ์ถ์ถํฉ๋๋ค
km = KMeans(n_clusters=n_clusters, random_state=0)
labels_km = km.fit_predict(X_pca)
print("k-ํ๊ท ์ ํด๋ฌ์คํฐ ํฌ๊ธฐ:", np.bincount(labels_km))
fig, axes = plt.subplots(2, 5, subplot_kw={'xticks': (), 'yticks': ()},
figsize=(12, 4))
for center, ax in zip(km.cluster_centers_, axes.ravel()):
ax.imshow(pca.inverse_transform(center).reshape(image_shape),
vmin=0, vmax=1)
mglearn.plots.plot_kmeans_faces(km, pca, X_pca, X_people,
y_people, people.target_names)
## Analyzing the faces dataset with agglomerative clustering
# Extract clusters with ward agglomerative clustering
agglomerative = AgglomerativeClustering(n_clusters=10)
labels_agg = agglomerative.fit_predict(X_pca)
print("๋ณํฉ ๊ตฐ์ง์ ํด๋ฌ์คํฐ ํฌ๊ธฐ:",
np.bincount(labels_agg))
print("ARI: {:.2f}".format(adjusted_rand_score(labels_agg, labels_km)))
linkage_array = ward(X_pca)
# Plot the dendrogram for the linkage_array containing the distances between clusters
plt.figure(figsize=(20, 5))
dendrogram(linkage_array, p=7, truncate_mode='level', no_labels=True)
plt.xlabel("์ํ ๋ฒํธ")
plt.ylabel("ํด๋ฌ์คํฐ ๊ฑฐ๋ฆฌ")
ax = plt.gca()
bounds = ax.get_xbound()
ax.plot(bounds, [36, 36], '--', c='k')
n_clusters = 10
for cluster in range(n_clusters):
mask = labels_agg == cluster
fig, axes = plt.subplots(1, 10, subplot_kw={'xticks': (), 'yticks': ()},
figsize=(15, 8))
axes[0].set_ylabel(np.sum(mask))
for image, label, asdf, ax in zip(X_people[mask], y_people[mask],
labels_agg[mask], axes):
ax.imshow(image.reshape(image_shape), vmin=0, vmax=1)
ax.set_title(people.target_names[label].split()[-1],
fontdict={'fontsize': 9})
# Extract clusters with agglomerative clustering
agglomerative = AgglomerativeClustering(n_clusters=40)
labels_agg = agglomerative.fit_predict(X_pca)
print("๋ณํฉ ๊ตฐ์ง์ ํด๋ฌ์คํฐ ํฌ๊ธฐ:", np.bincount(labels_agg))
n_clusters = 40
for cluster in [13, 16, 23, 38, 39]: # hand-picked a few interesting clusters
mask = labels_agg == cluster
fig, axes = plt.subplots(1, 15, subplot_kw={'xticks': (), 'yticks': ()},
figsize=(15, 8))
cluster_size = np.sum(mask)
axes[0].set_ylabel("#{}: {}".format(cluster, cluster_size))
for image, label, asdf, ax in zip(X_people[mask], y_people[mask],
labels_agg[mask], axes):
ax.imshow(image.reshape(image_shape), vmin=0, vmax=1)
ax.set_title(people.target_names[label].split()[-1],
fontdict={'fontsize': 9})
for i in range(cluster_size, 15):
axes[i].set_visible(False)
### Summary and outlook
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
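# A small self-contained sketch (an assumption, not from the original text) of the
# fit / predict / score interface that scikit-learn estimators share, using the
# logreg object defined above on a synthetic two-moons split.
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
X_demo, y_demo = make_moons(n_samples=200, noise=0.05, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X_demo, y_demo, random_state=0)
logreg.fit(X_train, y_train)
print("Test set accuracy: {:.2f}".format(logreg.score(X_test, y_test)))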
|
# -*- coding:utf-8 -*-
from torcms.core import tools
from torcms.model.post_hist_model import MPostHist
from torcms.model.post_model import MPost
class TestMPostHist():
def setup(self):
print('setup runs before each test case in this class')
self.uid = ''
self.post_id = 'llk8'
def test_create_post_history(self):
self.tearDown()
p_d = {
'title': 'qqqii',
'cnt_md': 'qwqwqw',
'time_create': '1999',
'time_update': '2019',
'user_name': 'max',
'view_count': '1',
'logo': 'opps',
'memo': '',
'order': '1',
'kind': '1',
'valid': 1,
}
MPost().add_meta(self.post_id, p_d)
aa = MPost.get_by_uid(self.post_id)
tf = MPostHist.create_post_history(aa, aa)
assert tf
His = MPostHist.query_by_postid(self.post_id)
self.uid = His[0].uid
assert His[0].cnt_md == p_d['cnt_md']
self.tearDown()
def addHis(self, **kwargs):
p_d = {
'title': kwargs.get('title', 'iiiii'),
'cnt_md': kwargs.get('cnt_md', 'grgr'),
'time_create': kwargs.get('time_create', '1992'),
'time_update': kwargs.get('time_update', '1996070600'),
'user_name': kwargs.get('user_name', 'yuanyuan'),
'view_count': kwargs.get('view_count', 1),
'logo': kwargs.get('logo', 'prprprprpr'),
'memo': kwargs.get('memo', ''),
'order': kwargs.get('order', '1'),
'keywords': kwargs.get('keywords', ''),
'extinfo': kwargs.get('extinfo', {}),
'kind': kwargs.get('kind', '1'),
'valid': kwargs.get('valid', 1),
}
MPost().add_meta(self.post_id, p_d)
aa = MPost.get_by_uid(self.post_id)
MPostHist.create_post_history(aa, aa)
His = MPostHist.query_by_postid(self.post_id)
self.uid = His[0].uid
def test_get_by_uid(self):
p_t = {
'cnt_md': 'bbrreedd'
}
self.addHis(**p_t)
pp = MPostHist.get_by_uid(self.uid)
assert pp.cnt_md == p_t['cnt_md']
self.tearDown()
def test_update_cnt(self):
self.addHis()
post_data = {
'user_name': 'giser',
'cnt_md': 'gisersdfsdfsdf'
}
MPostHist.update_cnt(self.uid, post_data)
pp = MPostHist.get_by_uid(self.uid)
assert pp.cnt_md == post_data['cnt_md']
self.tearDown()
def test_query_by_postid(self):
p_t = {
'cnt_md': 'bbrreedd',
'user_name': 'ggggbabybaby'
}
self.addHis(**p_t)
aa = MPostHist.query_by_postid(self.post_id)
assert aa[0].cnt_md == p_t['cnt_md']
assert aa[0].user_name == p_t['user_name']
self.tearDown()
def test_get_last(self):
p_t = {
'cnt_md': 'bbrreedd',
'user_name': 'snow'
}
self.addHis(**p_t)
aa = MPostHist.get_last(self.post_id)
assert aa.user_name == p_t['user_name']
self.tearDown()
def test_delete(self):
aa = MPostHist.get_by_uid(self.uid)
assert aa is None
self.addHis()
aa = MPostHist.get_by_uid(self.uid)
assert aa.post_id == self.post_id
aa = MPostHist.delete(self.post_id)
assert aa is False
self.tearDown()
def tearDown(self):
print("function teardown")
tt = MPostHist.get_by_uid(self.uid)
if tt:
MPostHist.delete(tt.uid)
tt = MPost.get_by_uid(self.post_id)
if tt:
MPost.delete(tt.uid)
|
import numpy
# from chainer import testing
# from chainer import utils
import ideep4py
x1 = numpy.ndarray(shape=(2, 16, 2, 2), dtype=numpy.float32, order='C')
x2 = numpy.ndarray(shape=(2, 16, 2, 2), dtype=numpy.float32, order='C')
mx1 = ideep4py.mdarray(x1)
mx2 = ideep4py.mdarray(x2)
numpy.copyto(x2, x1)
ideep4py.basic_copyto(mx2, mx1)
t = numpy.asarray(mx2)
numpy.allclose(t, x2, 1e-5, 1e-4, True)
x1 = numpy.ndarray(shape=(2, 16, 2, 2), dtype=numpy.float32, order='C')
x2 = numpy.ndarray(shape=(2, 16, 2, 2), dtype=numpy.float32, order='C')
mx2 = ideep4py.mdarray(x2)
numpy.copyto(x2, x1)
ideep4py.basic_copyto(mx2, x1)
t = numpy.asarray(mx2)
numpy.allclose(t, x2, 1e-5, 1e-4, True)
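# An optional check (an addition, not part of the original snippet): the allclose
# calls above return a bool that is silently discarded; assert_allclose raises a
# descriptive error instead when the copied arrays do not match.
numpy.testing.assert_allclose(numpy.asarray(mx2), x2, rtol=1e-5, atol=1e-4)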
|
from api.gcp.tasks.baseTask import baseTask
import json
import datetime
from core.input_form_singleton import input_form_singleton
from core.form_singleton import formSingleton_singleton
from db.connection_pool import MysqlConn
from utils.status_code import response_code
from config import configuration
import traceback
import logging
from googleapiclient.errors import HttpError
logger = logging.getLogger("main." + __name__)
class system_create_tag(baseTask):
api_type = 'system'
api_name = 'system_create_tag'
arguments = {}
def __init__(self, stage_dict):
super(system_create_tag, self).__init__(stage_dict)
# print('stage_dict:', stage_dict)
def execute(self, workspace_id=None, form_id=None, input_form_id=None, user_id=None):
conn = MysqlConn()
try:
db_name = configuration.get_database_name()
missing_set = set()
for key in self.arguments:
check_key = self.stage_dict.get(key, 'NotFound')
if check_key == 'NotFound':
missing_set.add(key)
# # print('{}: {}'.format(key, self.stage_dict[key]))
if len(missing_set) != 0:
data = response_code.BAD_REQUEST
data['msg'] = 'Missing parameters: {}'.format(', '.join(missing_set))
return data
else:
# get tag template id
sql = self.create_select_sql(db_name, 'tagTemplatesTable', 'id,tag_template_form_id', condition="tag_template_form_id='%s' order by id desc" % form_id)
logger.debug("FN:system_create_tag_execute tagTemplatesTable_sql:{}".format(sql))
tag_template_info = self.execute_fetch_all(conn, sql)
tag_template_local_id = tag_template_info[0]['id']
input_form_data = input_form_singleton.get_input_form_data(user_id, input_form_id)
form_data = formSingleton_singleton.get_details_form_by_id(form_id, workspace_id)
logger.debug("FN:system_create_tag_execute input_form_data:{} form_data:{}".format(input_form_data, form_data))
if input_form_data['code'] == 200 and form_data['code'] == 200:
input_form_info = input_form_data['data'][0]
form_info = form_data['data']
form_field_values_dict = input_form_info['form_field_values_dict']
field_list = form_info['fieldList']
# get tag fields data
tag_fields_dict = {}
for field_info in field_list:
id = str(field_info['id'])
label = field_info['label']
tag_field_id = label.replace(' ', '_').lower().strip()
if id in form_field_values_dict:
value = form_field_values_dict[id]
else:
value = None
field = {'displayName': label,
'field_id': tag_field_id,
'value': value}
tag_fields_dict[id] = field
create_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
fields = ('form_id', 'input_form_id', 'creator_id', 'tag_template_local_id', 'field_list', 'create_time')
values = (form_id, input_form_id, user_id, tag_template_local_id, json.dumps(tag_fields_dict).replace('\\', '\\\\'), create_time)
sql = self.create_insert_sql(db_name, 'tagTemplatesValueTable', '({})'.format(', '.join(fields)), values)
logger.debug("FN:system_create_tag_execute insert_tagTemplatesValueTable_sql:{}".format(sql))
_ = self.insert_exec(conn, sql)
data = response_code.SUCCESS
data['data'] = 'created successfully.'
return data
else:
data = response_code.BAD_REQUEST
data['msg'] = 'get form failed, input_form_data_code:{}, input_form_id:{}, user_id:{}; ' \
'form_data_code:{}, form_id:{}, workspace_id:{}'.format(
str(input_form_data['code']),
str(input_form_id),
str(user_id),
str(form_data['code']),
str(form_id),
str(workspace_id))
return data
except HttpError as e:
error_json = json.loads(e.content, strict=False)
data = error_json['error']
data["msg"] = data.pop("message")
logger.error("FN:system_create_tag_execute error:{}".format(traceback.format_exc()))
return data
except Exception as e:
logger.error("FN:system_create_tag_execute error:{}".format(traceback.format_exc()))
data = response_code.BAD_REQUEST
data['msg'] = str(e)
return data
finally:
conn.close()
|