content stringlengths 5 1.05M |
|---|
# Interactively look up Spotify users: prompt for a user ID, run the OAuth
# flow (show_dialog=True forces the approval dialog each time), and print
# the authenticated user's profile.
import spotipy
import spotipy.util as util
from pprint import pprint

while True:
    username = input("Type the Spotify user ID to use: ")
    if not username:
        break  # empty input exits instead of looping forever
    token = util.prompt_for_user_token(username, show_dialog=True)
    if not token:
        # prompt_for_user_token returns a falsy value when auth fails;
        # the original passed it straight to Spotify() and crashed later.
        print("Could not get a token for %s" % username)
        continue
    sp = spotipy.Spotify(token)
    pprint(sp.me())
import json
import os
import shutil
import subprocess
# Root of the original (patient/study) NRRD tree to read from.
sourceDirectory = '/Volumes/encrypted/babybrains/converted'
# Root of the anonymized, renamed copy this script produces.
targetDirectory = '/Volumes/encrypted/babybrains/converted-renamed'
# Maps original patient / study identifiers to their anonymized
# 'patient-NNNNN' / 'study-NNNNN' names; filled in by rename().
patientMap = {}
studyMap = {}
seriesMap = {}  # NOTE(review): never written below — possibly vestigial
def rename(subdirectory):
    """Copy every .nrrd file found in `subdirectory` into targetDirectory
    under anonymized patient/study/series names, recording the name
    mappings in the module-level patientMap/studyMap dictionaries.

    `subdirectory` is expected to look like .../<patient>/<study>.
    """
    # os.listdir replaces the original `ls` subprocess: no process spawn
    # and, unlike Popen's byte-string stdout, it yields str entries —
    # the old bytes lines made `.endswith('.nrrd')` raise TypeError on
    # Python 3.  sorted() matches `ls`'s alphabetical ordering, keeping
    # the series numbering deterministic.
    nrrds = sorted(entry for entry in os.listdir(subdirectory)
                   if entry.endswith('.nrrd'))
    # Path layout assumption: .../<patient>/<study> — TODO confirm.
    patient = subdirectory.split('/')[-2]
    study = subdirectory.split('/')[-1]
    for index, nrrd in enumerate(nrrds):
        if patient not in patientMap:
            patientMap[patient] = 'patient-%05d' % len(patientMap)
            studyMap[patient] = {}
        if study not in studyMap[patient]:
            studyMap[patient][study] = 'study-%05d' % len(studyMap[patient])
        outputPath = os.path.join(targetDirectory, patientMap[patient],
                                  studyMap[patient][study])
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(outputPath, exist_ok=True)
        nrrdPath = os.path.join(
            outputPath,
            '%s_%s_series-%d.nrrd' % (patientMap[patient],
                                      studyMap[patient][study],
                                      index))
        print('-- copy --')
        oldPath = os.path.join(subdirectory, nrrd)
        print(oldPath)
        print(nrrdPath)
        shutil.copyfile(oldPath, nrrdPath)
# Walk every directory under sourceDirectory and anonymize its NRRD files.
# `find -type d` emits one directory path per line.  universal_newlines
# makes stdout yield str: the original compared the raw bytes b'' against
# '' at EOF, which is never True, so the loop never terminated on Python 3.
findProcess = subprocess.Popen(['find', sourceDirectory, '-type', 'd'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
while True:
    try:
        subdirectory = findProcess.stdout.readline().strip()
        if subdirectory == "":
            break  # readline() returns '' only at EOF
        print(subdirectory)
        rename(subdirectory)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C can still stop the run;
        # per-directory failures are reported and the walk continues.
        print('-' * 40)
        import traceback
        traceback.print_exc()  # XXX But this goes to stderr!
        print('-' * 40)

# Persist the anonymization mappings next to the source data; `with`
# guarantees the files are closed even if json serialization fails.
with open(os.path.join(sourceDirectory, "patientMap.json"), 'w') as fp:
    fp.write(json.dumps(patientMap))
with open(os.path.join(sourceDirectory, "studyMap.json"), 'w') as fp:
    fp.write(json.dumps(studyMap))
|
"""
* Created by Synerty Pty Ltd
*
* This software is open source, the MIT license applies.
*
* Website : http://www.synerty.com
* Support : support@synerty.com
"""
import pem
import logging
import platform
from typing import Optional
from twisted.internet import reactor
from twisted.internet.ssl import DefaultOpenSSLContextFactory
from twisted.web import server
from txhttputil.login_page.LoginElement import LoginElement
from txhttputil.site.AuthCredentials import AllowAllAuthCredentials, AuthCredentials
from txhttputil.site.AuthSessionWrapper import FormBasedAuthSessionWrapper
from txhttputil.site.FileUploadRequest import FileUploadRequest
from txhttputil.site.RedirectToHttpsResource import RedirectToHttpsResource
from txwebsocket.txws import WebSocketUpgradeHTTPChannel
logger = logging.getLogger(__name__)
def setupSite(name: str,
              rootResource,
              portNum: int = 8000,
              credentialChecker: AuthCredentials = AllowAllAuthCredentials(),
              enableLogin=True,
              SiteProtocol=WebSocketUpgradeHTTPChannel,
              redirectFromHttpPort: Optional[int] = None,
              sslBundleFilePath: Optional[str] = None):
    """ Setup Site

    Sets up the web site to listen for connections and serve the site.
    Supports customisation of resources based on user details.

    NOTE: the default credentialChecker instance is created once at import
    time and shared by every call that omits the argument.

    @return: Port object
    """
    # When an HTTP->HTTPS redirect is requested, spin up a second, plain
    # site on the HTTP port whose only job is redirecting to `portNum`.
    if redirectFromHttpPort is not None:
        setupSite(name='%s https redirect' % name,
                  portNum=redirectFromHttpPort,
                  rootResource=RedirectToHttpsResource(portNum),
                  enableLogin=False)

    LoginElement.siteName = name

    if enableLogin:
        protectedResource = FormBasedAuthSessionWrapper(rootResource, credentialChecker)
    else:
        logger.critical("Resource protection disabled NO LOGIN REQUIRED")
        protectedResource = rootResource

    site = server.Site(protectedResource)
    site.protocol = SiteProtocol
    site.requestFactory = FileUploadRequest

    if sslBundleFilePath:
        proto = 'https'
        contextFactory = pem.twisted.certificateOptionsFromFiles(sslBundleFilePath)
        sitePort = reactor.listenSSL(portNum, site, contextFactory)
    else:
        proto = 'http'
        sitePort = reactor.listenTCP(portNum, site)

    # BUG FIX: the original used `platform.system() is "Linux"` — an
    # identity comparison against a string literal, which is not
    # guaranteed to be True even when the strings are equal.
    if platform.system() == "Linux":
        import subprocess
        # Fragile ifconfig scraping — assumes the second line/field holds
        # the address with an 'addr:' style prefix; TODO confirm.
        ip = subprocess.getoutput("/sbin/ifconfig").split("\n")[1].split()[1][5:]
    else:
        ip = "0.0.0.0"

    logger.info('%s is alive and listening on %s://%s:%s', name, proto, ip, sitePort.port)
    return sitePort
|
import re
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Tuple, Union
if TYPE_CHECKING:
from .base import BaseRepoAnalyzer
from .git import LocalBranch
Pathish = Union[str, Path]
class UnsupportedURLError(ValueError):
    """Raised when a Git remote URL cannot be mapped to a web URL."""

    def __str__(self):
        return f"Unsupported URL: {self.url}"

    def __init__(self, url):
        # Keep the offending URL around for callers and for __str__.
        self.url = url
def rooturl(url: str) -> str:
    """
    Turn Git `url` to something web browsers recognize.

    >>> rooturl("git@github.com:group/project.git")
    'https://github.com/group/project'
    >>> rooturl("git@gitlab.com:group/project.git")
    'https://gitlab.com/group/project'
    >>> rooturl("git@bitbucket.org:group/project.git")
    'https://bitbucket.org/group/project'
    >>> rooturl("ssh://git@bitbucket.org/group/project.git")
    'https://bitbucket.org/group/project'
    >>> rooturl("git@gitlab.com:group/project.wiki.git")
    'https://gitlab.com/group/project/wikis'
    >>> rooturl("unsupported.host:some/remote/path")
    Traceback (most recent call last):
      ...
    vcslinks...UnsupportedURLError: Unsupported URL: unsupported.host:some/remote/path
    """
    # Normalize to an https root first, then apply host-specific
    # rewrites (currently only GitLab wikis).
    normalized = _rooturl(url)
    return _specialurl(normalized)
def _rooturl(url: str) -> str:
match = re.match(r"^https?://(.*?)(.git)?$", url)
if match:
return f"https://{match.group(1)}"
if "@" not in url:
raise UnsupportedURLError(url)
_, web = url.split("@", 1)
if ":" in web:
host, path = web.split(":", 1)
elif "/" in web:
host, path = web.split("/", 1)
else:
raise UnsupportedURLError(url)
if path.endswith(".git"):
path = path[: -len(".git")]
return f"https://{host}/{path}"
def _specialurl(url: str) -> str:
host, path = url.split("://", 1)[1].split("/", 1)
if "gitlab" in host and path.endswith(".wiki"):
return f"https://{host}/{path[:-len('.wiki')]}/wikis"
return url
# None = no line selection; int = single line; (start, end) = a range.
LinesSpecifier = Union[None, int, Tuple[int, int]]


def parselines(lines: Optional[str]) -> LinesSpecifier:
    """
    Parse `lines` argument from the CLI.

    >>> parselines(None) is None
    True
    >>> parselines("1")
    1
    >>> parselines("1-2")
    (1, 2)
    """
    if not lines:
        return None
    if "-" not in lines:
        return int(lines)
    beg, end = lines.split("-")
    return (int(beg), int(end))
class WebURL:
    """Builds browser-facing URLs (PRs, commits, logs, files, diffs,
    blame) for a local Git branch's remote, dispatching on whether the
    remote root is github.com, gitlab.com (incl. wikis) or bitbucket.org.
    """

    # The analyzed local branch this instance answers questions about.
    local_branch: "LocalBranch"
    # The branch's repository analyzer (revision resolution, relpath).
    repo: "BaseRepoAnalyzer"
    # Browser-facing root URL of the remote, e.g. https://github.com/u/p
    rooturl: str

    def __init__(self, local_branch: "LocalBranch"):
        self.local_branch = local_branch
        self.repo = local_branch.repo
        self.rooturl = rooturl(local_branch.remote_url())

    # --- host detection -------------------------------------------------

    def is_bitbucket(self):
        return "//bitbucket.org" in self.rooturl

    def is_gitlab(self):
        return "//gitlab.com" in self.rooturl

    def is_github(self):
        return "//github.com" in self.rooturl

    def is_gitlab_wiki(self):
        # NOTE: returns a re.Match (truthy) or None, not a strict bool.
        return re.search(r"//gitlab\.com/[^/]+/[^/]+/wikis", self.rooturl)

    def pull_request(self) -> Optional[str]:
        """
        Get a URL to the web page for submitting a PR.

        Returns None for hosts with no known PR URL scheme.

        ..
           >>> from vcslinks import testing
           >>> weburl_github = testing.dummy_github_weburl()
           >>> weburl_gitlab = testing.dummy_gitlab_weburl()
           >>> weburl_bitbucket = testing.dummy_bitbucket_weburl()

        >>> weburl_github.pull_request()
        'https://github.com/USER/PROJECT/pull/new/master'
        >>> weburl_gitlab.pull_request()
        'https://gitlab.com/USER/PROJECT/merge_requests/new?merge_request%5Bsource_branch%5D=master'
        >>> weburl_bitbucket.pull_request()
        'https://bitbucket.org/USER/PROJECT/pull-requests/new?source=master'
        """
        branch = self.local_branch.remote_branch()
        if self.is_github():
            # https://github.com/{user}/{repo}/pull/new/{branch}
            return self.rooturl + "/pull/new/" + branch
        elif self.is_gitlab():
            # https://gitlab.com/{user}/{repo}/merge_requests/new?merge_request%5Bsource_branch%5D={dev}
            return (
                self.rooturl
                + "/merge_requests/new?merge_request%5Bsource_branch%5D="
                + branch
            )
        elif self.is_bitbucket():
            # https://bitbucket.org/{user}/{repo}/pull-requests/new?source={branch}
            return self.rooturl + "/pull-requests/new?source=" + branch
        return None

    def commit(self, revision: str) -> str:
        """
        Get a URL to commit page.

        ..
           >>> from vcslinks.testing import dummy_github_weburl
           >>> weburl = dummy_github_weburl()

        >>> weburl.commit("master")
        'https://github.com/USER/PROJECT/commit/55150afe539493d650889224db136bc8d9b7ecb8'
        """
        # Resolve symbolic names (branches, HEAD, ...) to a full hash.
        revision = self.repo.resolve_revision(revision)
        if self.is_bitbucket():
            # Bitbucket uses plural "commits" for single-commit pages.
            return f"{self.rooturl}/commits/{revision}"
        else:
            return f"{self.rooturl}/commit/{revision}"

    def log(self, branch: Optional[str] = None) -> str:
        """
        Get a URL to history page.

        `branch` defaults to the remote branch of the local branch.

        ..
           >>> from vcslinks import testing
           >>> weburl_github = testing.dummy_github_weburl()
           >>> weburl_gitlab = testing.dummy_gitlab_weburl()
           >>> weburl_bitbucket = testing.dummy_bitbucket_weburl()

        >>> weburl_github.log()
        'https://github.com/USER/PROJECT/commits/master'
        >>> weburl_gitlab.log()
        'https://gitlab.com/USER/PROJECT/commits/master'
        >>> weburl_bitbucket.log()
        'https://bitbucket.org/USER/PROJECT/commits/branch/master'
        >>> weburl_github.log("dev")
        'https://github.com/USER/PROJECT/commits/dev'
        """
        if not branch:
            branch = self.local_branch.remote_branch()
        if self.is_bitbucket():
            return f"{self.rooturl}/commits/branch/{branch}"
        else:
            return f"{self.rooturl}/commits/{branch}"

    def _format_lines(
        self, lines: LinesSpecifier, bitbucket_prefix: str = "lines"
    ) -> str:
        """Render `lines` as a host-specific URL fragment ('' if None)."""
        if not lines:
            return ""
        # Normalize a single line number to a 1-tuple.
        nums: Union[Tuple[int], Tuple[int, int]] = lines if isinstance(
            lines, tuple
        ) else (lines,)
        if self.is_bitbucket():
            # e.g. #lines-1:2 (prefix differs on blame pages)
            fragment = bitbucket_prefix + "-" + ":".join(map(str, nums))
        elif self.is_gitlab():
            # e.g. #L1-2
            fragment = "L" + "-".join(map(str, nums))
        else:
            # github.com, e.g. #L1-L2
            fragment = "-".join(f"L{x}" for x in nums)
        return f"#{fragment}"

    def _remote_revision(self, revision: Optional[str], permalink: bool) -> str:
        """Pick the revision string to embed in a URL: a resolved hash
        for permalinks, otherwise a (possibly symbolic) branch name."""
        if permalink:
            return self.repo.resolve_revision(revision or self.local_branch.name)
        elif not revision:
            # Now that we know that `revision` is not required to be
            # resolved, we can safely return (unqualified) remote
            # branch name:
            return self.local_branch.remote_branch()
        return revision

    def _file_revision(
        self, lines: LinesSpecifier, revision: Optional[str], permalink: Optional[bool]
    ) -> str:
        """Like _remote_revision, but defaults `permalink` to True when
        line numbers are requested (line anchors drift otherwise)."""
        if permalink is None:
            permalink = lines is not None
        return self._remote_revision(revision, permalink)

    def file(
        self,
        file: Pathish,
        lines: LinesSpecifier = None,
        revision: Optional[str] = None,
        permalink: Optional[bool] = None,
    ) -> str:
        """
        Get a URL to file.

        **GitHub**

        ..
           >>> from vcslinks import testing
           >>> weburl = testing.dummy_github_weburl()

        >>> weburl.file("README.md")
        'https://github.com/USER/PROJECT/blob/master/README.md'
        >>> weburl.file("README.md", permalink=True)
        'https://github.com/USER/PROJECT/blob/55150afe539493d650889224db136bc8d9b7ecb8/README.md'
        >>> weburl.file("README.md", lines=1)
        'https://github.com/USER/PROJECT/blob/55150afe539493d650889224db136bc8d9b7ecb8/README.md#L1'
        >>> weburl.file("README.md", lines=(1, 2))
        'https://github.com/USER/PROJECT/blob/55150afe539493d650889224db136bc8d9b7ecb8/README.md#L1-L2'
        >>> weburl.file("README.md", lines=(1, 2), permalink=False)
        'https://github.com/USER/PROJECT/blob/master/README.md#L1-L2'

        **GitLab**

        ..
           >>> weburl = testing.dummy_gitlab_weburl()

        >>> weburl.file("README.md", lines=1)
        'https://gitlab.com/USER/PROJECT/blob/55150afe539493d650889224db136bc8d9b7ecb8/README.md#L1'
        >>> weburl.file("README.md", lines=(1, 2))
        'https://gitlab.com/USER/PROJECT/blob/55150afe539493d650889224db136bc8d9b7ecb8/README.md#L1-2'

        **Bitbucket**

        ..
           >>> weburl = testing.dummy_bitbucket_weburl()

        >>> weburl.file("README.md")
        'https://bitbucket.org/USER/PROJECT/src/master/README.md'
        >>> weburl.file("README.md", lines=1)
        'https://bitbucket.org/USER/PROJECT/src/55150afe539493d650889224db136bc8d9b7ecb8/README.md#lines-1'
        >>> weburl.file("README.md", lines=(1, 2))
        'https://bitbucket.org/USER/PROJECT/src/55150afe539493d650889224db136bc8d9b7ecb8/README.md#lines-1:2'
        """
        revision = self._file_revision(lines, revision, permalink)
        # repo.relpath gives path parts; join with '/' for the URL.
        relurl = "/".join(self.repo.relpath(file).parts)
        fragment = self._format_lines(lines)
        if self.is_bitbucket():
            return f"{self.rooturl}/src/{revision}/{relurl}{fragment}"
        elif self.is_gitlab_wiki():
            # TODO: handle `fragment`?
            # Wiki pages are addressed without the .md extension.
            if relurl.endswith(".md"):
                relurl = relurl[: -len(".md")]
            if revision == "master":
                return f"{self.rooturl}/{relurl}"
            else:
                return f"{self.rooturl}/{relurl}?version_id={revision}"
        else:
            return f"{self.rooturl}/blob/{revision}/{relurl}{fragment}"

    def tree(
        self,
        directory: Optional[Pathish] = None,
        revision: Optional[str] = None,
        permalink: bool = False,
    ) -> str:
        """
        Get a URL to tree page.
        """
        revision = self._remote_revision(revision, permalink)
        if self.is_bitbucket():
            baseurl = f"{self.rooturl}/src/{revision}"
        else:
            baseurl = f"{self.rooturl}/tree/{revision}"
        if not directory:
            return baseurl
        relurl = "/".join(self.repo.relpath(directory).parts)
        return f"{baseurl}/{relurl}"

    def diff(
        self,
        revision1: Optional[str] = None,
        revision2: Optional[str] = None,
        permalink: bool = False,
    ) -> str:
        """
        Get a URL to diff page.

        With a single revision, compares master...revision.

        **GitHub**

        ..
           >>> from vcslinks import testing
           >>> weburl = testing.dummy_github_weburl()

        >>> weburl.diff("dev")
        'https://github.com/USER/PROJECT/compare/master...dev'
        >>> weburl.diff(permalink=True)
        'https://github.com/USER/PROJECT/compare/master...55150afe539493d650889224db136bc8d9b7ecb8'
        >>> weburl.diff("master", "dev", permalink=True) == (
        ...     'https://github.com/USER/PROJECT/compare/'
        ...     '55150afe539493d650889224db136bc8d9b7ecb8'
        ...     '...'
        ...     '40539486fdaf08a39b57519eb06e0e200c932cfd'
        ... )
        True

        **GitLab**

        ..
           >>> weburl = testing.dummy_gitlab_weburl()

        >>> weburl.diff("dev")
        'https://gitlab.com/USER/PROJECT/compare/master...dev'

        **Bitbucket**

        ..
           >>> weburl = testing.dummy_bitbucket_weburl()

        >>> weburl.diff("dev")
        'https://bitbucket.org/USER/PROJECT/branches/compare/dev%0Dmaster#diff'
        """
        if not revision1:
            revision1 = self.local_branch.remote_branch()
        if permalink:
            revision1 = self.repo.resolve_revision(revision1)
            if revision2:
                revision2 = self.repo.resolve_revision(revision2)
        if not revision2:
            # Single-revision form: diff master against revision1.
            revision2 = revision1
            revision1 = "master"
        rooturl = self.rooturl
        if self.is_bitbucket():
            # Bitbucket separates the two revisions with %0D (CR).
            return f"{rooturl}/branches/compare/{revision2}%0D{revision1}#diff"
        else:
            return f"{rooturl}/compare/{revision1}...{revision2}"

    def blame(
        self,
        file: Pathish,
        lines: LinesSpecifier = None,
        revision: Optional[str] = None,
        permalink: Optional[bool] = None,
    ) -> str:
        """
        Get a URL to blame/annotate page.

        **GitHub**

        ..
           >>> from vcslinks import testing
           >>> weburl = testing.dummy_github_weburl()

        >>> weburl.blame("README.md")
        'https://github.com/USER/PROJECT/blame/master/README.md'

        **GitLab**

        ..
           >>> weburl = testing.dummy_gitlab_weburl()

        >>> weburl.blame("README.md")
        'https://gitlab.com/USER/PROJECT/blame/master/README.md'

        **Bitbucket**

        ..
           >>> weburl = testing.dummy_bitbucket_weburl()

        >>> weburl.blame("README.md")
        'https://bitbucket.org/USER/PROJECT/annotate/master/README.md'
        """
        revision = self._file_revision(lines, revision, permalink)
        relurl = "/".join(self.repo.relpath(file).parts)
        # On Bitbucket annotate pages the line fragment is prefixed with
        # the file path rather than "lines".
        fragment = self._format_lines(lines, bitbucket_prefix=relurl)
        if self.is_bitbucket():
            return f"{self.rooturl}/annotate/{revision}/{relurl}{fragment}"
        else:
            return f"{self.rooturl}/blame/{revision}/{relurl}{fragment}"
|
from flask import current_app, _app_ctx_stack
from werkzeug.contrib.profiler import ProfilerMiddleware
try:
from flask_sqlalchemy import get_debug_queries
flask_sqlalchemy_available = True
except ImportError:
flask_sqlalchemy_available = False
# Default configuration values; each can be overridden through
# app.config before Profiler.init_app() runs.
PROFILER_DEFAULT_ENABLED = False
PROFILER_DEFAULT_RESTRICTIONS = []
PROFILER_DEFAULT_SQLALCHEMY_ENABLED = False
PROFILER_DEFAULT_SQLALCHEMY_THRESHOLD = 0
# str.format template fed with statement/parameters/start_time/end_time/
# duration/context of each recorded query (see Profiler.log_queries).
PROFILER_DEFAULT_SQLALCHEMY_FORMAT = "\n\n{duration:1.2e}s\n\n{statement}\n"
class Profiler(object):
    """Flask extension wiring up Werkzeug's ProfilerMiddleware and,
    optionally, a per-request logger for slow SQLAlchemy queries.

    Reads PROFILER_ENABLED, PROFILER_RESTRICTIONS,
    PROFILER_SQLALCHEMY_ENABLED, PROFILER_SQLALCHEMY_THRESHOLD and
    PROFILER_SQLALCHEMY_FORMAT from app.config (defaults above).
    """

    def __init__(self, app=None):
        # Standard Flask extension pattern: supports both direct
        # construction and the deferred app-factory style.
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Install the configured profiling hooks on `app`.

        Raises ImportError when query profiling is requested without
        flask_sqlalchemy installed, and ValueError when
        SQLALCHEMY_RECORD_QUERIES is unset or falsy.
        """
        app.config.setdefault("PROFILER_ENABLED", PROFILER_DEFAULT_ENABLED)
        app.config.setdefault("PROFILER_RESTRICTIONS", PROFILER_DEFAULT_RESTRICTIONS)
        app.config.setdefault("PROFILER_SQLALCHEMY_ENABLED", PROFILER_DEFAULT_SQLALCHEMY_ENABLED)
        app.config.setdefault("PROFILER_SQLALCHEMY_THRESHOLD", PROFILER_DEFAULT_SQLALCHEMY_THRESHOLD)
        app.config.setdefault("PROFILER_SQLALCHEMY_FORMAT", PROFILER_DEFAULT_SQLALCHEMY_FORMAT)
        if app.config["PROFILER_ENABLED"]:
            app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=app.config["PROFILER_RESTRICTIONS"])
        if app.config["PROFILER_SQLALCHEMY_ENABLED"]:
            if flask_sqlalchemy_available is False:
                raise ImportError("Failed to import flask_sqlalchemy, please make sure it's installed before using the query profiler.")
            # BUG FIX: the original indexed the key directly, so a
            # missing SQLALCHEMY_RECORD_QUERIES raised an opaque
            # KeyError instead of the intended explanatory ValueError.
            if not app.config.get("SQLALCHEMY_RECORD_QUERIES"):
                raise ValueError("SQLALCHEMY_RECORD_QUERIES is not set.")
            with app.app_context():
                app.after_request(self.__class__.log_queries)

    @staticmethod
    def log_queries(response):
        """after_request hook: log each recorded query whose duration is
        at or above the configured threshold, then pass `response` on."""
        threshold = current_app.config["PROFILER_SQLALCHEMY_THRESHOLD"]
        fmt = current_app.config["PROFILER_SQLALCHEMY_FORMAT"]
        for query in get_debug_queries():
            if query.duration >= threshold:
                current_app.logger.debug(fmt.format(
                    statement=query.statement,
                    parameters=query.parameters,
                    start_time=query.start_time,
                    end_time=query.end_time,
                    duration=query.duration,
                    context=query.context))
        return response
from django.shortcuts import render
# Create your views here.
def show(request):
    """Render the customer-service contact form page."""
    return render(request, 'customerservice/cs_form.html', {})
def pricing(request):
    """Render the pricing enquiry form page."""
    return render(request, 'customerservice/pricing_form.html', {})
def faq(request):
    """Render the frequently-asked-questions page."""
    return render(request, 'customerservice/faq.html', {})
# -*- coding: utf-8 -*-
import scrapy
import re
from ofertascrawler.items import Oferta
# Substring patterns used to route a response URL to the right parser
# (see OfertasSpider.parse).  Order matters for Mercado Livre: product
# pages live on the 'produto.' subdomain and must be matched first.
URL_CASAS_BAHIA = re.compile(r'casasbahia')
URL_MAGAZINE_LUIZA = re.compile(r'magazineluiza')
URL_MERCADO_LIVRE_PROD = re.compile(r'produto\.mercadolivre')
URL_MERCADO_LIVRE = re.compile(r'mercadolivre')
class OfertasSpider(scrapy.Spider):
    """Scrapes product offers (Oferta items) from Casas Bahia, Magazine
    Luiza and Mercado Livre product pages."""

    name = 'ofertas'
    allowed_domains = ['www.casasbahia.com.br', 'produto.mercadolivre.com.br', 'www.mercadolivre.com.br', 'www.magazineluiza.com.br']

    def __init__(self, urlfile=None, user_agent=None):
        # NOTE(review): scrapy.Spider.__init__ is never called here —
        # confirm the spider still initializes correctly, e.g. when
        # urlfile is None and start_urls is left unset.
        if isinstance(urlfile, str):
            # Treat a string as a path to a file with one URL per line.
            with open(urlfile, 'r') as urls:
                self.start_urls = [url.strip() for url in urls.readlines()]
        elif isinstance(urlfile, list):
            self.start_urls = urlfile
        if user_agent:
            self.user_agent = user_agent

    def parse(self, response):
        """Dispatch each response to the store-specific parser based on
        its URL (product subdomain checked before the generic domain)."""
        if re.search(URL_CASAS_BAHIA, response.url):
            yield scrapy.Request(response.url, callback=self.parse_casas_bahia)
        elif re.search(URL_MAGAZINE_LUIZA, response.url):
            yield scrapy.Request(response.url, callback=self.parse_magazine_luiza)
        elif re.search(URL_MERCADO_LIVRE_PROD, response.url):
            yield scrapy.Request(response.url, callback=self.parse_produto_mercado_livre)
        elif re.search(URL_MERCADO_LIVRE, response.url):
            yield scrapy.Request(response.url, callback=self.parse_mercado_livre)

    def parse_casas_bahia(self, response):
        """Extract an Oferta from a Casas Bahia product page; yields
        nothing for 404 ('page-not-found') pages."""
        if response.css('.page-not-found').get():
            pass
        else:
            item = Oferta()
            item['url'] = response.url
            item['categoria'] = response.css('.breadcrumb a span::text').getall()[1]
            item['titulo'] = response.css('.produtoNome h1.name b::text').get()
            if response.css('#ctl00_Conteudo_ctl40_spanProdutoTempIndisponivel').get():
                item['disponivel'] = False
            else:
                item['disponivel'] = True
                preco_tag = response.css('#ctl00_Conteudo_ctl00_precoPorValue')
                item['moeda'] = preco_tag.css('::text').get()
                try:
                    # Normalize "1.234,56" to "1234.56".
                    item['preco'] = preco_tag.css('i::text').get().replace('.', '').replace(',', '.')
                except AttributeError:
                    pass
            detalhes = response.css('.detalhesProduto')
            try:
                item['descricao'] = detalhes.xpath('.//div[re:test(@class, "descricao")]/p/text()').get().strip()
            except AttributeError:
                item['descricao'] = None
            tabela_info = detalhes.css('.caracteristicasGerais').xpath('./div/dl')
            item['caracteristicas'] = {}
            # Characteristics come as <dl> elements: key in <dt>, values in <dd>
            for div in tabela_info:
                key = div.xpath('./dt/text()').get().strip()
                values = div.xpath('./dd/text()').get().strip()
                item['caracteristicas'][key] = values
            yield item

    def parse_magazine_luiza(self, response):
        """Extract an Oferta from a Magazine Luiza product page."""
        item = Oferta()
        item['url'] = response.url
        # An "OutOfStock" meta tag marks unavailable products.
        item['disponivel'] = response.xpath('//meta[re:test(@content, "OutOfStock")]').get() is None
        item['categoria'] = None
        if response.css('.breadcrumb__item').get():
            item['categoria'] = response.css('.breadcrumb__item::text').get().strip()
        if item['disponivel']:
            try:
                item['titulo'] = response.css('.header-product__title::text').get().strip()
            except AttributeError:
                item['titulo'] = None
            preco_tag = response.css('.price-template-price-block')
            item['moeda'] = preco_tag.css('.price-template__bold::text').get()
            try:
                # Normalize "1.234,56" to "1234.56".
                item['preco'] = preco_tag.css('.price-template__text::text').get().replace('.', '').replace(',', '.')
            except AttributeError:
                pass
        else:
            item['titulo'] = response.css('.header-product__title--unavailable::text').get().strip()
        item['descricao'] = ' '.join(response.xpath('//div[re:test(@itemprop, "description")]/text()').getall()).strip()
        tabela_info = response.xpath('//div').xpath('./table/tr')
        item['caracteristicas'] = {}
        # Per <tr>, the key sits in <td class=description__information-left> and the
        # values in a nested table of left/right <td>s
        for tr in tabela_info:
            key = tr.css('.description__information-left::text').get().lower()
            values = tr.css('.description__information-right').xpath('./table//td/text()').getall()
            values = '\n'.join([val.strip() for val in values if val.strip() != ''])
            item['caracteristicas'][key] = values
        yield item

    def parse_mercado_livre(self, response):
        """Extract an Oferta from a www.mercadolivre.com.br product page."""
        item = Oferta()
        item['url'] = response.url
        try:
            item['titulo'] = response.css('.ui-pdp-title::text').get().strip()
        except AttributeError:
            pass
        try:
            item['categoria'] = response.css('.andes-breadcrumb__item a::text').get().strip()
        except AttributeError:
            item['categoria'] = None
        # When a product is out of stock an error message element is present
        item['disponivel'] = response.css('.ui-pdp-warning-message').get() is None
        item['preco'] = None
        if item['disponivel']:
            preco_tag = response.css('.price-tag')
            item['moeda'] = preco_tag.css('.price-tag-symbol::text').get()
            try:
                # If a price exists, check whether it carries a cents component
                if preco_tag.css('.price-tag-cents').get():
                    item['preco'] = preco_tag.css('.price-tag-fraction::text').get().replace('.', '') + '.' + preco_tag.css('.price-tag-cents::text').get()
                else:
                    item['preco'] = preco_tag.css('.price-tag-fraction::text').get().replace('.', '')
            except AttributeError:
                pass
        # Grab the description line by line, trim excess whitespace, rejoin per line
        item['descricao'] = '\n'.join([line.strip() for line in response.css('.ui-pdp-description__content::text').getall()])
        tabela_info = response.css('.ui-pdp-specs')
        item['caracteristicas'] = {'principais': {}, 'outras': {}}
        # Main characteristics come as <tr> with the key in <th> and the value in a <span>
        for tr in tabela_info.css('.andes-table__row'):
            key = tr.xpath('./th/text()').get()
            value = tr.xpath('./td/span/text()').get()
            item['caracteristicas']['principais'][key] = value
        # Other characteristics come as <li> with the key in <span> and the value in <p>
        for li in tabela_info.css('.ui-pdp-list__item'):
            key = li.xpath('./span/text()').get().strip()
            value = li.xpath('./p/text()').get().strip()
            item['caracteristicas']['outras'][key] = value
        yield item

    def parse_produto_mercado_livre(self, response):
        """Extract an Oferta from a produto.mercadolivre.com.br page
        (legacy product-page layout; assumed always available)."""
        item = Oferta()
        item['url'] = response.url
        item['disponivel'] = True
        try:
            item['categoria'] = response.css('.vip-navigation-breadcrumb-list a::text').get().strip()
        except AttributeError:
            item['categoria'] = None
        item['titulo'] = response.css('.item-title__primary::text').get().strip()
        preco_tag = response.css('.price-tag')
        item['moeda'] = preco_tag.css('.price-tag-symbol::text').get()
        try:
            # Need to check whether the price carries a cents component
            if preco_tag.css('.price-tag-cents').get():
                item['preco'] = preco_tag.css('.price-tag-fraction::text').get().replace('.', '') + '.' + preco_tag.css('.price-tag-cents::text').get()
            else:
                item['preco'] = preco_tag.css('.price-tag-fraction::text').get().replace('.', '')
        except AttributeError:
            pass
        # Grab the description line by line, trim excess whitespace, rejoin per line
        item['descricao'] = '\n'.join([line.strip() for line in response.css('.item-description__text p::text').getall()])
        tabela_info = response.css('.specs-wrapper').xpath('./section/ul/li')
        item['caracteristicas'] = {}
        # Characteristics come as <li> with the key in <strong> and the value in <span>
        for li in tabela_info:
            key = li.xpath('./strong/text()').get().strip()
            values = li.xpath('./span/text()').get().strip()
            item['caracteristicas'][key] = values
        yield item
|
import datetime
from django_featurette.models import Feature
def get_enabled_features_for_user(user):
    """
    Returns a list of features enabled for the current authenticated user.
    If the user can't access any feature or is anonymous, returns an empty list.
    """
    # NOTE(review): `is_authenticated` became a property in Django 1.10;
    # calling it there raises TypeError.  Confirm the Django versions
    # this app must support before changing.
    enabled_features = []
    if user.is_authenticated():
        # Naive local time — assumes USE_TZ is off; verify project settings.
        now = datetime.datetime.now()
        # Active features whose date window contains `now` and that are
        # attached to at least one of the user's groups.
        enabled_features = Feature.objects.filter(is_active=True,
            start_date__lte=now, end_date__gte=now,
            group__in=user.groups.all())
    return enabled_features
def is_feature_enabled_for_user(feature_key, user):
    """
    Tell whether the feature identified by `feature_key` is enabled for `user`.

    Useful for fine-grained control inside a view function; returns False
    when no feature exists with the given key.
    """
    try:
        return Feature.objects.get(key=feature_key) in \
            get_enabled_features_for_user(user)
    except Feature.DoesNotExist:
        return False
|
from abc import abstractmethod
from abc import ABC
from sa_pathfinding.environments.generics.state import State
class Heuristic(ABC):
    """Abstract base class for search heuristics.

    Subclasses set `_name` and implement `get_cost`.
    """

    # __slots__ keeps instances dict-free (just the name field).
    __slots__ = '_name'

    def __init__(self):
        self._name = 'Uninitialized'

    def __str__(self):
        return f'Heuristic, type: {self._name}'

    @property
    def name(self):
        """Human-readable identifier of this heuristic."""
        return self._name

    @abstractmethod
    def get_cost(self, node: State, goal: State):
        """Estimated cost from `node` to `goal` (subclass-defined)."""
        ...
class ZeroHeuristic(Heuristic):
    """Heuristic that always estimates zero cost."""

    def __init__(self):
        super().__init__()
        self._name = 'ZERO'

    # The original also overrode __str__ only to return
    # super().__str__(); that redundant override has been removed —
    # inheritance already provides exactly that behavior.

    def get_cost(self, start: State = None, goal: State = None):
        """Return 0.0 regardless of the states supplied."""
        return 0.0
|
# Load the bundled solutions table from the CSV that ships next to this
# module (first row = header, first column = index).
from pathlib import Path

import pandas as pd

file_dir = Path(__file__).parent
_solutions_csv = file_dir / "solutions.csv"

with _solutions_csv.open("r") as fp:
    solutions = pd.read_csv(fp, header=0, index_col=0)
|
#!/usr/bin/env python3
# Copyright 2016 The Fontbakery Authors
# Copyright 2017 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests to check the functionality of Google Fonts Tools"""
import os
import re
from glob import glob
import unittest
import subprocess
class TestSubcommands(unittest.TestCase):
    """Functional tests to determine that bin/gftools runs correctly"""

    def setUp(self):
        self.bin_path = os.path.join('bin')
        self.maxDiff = None  # show full diffs on assertion failure

    def test_list_subcommands_has_all_scripts(self):
        """Tests if the output from running gftools --list-subcommands
        matches the scripts within the bin folder"""
        # BUG FIX: the pattern must be a raw string — '\.' in a normal
        # literal is an invalid escape sequence (SyntaxWarning on
        # modern Python, and would eventually become an error).
        scripts = [re.sub(r'\.\w*$', '', f.replace('gftools-', ''))
                   for f in os.listdir(self.bin_path)
                   if f.startswith('gftools-')]
        subcommands = subprocess.check_output(
            ['python', os.path.join('bin', 'gftools'), '--list-subcommands'],
            encoding="utf-8").split()
        self.assertEqual(sorted(scripts), sorted(subcommands))
class TestGFToolsScripts(unittest.TestCase):
"""Functional tests to determine whether each script can execute successfully"""
def setUp(self):
self.get_path = lambda name: os.path.join('bin', 'gftools-' + name + '.py')
self.example_dir = os.path.join('data', 'test', 'cabin')
self.example_font = os.path.join(self.example_dir, 'Cabin-Regular.ttf')
self.example_family = glob(os.path.join("data", "test", "mavenpro", "*.ttf"))
self.example_vf_font = os.path.join("data", "test", 'Lora-Roman-VF.ttf')
self.example_vf_stat = os.path.join("data", "test", 'lora_stat.yaml')
self.example_builder_config = os.path.join("data", "test", 'builder_test.yaml')
self.example_builder_config_2_sources = os.path.join("data", "test", "Libre-Bodoni", "sources", "config.yaml")
self.src_vtt_font = os.path.join("data", "test", "Inconsolata[wdth,wght].ttf")
self.gf_family_dir = os.path.join('data', 'test', 'mock_googlefonts', 'ofl', 'abel')
self.nam_file = os.path.join('data', 'test', 'arabic_unique-glyphs.nam')
self.blacklisted_scripts = [
['python', self.get_path('build-contributors')], # requires source folder of git commits
['python', self.get_path('check-category')], # Requires GF key
['python', self.get_path('check-gf-github')], # Requires github credentials
['python', self.get_path('build-font2ttf')], # Requires fontforge
['python', self.get_path('generate-glyphdata')], # Generates desired_glyph_data.json
['python', self.get_path('metadata-vs-api')], # Requires an API key
['python', self.get_path('update-version')], # Needs to know the current font version and the next version to set
['python', self.get_path('family-html-snippet')], # Requires GF api token
['python', self.get_path('qa')], # Has seperate checks
['python', self.get_path('sanity-check')], # Very old doesn't follow new spec. Should be deprecated.
]
self.dir_before_tests = os.listdir(self.example_dir)
def tearDown(self):
"""Clears the example folder of any files created during the unit tests"""
files_to_delete = set(os.listdir(self.example_dir)) - set(self.dir_before_tests)
for f in files_to_delete:
os.remove(os.path.join(self.example_dir, f))
def check_script(self, command):
"""Template for unit testing the python scripts"""
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
stdout, stderr = process.communicate()
self.assertNotIn('Err', stderr, ' '.join(command) + ':\n\n' + stderr)
    # Each test below shells out (via check_script, defined above) to one of the
    # repository's command-line scripts and asserts the run produced no 'Err'
    # output on stderr. Fixture paths (example_font, gf_family_dir, ...) are
    # set up elsewhere in this test class.
    def test_add_font(self):
        self.check_script(['python', self.get_path('add-font'), self.gf_family_dir])
    def test_build_ofl(self):
        self.check_script(['python', self.get_path('build-ofl'), self.example_dir])
    def test_check_bbox(self):
        self.check_script(['python', self.get_path('check-bbox'), self.example_font, '--glyphs', '--extremes'])
    def test_check_copyright_notices(self):
        self.check_script(['python', self.get_path('check-copyright-notices')])
    def test_check_font_version(self):
        self.check_script(['python', self.get_path('check-font-version'), self.example_font])
    def test_check_name(self):
        self.check_script(['python', self.get_path('check-name'), self.example_font])
    def test_check_vtt_compatibility(self):
        # Compares a font against itself — only exercises that the script runs.
        self.check_script(['python', self.get_path('check-vtt-compatibility'), self.example_font, self.example_font])
    def test_compare_font(self):
        self.check_script(['python', self.get_path('compare-font'), self.example_font, self.example_font])
    def test_find_features(self):
        self.check_script(['python', self.get_path('find-features'), self.example_font])
    def test_fix_ascii_fontmetadata(self):
        self.check_script(['python', self.get_path('fix-ascii-fontmetadata'), self.example_font])
    def test_fix_cmap(self):
        self.check_script(['python', self.get_path('fix-cmap'), self.example_font])
    def test_fix_familymetadata(self):
        self.check_script(['python', self.get_path('fix-familymetadata'), self.example_font])
    def test_fix_fsselection(self):
        self.check_script(['python', self.get_path('fix-fsselection'), self.example_font])
    def test_fix_fstype(self):
        self.check_script(['python', self.get_path('fix-fstype'), self.example_font])
    def test_fix_gasp(self):
        self.check_script(['python', self.get_path('fix-gasp'), self.example_font])
    def test_fix_glyph_private_encoding(self):
        self.check_script(['python', self.get_path('fix-glyph-private-encoding'), self.example_font])
    def test_fix_glyphs(self):
        self.check_script(['python', self.get_path('fix-glyphs')])
    def test_fix_hinting(self):
        self.check_script(['python', self.get_path('fix-hinting'), self.example_font])
    def test_fix_isfixedpitch(self):
        self.check_script(['python', self.get_path('fix-isfixedpitch'), "--fonts", self.example_font])
    def test_fix_nameids(self):
        self.check_script(['python', self.get_path('fix-nameids'), self.example_font])
    def test_fix_nonhinting(self):
        # Writes its output to a separate '<font>.fix' path.
        self.check_script(['python', self.get_path('fix-nonhinting'), self.example_font, self.example_font + '.fix'])
    def test_fix_ttfautohint(self):
        self.check_script(['python', self.get_path('fix-ttfautohint'), self.example_font])
    def test_fix_vendorid(self):
        self.check_script(['python', self.get_path('fix-vendorid'), self.example_font])
    def test_fix_vertical_metrics(self):
        self.check_script(['python', self.get_path('fix-vertical-metrics'), self.example_font])
    def test_font_diff(self):
        self.check_script(['python', self.get_path('font-diff'), self.example_font, self.example_font])
def test_font_weights_coveraget(self):
self.check_script(['python', self.get_path('font-weights-coverage'), self.example_font])
    # Smoke tests for the fix-*/list-*/misc scripts; see check_script above.
    def test_fix_font(self):
        self.check_script(['python', self.get_path('fix-font'), self.example_font])
    def test_fix_family(self):
        # example_family is a list of font paths, appended as extra argv items.
        self.check_script(['python', self.get_path('fix-family')] + self.example_family)
    def test_list_italicangle(self):
        self.check_script(['python', self.get_path('list-italicangle'), self.example_font])
    def test_list_panose(self):
        self.check_script(['python', self.get_path('list-panose'), self.example_font])
    def test_list_variable_source(self):
        self.check_script(['python', self.get_path('list-variable-source')])
    def test_list_weightclass(self):
        self.check_script(['python', self.get_path('list-weightclass'), self.example_font])
    def test_list_widthclass(self):
        self.check_script(['python', self.get_path('list-widthclass'), self.example_font])
    def test_nametable_from_filename(self):
        self.check_script(['python', self.get_path('nametable-from-filename'), self.example_font])
    def test_namelist(self):
        self.check_script(['python', self.get_path('namelist'), self.example_font])
    def test_ots(self):
        self.check_script(['python', self.get_path('ots'), self.example_font])
    def test_rangify(self):
        self.check_script(['python', self.get_path('rangify'), self.nam_file])
    def test_test_gf_coverage(self):
        self.check_script(['python', self.get_path('test-gf-coverage'), self.example_font])
    def test_ttf2cp(self):
        self.check_script(['python', self.get_path('ttf2cp'), self.example_font])
    def test_unicode_names(self):
        self.check_script(['python', self.get_path('unicode-names'), "--nam_file", self.nam_file])
    def test_update_families(self):
        # NOTE(review): a commented-out copy of this test appears further down,
        # marked "temporarily disabled" pending googlefonts/tools issue #13.
        # Confirm whether this active definition was re-enabled deliberately or
        # whether the stale disabled copy/comment should be removed.
        self.check_script(['python', self.get_path('update-families'), self.example_font])
    # Remaining script smoke tests, including the variable-font helpers.
    def test_update_version(self):
        self.check_script(['python', self.get_path('update-version'), self.example_font])
    def test_varfont_info(self):
        self.check_script(['python', self.get_path('varfont-info'), self.example_vf_font])
    def test_what_subsets(self):
        self.check_script(['python', self.get_path('what-subsets'), self.example_font])
    def test_rename_font(self):
        self.check_script(['python', self.get_path('rename-font'), self.example_font, "Foobar"])
    # Temporarily disabling this until we close issue #13
    # (https://github.com/googlefonts/tools/issues/13)
    # See also https://github.com/googlefonts/fontbakery/issues/1535
    # def test_update_families(self):
    #     self.check_script(['python', self.get_path('update-families'), self.example_font])
    def test_update_nameids(self):
        self.check_script(['python', self.get_path('update-nameids'), self.example_font, "-c", "Foobar"])
    def test_check_vtt_compile(self):
        self.check_script(['python', self.get_path('check-vtt-compile'), self.src_vtt_font])
    def test_gen_stat(self):
        self.check_script(
            ['python', self.get_path('gen-stat'), self.example_vf_font, "--axis-order", "wght"]
        )
    def test_gen_stat2(self):
        # Same script as above, but driven from a STAT source file instead.
        self.check_script(
            ['python', self.get_path('gen-stat'), self.example_vf_font, "--src", self.example_vf_stat]
        )
    def test_builder(self):
        self.check_script(['python', self.get_path('builder'), self.example_builder_config])
    def test_builder_2_sources(self):
        self.check_script(["python", self.get_path("builder"), self.example_builder_config_2_sources])
if __name__ == '__main__':
    unittest.main()
|
# Microsoft Installer Library
# (C) 2003 Martin v. Loewis
import win32com.client.gencache
import win32com.client
import pythoncom, pywintypes
from win32com.client import constants
import re, string, os, sets, glob, subprocess, sys, _winreg, struct, _msi
# Py2/Py3 compatibility shim: ``basestring``/``unicode`` exist only on
# Python 2; on Python 3 fall back to a tuple usable with isinstance().
try:
    basestring
except NameError:
    basestring = (str, unicode)
# Partially taken from Wine
# Bit fields of the column-type word stored in the MSI _Columns table:
# low byte is the data size, the remaining bits are type/nullable/key flags.
datasizemask= 0x00ff
type_valid= 0x0100
type_localizable= 0x0200
typemask= 0x0c00
type_long= 0x0000
type_short= 0x0400
type_string= 0x0c00
type_binary= 0x0800
type_nullable= 0x1000
type_key= 0x2000
# XXX temporary, localizable?
# Union of all bits this module understands; Table.sql() warns about others.
knownbits = datasizemask | type_valid | type_localizable | \
            typemask | type_nullable | type_key
# Summary Info Property IDs (standard OLE property-set identifiers used by
# the MSI summary information stream).
PID_CODEPAGE=1
PID_TITLE=2
PID_SUBJECT=3
PID_AUTHOR=4
PID_KEYWORDS=5
PID_COMMENTS=6
PID_TEMPLATE=7
PID_LASTAUTHOR=8
PID_REVNUMBER=9
PID_LASTPRINTED=11
PID_CREATE_DTM=12
PID_LASTSAVE_DTM=13
PID_PAGECOUNT=14
PID_WORDCOUNT=15
PID_CHARCOUNT=16
PID_APPNAME=18
PID_SECURITY=19
def reset():
    """Clear the module-level registry of Directory logical names in use."""
    global _directories
    _directories = sets.Set()
def EnsureMSI():
    # Generate/load the COM wrapper for the Windows Installer type library.
    win32com.client.gencache.EnsureModule('{000C1092-0000-0000-C000-000000000046}', 1033, 1, 0)
def EnsureMSM():
    # Generate/load the COM wrapper for the Mergemod type library; fall back
    # to the 2.0 version when the 1.0 one is not registered.
    try:
        win32com.client.gencache.EnsureModule('{0ADDA82F-2C26-11D2-AD65-00A0C9AF11A6}', 0, 1, 0)
    except pywintypes.com_error:
        win32com.client.gencache.EnsureModule('{0ADDA82F-2C26-11D2-AD65-00A0C9AF11A6}', 0, 2, 0)
# Cached WindowsInstaller.Installer COM object (lazily created below).
_Installer=None
def MakeInstaller():
    """Return the cached WindowsInstaller.Installer COM object, creating it on first use."""
    global _Installer
    if _Installer is None:
        EnsureMSI()
        _Installer = win32com.client.Dispatch('WindowsInstaller.Installer',
                     resultCLSID='{000C1090-0000-0000-C000-000000000046}')
    return _Installer
# Cached Msm.Merge2 COM object (lazily created below).
_Merge=None
def MakeMerge2():
    """Return the cached Msm.Merge2 COM object, creating it on first use."""
    global _Merge
    if _Merge is None:
        EnsureMSM()
        _Merge = win32com.client.Dispatch("Msm.Merge2.1")
    return _Merge
class Table:
    """Description of one MSI table: a name plus (index, name, type) fields.

    The type word uses the bit layout defined by the module-level constants
    (datasizemask, typemask, type_nullable, ...). NOTE: this is Python 2
    code (``print`` statements below).
    """
    def __init__(self, name):
        self.name = name
        self.fields = []
    def add_field(self, index, name, type):
        # index is 1-based (MSI column number); type is the packed type word.
        self.fields.append((index,name,type))
    def sql(self):
        """Return the CREATE TABLE statement for this table."""
        fields = []
        keys = []
        self.fields.sort()
        # Place each column by its 1-based index into a dense list.
        fields = [None]*len(self.fields)
        for index, name, type in self.fields:
            index -= 1
            unk = type & ~knownbits
            if unk:
                print "%s.%s unknown bits %x" % (self.name, name, unk)
            size = type & datasizemask
            dtype = type & typemask
            if dtype == type_string:
                if size:
                    tname="CHAR(%d)" % size
                else:
                    tname="CHAR"
            elif dtype == type_short:
                assert size==2
                tname = "SHORT"
            elif dtype == type_long:
                assert size==4
                tname="LONG"
            elif dtype == type_binary:
                assert size==0
                tname="OBJECT"
            else:
                tname="unknown"
                print "%s.%sunknown integer type %d" % (self.name, name, size)
            if type & type_nullable:
                flags = ""
            else:
                flags = " NOT NULL"
            if type & type_localizable:
                flags += " LOCALIZABLE"
            fields[index] = "`%s` %s%s" % (name, tname, flags)
            if type & type_key:
                keys.append("`%s`" % name)
        fields = ", ".join(fields)
        keys = ", ".join(keys)
        return "CREATE TABLE %s (%s PRIMARY KEY %s)" % (self.name, fields, keys)
    def create(self, db):
        """Execute this table's CREATE TABLE statement against MSI database *db*."""
        v = db.OpenView(self.sql())
        v.Execute(None)
        v.Close()
class Binary:
    """Marker wrapping a filename whose contents belong in a binary MSI column.

    repr() is crafted so that generated schema/sequence modules recreate the
    object with a path relative to the generated module's directory.
    """
    def __init__(self, fname):
        self.name = fname
    def __repr__(self):
        return 'msilib.Binary(os.path.join(dirname,"%s"))' % self.name
def gen_schema(destpath, schemapath):
    """Reverse-engineer an MSI schema database into a Python module.

    Reads the _Columns and _Validation tables of *schemapath* and writes a
    module to *destpath* containing Table objects plus the raw
    _Validation_records list. NOTE: Python 2 code (backtick repr below).
    """
    d = MakeInstaller()
    schema = d.OpenDatabase(schemapath,
             win32com.client.constants.msiOpenDatabaseModeReadOnly)
    # XXX ORDER BY
    v=schema.OpenView("SELECT * FROM _Columns")
    curtable=None
    tables = []
    v.Execute(None)
    f = open(destpath, "wt")
    f.write("from msilib import Table\n")
    # _Columns rows arrive grouped by table; start a new Table object whenever
    # the table name changes.
    while 1:
        r=v.Fetch()
        if not r:break
        name=r.StringData(1)
        if curtable != name:
            f.write("\n%s = Table('%s')\n" % (name,name))
            curtable = name
            tables.append(name)
        f.write("%s.add_field(%d,'%s',%d)\n" %
                (name, r.IntegerData(2), r.StringData(3), r.IntegerData(4)))
    v.Close()
    f.write("\ntables=[%s]\n\n" % (", ".join(tables)))
    # Fill the _Validation table
    f.write("_Validation_records = [\n")
    v = schema.OpenView("SELECT * FROM _Validation")
    v.Execute(None)
    while 1:
        r = v.Fetch()
        if not r:break
        # Table, Column, Nullable
        f.write("(%s,%s,%s," %
                (`r.StringData(1)`, `r.StringData(2)`, `r.StringData(3)`))
        # Helpers emit a column value or None, preserving NULLs.
        def put_int(i):
            if r.IsNull(i):f.write("None, ")
            else:f.write("%d," % r.IntegerData(i))
        def put_str(i):
            if r.IsNull(i):f.write("None, ")
            else:f.write("%s," % `r.StringData(i)`)
        put_int(4) # MinValue
        put_int(5) # MaxValue
        put_str(6) # KeyTable
        put_int(7) # KeyColumn
        put_str(8) # Category
        put_str(9) # Set
        put_str(10)# Description
        f.write("),\n")
    f.write("]\n\n")
    f.close()
def gen_sequence(destpath, msipath):
    """Reverse-engineer all tables of MSI database *msipath* into a Python module.

    Binary-stream columns are dumped to side files (<name>.bin) for the
    Binary table, otherwise embedded as byte strings. NOTE: Python 2 code
    (``print >>f`` and a string exception below — the latter is invalid on
    modern Python 2 as well and would itself raise TypeError if reached).
    """
    dir = os.path.dirname(destpath)
    d = MakeInstaller()
    seqmsi = d.OpenDatabase(msipath,
             win32com.client.constants.msiOpenDatabaseModeReadOnly)
    v = seqmsi.OpenView("SELECT * FROM _Tables");
    v.Execute(None)
    f = open(destpath, "w")
    print >>f, "import msilib,os;dirname=os.path.dirname(__file__)"
    tables = []
    while 1:
        r = v.Fetch()
        if not r:break
        table = r.StringData(1)
        tables.append(table)
        f.write("%s = [\n" % table)
        v1 = seqmsi.OpenView("SELECT * FROM `%s`" % table)
        v1.Execute(None)
        info = v1.ColumnInfo(constants.msiColumnInfoTypes)
        while 1:
            r = v1.Fetch()
            if not r:break
            rec = []
            # Column-type letters: i/I integer, s/l (S/L) string, v stream.
            for i in range(1,r.FieldCount+1):
                if r.IsNull(i):
                    rec.append(None)
                elif info.StringData(i)[0] in "iI":
                    rec.append(r.IntegerData(i))
                elif info.StringData(i)[0] in "slSL":
                    rec.append(r.StringData(i))
                elif info.StringData(i)[0]=="v":
                    size = r.DataSize(i)
                    bytes = r.ReadStream(i, size, constants.msiReadStreamBytes)
                    bytes = bytes.encode("latin-1") # binary data represented "as-is"
                    if table == "Binary":
                        fname = rec[0]+".bin"
                        open(os.path.join(dir,fname),"wb").write(bytes)
                        rec.append(Binary(fname))
                    else:
                        rec.append(bytes)
                else:
                    raise "Unsupported column type", info.StringData(i)
            f.write(repr(tuple(rec))+",\n")
        v1.Close()
        f.write("]\n\n")
    v.Close()
    f.write("tables=%s\n" % repr(map(str,tables)))
    f.close()
class _Unspecified:
    """Sentinel type: distinguishes 'argument omitted' from an explicit None."""
    pass
def change_sequence(seq, action, seqno=_Unspecified, cond = _Unspecified):
    """Change the sequence number and/or condition of *action* in sequence list *seq*.

    seq      -- list of (action, condition, sequence) tuples, mutated in place
    action   -- name of the action to update
    seqno    -- new sequence number; omitted -> keep the existing one
    cond     -- new condition; omitted -> keep the existing one

    Raises ValueError if *action* is not present in *seq*.
    """
    for i in range(len(seq)):
        if seq[i][0] == action:
            if cond is _Unspecified:
                cond = seq[i][1]
            if seqno is _Unspecified:
                seqno = seq[i][2]
            seq[i] = (action, cond, seqno)
            return
    # Call-form raise works on both Python 2 and 3 (was: ``raise ValueError, "..."``).
    raise ValueError("Action not found in sequence")
def add_data(db, table, values):
    """Insert *values* (a sequence of row tuples) into MSI *table* of *db*.

    Each row must have exactly as many entries as the table has columns;
    entries may be integers, strings, None, or Binary markers (stored as
    streams). NOTE: Python 2 code (``long`` and ``raise E, v`` below).
    """
    d = MakeInstaller()
    v = db.OpenView("SELECT * FROM `%s`" % table)
    count = v.ColumnInfo(0).FieldCount
    r = d.CreateRecord(count)
    for value in values:
        assert len(value) == count, value
        for i in range(count):
            field = value[i]
            if isinstance(field, (int, long)):
                r.SetIntegerData(i+1,field)
            elif isinstance(field, basestring):
                r.SetStringData(i+1,field)
            elif field is None:
                pass
            elif isinstance(field, Binary):
                r.SetStream(i+1, field.name)
            else:
                raise TypeError, "Unsupported type %s" % field.__class__.__name__
        v.Modify(win32com.client.constants.msiViewModifyInsert, r)
        r.ClearData()
    v.Close()
def add_stream(db, name, path):
    """Store the file at *path* as stream *name* in the _Streams table of *db*.

    NOTE(review): *name* is interpolated directly into the SQL text — callers
    in this module pass internally generated CAB names, but verify no
    externally supplied name can reach this.
    """
    d = MakeInstaller()
    v = db.OpenView("INSERT INTO _Streams (Name, Data) VALUES ('%s', ?)" % name)
    r = d.CreateRecord(1)
    r.SetStream(1, path)
    v.Execute(r)
    v.Close()
def init_database(name, schema,
                  ProductName, ProductCode, ProductVersion,
                  Manufacturer,
                  request_uac = False):
    """Create a fresh MSI database at *name* from a generated *schema* module.

    Overwrites any existing file, creates all schema tables, fills
    _Validation, writes the summary information stream and the core Property
    rows, then commits and returns the open database object.
    NOTE: relies on the module-global msi_type being set beforehand (see
    set_arch_from_file).
    """
    try:
        os.unlink(name)
    except OSError:
        pass
    # MSI product codes are conventionally upper-case GUIDs.
    ProductCode = ProductCode.upper()
    d = MakeInstaller()
    # Create the database
    db = d.OpenDatabase(name,
         win32com.client.constants.msiOpenDatabaseModeCreate)
    # Create the tables
    for t in schema.tables:
        t.create(db)
    # Fill the validation table
    add_data(db, "_Validation", schema._Validation_records)
    # Initialize the summary information, allowing atmost 20 properties
    si = db.GetSummaryInformation(20)
    si.SetProperty(PID_TITLE, "Installation Database")
    si.SetProperty(PID_SUBJECT, ProductName)
    si.SetProperty(PID_AUTHOR, Manufacturer)
    si.SetProperty(PID_TEMPLATE, msi_type)
    si.SetProperty(PID_REVNUMBER, gen_uuid())
    # Word-count bits: 2 = long file names/compressed/original media,
    # 8 = elevated privileges not required (skipped when UAC is requested).
    if request_uac:
        wc = 2 # long file names, compressed, original media
    else:
        wc = 2 | 8 # +never invoke UAC
    si.SetProperty(PID_WORDCOUNT, wc)
    si.SetProperty(PID_PAGECOUNT, 200)
    si.SetProperty(PID_APPNAME, "Python MSI Library")
    # XXX more properties
    si.Persist()
    add_data(db, "Property", [
        ("ProductName", ProductName),
        ("ProductCode", ProductCode),
        ("ProductVersion", ProductVersion),
        ("Manufacturer", Manufacturer),
        ("ProductLanguage", "1033")])
    db.Commit()
    return db
def add_tables(db, module):
    """Insert every table listed in generated *module*.tables into *db*."""
    for table in module.tables:
        add_data(db, table, getattr(module, table))
def make_id(str):
#str = str.replace(".", "_") # colons are allowed
str = str.replace(" ", "_")
str = str.replace("-", "_")
str = str.replace("+", "_")
if str[0] in string.digits:
str = "_"+str
assert re.match("^[A-Za-z_][A-Za-z0-9_.]*$", str), "FILE"+str
return str
def gen_uuid():
    """Return a newly generated GUID as a string (via the COM GUID generator)."""
    return str(pythoncom.CreateGuid())
class CAB:
    """Accumulates (path, logical-name) file pairs and writes them as a CAB.

    NOTE: Python 2 code (``sets.Set``).
    """
    def __init__(self, name):
        self.name = name
        self.files = []
        self.filenames = sets.Set()
        self.index = 0
    def gen_id(self, dir, file):
        # Derive a unique logical name from *file*; a ".N" suffix is appended
        # on collisions. NOTE(review): the *dir* parameter is never used (and
        # shadows the builtin) — candidates for cleanup.
        logical = _logical = make_id(file)
        pos = 1
        while logical in self.filenames:
            logical = "%s.%d" % (_logical, pos)
            pos += 1
        self.filenames.add(logical)
        return logical
    def append(self, full, file, logical = None):
        """Queue *full* for the CAB; returns (sequence, logical) or None for dirs."""
        if os.path.isdir(full):
            return
        if not logical:
            # NOTE(review): this passes the *builtin* ``dir`` function as the
            # (unused) first argument of gen_id — harmless today only because
            # gen_id ignores it.
            logical = self.gen_id(dir, file)
        self.index += 1
        self.files.append((full, logical))
        return self.index, logical
    def commit(self, db):
        """Build the .cab, register it in the Media table, embed it as a stream."""
        try:
            os.unlink(self.name+".cab")
        except OSError:
            pass
        _msi.FCICreate(self.name+".cab", self.files)
        add_data(db, "Media",
                 [(1, self.index, None, "#"+self.name, None, None)])
        add_stream(db, self.name, self.name+".cab")
        # The temporary on-disk .cab is no longer needed once embedded.
        os.unlink(self.name+".cab")
        db.Commit()
# Module-level registry of Directory logical names already allocated;
# cleared by reset().
_directories = sets.Set()
class Directory:
    # NOTE: Python 2 code throughout (``sets.Set``, ``dict.has_key``); the
    # behavior is intricate and order-dependent, so only documentation is
    # added here.
    def __init__(self, db, cab, basedir, physical, _logical, default, componentflags=None):
        """Create a new directory in the Directory table. There is a current component
        at each point in time for the directory, which is either explicitly created
        through start_component, or implicitly when files are added for the first
        time. Files are added into the current component, and into the cab file.
        To create a directory, a base directory object needs to be specified (can be
        None), the path to the physical directory, and a logical directory name.
        Default specifies the DefaultDir slot in the directory table. componentflags
        specifies the default flags that new components get."""
        # Make the logical name unique module-wide by appending a counter.
        index = 1
        _logical = make_id(_logical)
        logical = _logical
        while logical in _directories:
            logical = "%s%d" % (_logical, index)
            index += 1
        _directories.add(logical)
        self.db = db
        self.cab = cab
        self.basedir = basedir
        self.physical = physical
        self.logical = logical
        self.component = None
        self.short_names = sets.Set()
        self.ids = sets.Set()
        self.keyfiles = {}
        self.componentflags = componentflags
        if basedir:
            self.absolute = os.path.join(basedir.absolute, physical)
            blogical = basedir.logical
        else:
            self.absolute = physical
            blogical = None
        # initially assume that all files in this directory are unpackaged
        # as files from self.absolute get added, this set is reduced
        self.unpackaged_files = set()
        for f in os.listdir(self.absolute):
            if os.path.isfile(os.path.join(self.absolute, f)):
                self.unpackaged_files.add(f)
        add_data(db, "Directory", [(logical, blogical, default)])
    def start_component(self, component = None, feature = None, flags = None, keyfile = None, uuid=None):
        """Add an entry to the Component table, and make this component the current for this
        directory. If no component name is given, the directory name is used. If no feature
        is given, the current feature is used. If no flags are given, the directory's default
        flags are used. If no keyfile is given, the KeyPath is left null in the Component
        table."""
        if flags is None:
            flags = self.componentflags
        if uuid is None:
            uuid = gen_uuid()
        else:
            uuid = uuid.upper()
        if component is None:
            component = self.logical
        self.component = component
        # Bit 256 marks the component as 64-bit (module-global Win64 is set
        # by set_arch_from_file).
        if Win64:
            flags |= 256
        if keyfile:
            keyid = self.cab.gen_id(self.absolute, keyfile)
            self.keyfiles[keyfile] = keyid
        else:
            keyid = None
        add_data(self.db, "Component",
                 [(component, uuid, self.logical, flags, None, keyid)])
        if feature is None:
            feature = current_feature
        add_data(self.db, "FeatureComponents",
                 [(feature.id, component)])
    def make_short(self, file):
        """Return a unique 8.3-style short name for *file* within this directory."""
        file = re.sub(r'[\?|><:/*"+,;=\[\]]', '_', file) # restrictions on short names
        parts = file.split(".")
        if len(parts)>1:
            suffix = parts[-1].upper()
        else:
            suffix = None
        prefix = parts[0].upper()
        if len(prefix) <= 8 and (not suffix or len(suffix)<=3):
            # Already a valid 8.3 name; must be unique as-is.
            if suffix:
                file = prefix+"."+suffix
            else:
                file = prefix
            assert file not in self.short_names
        else:
            # Truncate and disambiguate with a ~N counter, shrinking the
            # prefix as the counter gains digits.
            prefix = prefix[:6]
            if suffix:
                suffix = suffix[:3]
            pos = 1
            while 1:
                if suffix:
                    file = "%s~%d.%s" % (prefix, pos, suffix)
                else:
                    file = "%s~%d" % (prefix, pos)
                if file not in self.short_names: break
                pos += 1
                assert pos < 10000
                if pos in (10, 100, 1000):
                    prefix = prefix[:-1]
        self.short_names.add(file)
        return file
    def add_file(self, file, src=None, version=None, language=None):
        """Add a file to the current component of the directory, starting a new one
        if there is no current component. By default, the file name in the source
        and the file table will be identical. If the src file is specified, it is
        interpreted relative to the current directory. Optionally, a version and a
        language can be specified for the entry in the File table."""
        if not self.component:
            self.start_component(self.logical, current_feature)
        if not src:
            # Allow relative paths for file if src is not specified
            src = file
            file = os.path.basename(file)
        absolute = os.path.join(self.absolute, src)
        if absolute.startswith(self.absolute):
            # mark file as packaged
            relative = absolute[len(self.absolute)+1:]
            if relative in self.unpackaged_files:
                self.unpackaged_files.remove(relative)
        assert not re.search(r'[\?|><:/*]"', file) # restrictions on long names
        if self.keyfiles.has_key(file):
            logical = self.keyfiles[file]
        else:
            logical = None
        sequence, logical = self.cab.append(absolute, file, logical)
        assert logical not in self.ids
        self.ids.add(logical)
        short = self.make_short(file)
        full = "%s|%s" % (short, file)
        filesize = os.stat(absolute).st_size
        # constants.msidbFileAttributesVital
        # Compressed omitted, since it is the database default
        # could add r/o, system, hidden
        attributes = 512
        add_data(self.db, "File",
                 [(logical, self.component, full, filesize, version,
                   language, attributes, sequence)])
        if not version:
            # Add hash if the file is not versioned
            filehash = MakeInstaller().FileHash(absolute, 0)
            add_data(self.db, "MsiFileHash",
                     [(logical, 0, filehash.IntegerData(1),
                       filehash.IntegerData(2), filehash.IntegerData(3),
                       filehash.IntegerData(4))])
        # Automatically remove .pyc/.pyo files on uninstall (2)
        # XXX: adding so many RemoveFile entries makes installer unbelievably
        # slow. So instead, we have to use wildcard remove entries
        # if file.endswith(".py"):
        #     add_data(self.db, "RemoveFile",
        #             [(logical+"c", self.component, "%sC|%sc" % (short, file),
        #               self.logical, 2),
        #              (logical+"o", self.component, "%sO|%so" % (short, file),
        #               self.logical, 2)])
    def glob(self, pattern, exclude = None):
        """Add a list of files to the current component as specified in the
        glob pattern. Individual files can be excluded in the exclude list."""
        files = glob.glob1(self.absolute, pattern)
        for f in files:
            if exclude and f in exclude: continue
            self.add_file(f)
        return files
    def remove_pyc(self):
        "Remove .pyc/.pyo files from __pycache__ on uninstall"
        directory = self.logical + "_pycache"
        add_data(self.db, "Directory", [(directory, self.logical, "__PYCA~1|__pycache__")])
        flags = 256 if Win64 else 0
        add_data(self.db, "Component",
                 [(directory, gen_uuid(), directory, flags, None, None)])
        add_data(self.db, "FeatureComponents", [(current_feature.id, directory)])
        add_data(self.db, "CreateFolder", [(directory, directory)])
        # Wildcard removal (mode 2 = on uninstall) of everything in __pycache__.
        add_data(self.db, "RemoveFile",
                 [(self.component, self.component, "*.*", directory, 2),
                 ])
    def removefile(self, key, pattern):
        "Add a RemoveFile entry"
        add_data(self.db, "RemoveFile", [(self.component+key, self.component, pattern, self.logical, 2)])
class Feature:
    """One row of the Feature table; set_current() makes it the target for
    subsequently created components (module-global current_feature)."""
    def __init__(self, db, id, title, desc, display, level = 1,
                 parent=None, directory = None, attributes=0):
        self.id = id
        if parent:
            parent = parent.id
        add_data(db, "Feature",
                 [(id, parent, title, desc, display,
                   level, directory, attributes)])
    def set_current(self):
        global current_feature
        current_feature = self
class Control:
    """Wrapper for one control of a Dialog; helper methods add rows to the
    ControlEvent / EventMapping / ControlCondition tables."""
    def __init__(self, dlg, name):
        self.dlg = dlg
        self.name = name
    def event(self, ev, arg, cond = "1", order = None):
        add_data(self.dlg.db, "ControlEvent",
                 [(self.dlg.name, self.name, ev, arg, cond, order)])
    def mapping(self, ev, attr):
        add_data(self.dlg.db, "EventMapping",
                 [(self.dlg.name, self.name, ev, attr)])
    def condition(self, action, condition):
        add_data(self.dlg.db, "ControlCondition",
                 [(self.dlg.name, self.name, action, condition)])
class RadioButtonGroup(Control):
    """A RadioButtonGroup control; add() appends buttons with 1-based order.

    NOTE: the ``property`` parameter name shadows the builtin (kept for
    interface compatibility).
    """
    def __init__(self, dlg, name, property):
        self.dlg = dlg
        self.name = name
        self.property = property
        self.index = 1
    def add(self, name, x, y, w, h, text, value = None):
        if value is None:
            value = name
        add_data(self.dlg.db, "RadioButton",
                 [(self.property, self.index, value,
                   x, y, w, h, text, None)])
        self.index += 1
class Dialog:
    """One row of the Dialog table plus factories for its controls."""
    def __init__(self, db, name, x, y, w, h, attr, title, first, default, cancel):
        self.db = db
        self.name = name
        self.x, self.y, self.w, self.h = x,y,w,h
        add_data(db, "Dialog", [(name, x,y,w,h,attr,title,first,default,cancel)])
    def control(self, name, type, x, y, w, h, attr, prop, text, next, help):
        # Generic Control-table insert; the typed helpers below delegate here.
        add_data(self.db, "Control",
                 [(self.name, name, type, x, y, w, h, attr, prop, text, next, help)])
        return Control(self, name)
    def text(self, name, x, y, w, h, attr, text):
        return self.control(name, "Text", x, y, w, h, attr, None,
                            text, None, None)
    def bitmap(self, name, x, y, w, h, text):
        return self.control(name, "Bitmap", x, y, w, h, 1, None, text, None, None)
    def line(self, name, x, y, w, h):
        return self.control(name, "Line", x, y, w, h, 1, None, None, None, None)
    def pushbutton(self, name, x, y, w, h, attr, text, next):
        return self.control(name, "PushButton", x, y, w, h, attr, None, text, next, None)
    def radiogroup(self, name, x, y, w, h, attr, prop, text, next):
        add_data(self.db, "Control",
                 [(self.name, name, "RadioButtonGroup",
                   x, y, w, h, attr, prop, text, next, None)])
        return RadioButtonGroup(self, name, prop)
    def checkbox(self, name, x, y, w, h, attr, prop, text, next):
        return self.control(name, "CheckBox", x, y, w, h, attr, prop, text, next, None)
def pe_type(path):
    """Return the machine type of the PE executable at *path*.

    Reads the first 1000 bytes, follows the PE-header offset stored at 0x3c,
    verifies the "PE\\0\\0" signature, and returns the IMAGE_FILE_HEADER
    Machine field (e.g. 0x14c i386, 0x200 Itanium, 0x8664 AMD64).
    """
    # Use a context manager so the handle is closed (was leaked), and a bytes
    # literal for the signature so the comparison also works on Python 3.
    with open(path, "rb") as f:
        header = f.read(1000)
    # offset of PE header is at offset 0x3c
    pe_offset = struct.unpack("<i", header[0x3c:0x40])[0]
    assert header[pe_offset:pe_offset+4] == b"PE\0\0"
    machine = struct.unpack("<H", header[pe_offset+4:pe_offset+6])[0]
    return machine
def set_arch_from_file(path):
    """Set the module globals msi_type, Win64 and arch_ext from the PE
    machine type of the executable at *path*.

    Raises ValueError for machine types other than i386, Itanium and AMD64.
    msi_type ends up as an MSI template string like "x64;1033".
    """
    global msi_type, Win64, arch_ext
    machine = pe_type(path)
    if machine == 0x14c:
        # i386
        msi_type = "Intel"
        Win64 = 0
        arch_ext = ''
    elif machine == 0x200:
        # Itanium
        msi_type = "Intel64"
        Win64 = 1
        arch_ext = '.ia64'
    elif machine == 0x8664:
        # AMD64
        msi_type = "x64"
        Win64 = 1
        arch_ext = '.amd64'
    else:
        # Call-form raise works on both Python 2 and 3 (was: ``raise ValueError, "..."``).
        raise ValueError("Unsupported architecture")
    msi_type += ";1033"
|
from random import shuffle

# Read the class size, collect one name per student, then print the names
# in a random order.
total = int(input('Quantidade de Alunos: '))
nomes = []
for i in range(total):
    # Fix: the original reused the variable ``n`` (the student count) for each
    # typed name, shadowing it inside the loop; use a separate variable.
    aluno = input('Digite o nome do Aluno nº {}: '.format(i + 1))
    nomes.append(aluno)
shuffle(nomes)
print(nomes)
"""
Make vertical profile plots.
"""
import galene as ga
# Per-variable x-axis limits for the profile figures.
# NOTE(review): 'psal' maps to a pair of ranges while 'temp' is a single
# range — presumably galene's save_profile_figure accepts either form for
# xlim; confirm against the galene API.
xlim_var = {
    'temp': [0, 25],
    'psal': ([0, 15], [0, 35]),
}
# Datasets to compare: CTD observations plus two model runs.
data_id_list = [
    'ices-ctd',
    'run001',
    'run002',
]
var_list = ['temp', 'psal']
for var in var_list:
    # Load the profile dataset of every source for this variable.
    dataset_list = []
    for data_id in data_id_list:
        d = ga.read_dataset(data_id, 'profile', var)
        dataset_list.append(d)
    # find pairs
    pairs = ga.find_station_pairs(*dataset_list, time_threshold=300.)
    for key in pairs:
        # Collect the matching cube from each source that has this station pair.
        cube_list = []
        for data_id in data_id_list:
            if data_id in pairs[key]:
                cube = pairs[key][data_id]
                cube_list.append(cube)
        ga.save_profile_figure(cube_list, xlim=xlim_var.get(var), alpha=0.7)
|
# manager.py
# author: andrew young
# email: ayoung@thewulf.org
from django.db.models import Manager
from elasticsearch.helpers import bulk as elasticbulk
from elasticmodels.utils import serializers
from elasticmodels.utils.aliasing import AliasedIndex
from elasticmodels.utils.elasticobject import ElasticObject
from elasticmodels.tasks import indexing_task, bulk_indexing_task
class AliasAlreadyExists(Exception):
    """Raised when an elasticsearch alias with the requested name already exists."""
class ElasticModelManager(ElasticObject, Manager):
    """the interface for searching, setting up and managing the elasticsearch
    document related to a django model.
    """
    @indexing_task
    def index_document(self, pk, instance, create=False):
        # op_type "create" fails if a document with this id already exists;
        # "index" creates-or-replaces.
        return self.index(id=pk, body=instance, op_type="create" if create else "index")
    @indexing_task
    def remove_document(self, pk):
        # Delete the elasticsearch document whose id is the model pk.
        return self.delete(id=pk)
    def search_es(self, raw_only=False, *args, **kwargs):
        """Run an elasticsearch search.

        Returns the raw elasticsearch response when *raw_only* is true,
        otherwise a (queryset, raw_results) pair where the queryset preserves
        the elasticsearch relevance ordering.
        """
        raw_results = self.search(*args, **kwargs)
        if raw_only:
            return raw_results
        results = self._convert_to_queryset(raw_results)
        return results, raw_results
    def _convert_to_queryset(self, raw_results):
        """ takes the raw elasticsearch results and creates a queryset that will
        maintain the ordering of the elasticsearch results.
        """
        pks = [result["_source"]["id"] for result in raw_results["hits"]["hits"]]
        # Build a SQL CASE expression mapping each pk to its elasticsearch
        # rank so the database returns rows in relevance order.
        # NOTE(review): pks are interpolated into raw SQL via .extra() —
        # presumably always integer ids from our own index, but verify they
        # cannot be attacker-controlled.
        clauses = " ".join(["WHEN id={0} THEN {1}".format(pk, i)
                            for i, pk in enumerate(pks)])
        ordering = "CASE {0} END".format(clauses)
        results = self.filter(pk__in=pks)\
            .extra(select={"ordering": ordering}, order_by=("ordering",))
        return results
    def _put_mapping(self):
        # Push this manager's mapping to its bound index/doctype.
        return self.indices.put_mapping(index=self.index_name,
            doc_type=self.doctype_name, mapping=self.mapping)
    @bulk_indexing_task
    def index_queryset(self, queryset):
        # TODO
        # elasticbulk
        pass
|
# ####erwin_model_graph.py
# # erwin文件反向工程,生成元数据存入表。
from config.config import cfg as config
from schema import metadata_eng_reverse as mdr
from mdata import metadata_initialize as mdi
from privilege import user_mngt as ur
import win32com.client
logger = config.logger
# All foreign-key (relationship) references collected while parsing.
all_rel_ref_list = []
# All primary-key column records collected while parsing.
all_key_list = []
def read_erwin_file(filename, rel_only=False):
    """Parse an erwin model file via the SCAPI COM interface.

    Populates the module-level all_rel_ref_list / all_key_list as a side
    effect and returns (all_table_list, all_columns_list). When *rel_only*
    is true, only relationship references are collected (see
    generate_attributes).
    """
    # sample source file:
    # filename = "D:\WorkDir\项目工作\合同中心\Contract_New202003.erwin"
    global all_rel_ref_list, all_key_list
    all_rel_ref_list = []
    all_key_list = []
    # create the COM automation object
    scapi = win32com.client.Dispatch('AllFusionERwin.SCAPI')
    # scapi=win32com.client.Dispatch('ADODB.Connection')
    # connect to the model stored in the persistence unit (the .erwin file)
    scPUnit = scapi.PersistenceUnits.Add(filename, "RDO=yes")
    # open a session for accessing the in-memory model data
    scSession = scapi.Sessions.Add()
    scSession.Open(scPUnit, 0, 0)
    # transaction control
    # scTranId = scSession.BeginTransaction()
    # fetch all Relationship objects
    # scRObjects = scSession.ModelObjects.Collect(scSession.ModelObjects.Root, 'Relationship', 1)
    # fetch all Entity model objects
    scMObjects = scSession.ModelObjects.Collect(scSession.ModelObjects.Root, 'Entity', 1)
    all_columns_list = []
    all_table_list = []
    for scObj in scMObjects:
        # generate the table-level metadata
        (schema, table_name, table_comment, tables_dict) = generate_Tables(scObj)
        if tables_dict is not None:
            all_table_list.append(tables_dict)
            logger.info("table mapping done,table_name={}".format(table_name))
        # fetch all Attribute objects of this entity
        scAttrObjects = scSession.ModelObjects.Collect(scObj, 'Attribute', 1)
        # fetch all key-group objects of this entity
        sckeyObjects = scSession.ModelObjects.Collect(scObj, 'Key_Group', 1)
        key_list = None
        for sckeyObject in sckeyObjects:
            pk = sckeyObject.Properties('Key_Group_Type').Value
            if pk is not None and pk == 'PK':
                sckeys = scSession.ModelObjects.Collect(sckeyObject, 'Key_Group_Member', 1)
                if sckeys is not None:
                    key_list = generate_key_list(schema, sckeys, table_name, table_comment)
        all_columns_list += generate_attributes(scAttrObjects, schema, table_name, table_comment, key_list, rel_only)
    return all_table_list, all_columns_list
def generate_key_list(schema, sckeys, table_name, table_comment):
    """Collect primary-key member column names for one entity.

    Appends one record per key column to the module-level all_key_list
    (LONG_ID = Attribute_Ref, used later to resolve FK references) and
    returns the plain list of key column names.
    """
    global all_key_list
    key_list = []
    for key in sckeys:
        # key_name = key.Properties('Name').Value
        key_name = key.Properties('Physical_Name').Value
        key_ref = key.Properties('Attribute_Ref').Value
        key_list.append(key_name)
        key_dict = {}
        key_dict["TABLE_SCHEMA"] = schema
        key_dict["TABLE_NAME"] = table_name
        key_dict["TABLE_COMMENT"] = table_comment
        key_dict["COLUMN_NAME"] = key_name
        key_dict["LONG_ID"] = key_ref
        all_key_list.append(key_dict)
    return key_list
def generate_relationship(tenant_id,file_name):
    """Derive 1:N entity-relationship records from an erwin file.

    Re-parses *file_name* collecting FK references and PK records, maps the
    involved table names to stored entity metadata for *tenant_id*, and
    returns a list of relationship dicts (deduplicated per from/to field
    pair). Returns None when parsing yielded no data.
    """
    global all_rel_ref_list, all_key_list
    read_erwin_file(file_name, True)
    if all_rel_ref_list is None or all_key_list is None:
        return None
    rel_list = []
    rel_key_list = []
    table_names = []
    # Unique list of tables that own a primary key.
    for key in all_key_list:
        key_table_name = key.get("TABLE_NAME")
        if table_names.count(key_table_name) <= 0:
            table_names.append(key_table_name)
    entity_dict = get_entity_mapping(tenant_id, table_names)
    for item in all_rel_ref_list:
        # schema = item.get("TABLE_SCHEMA")
        table_name = item.get("TABLE_NAME")
        # table_comment = item.get("TABLE_COMMENT")
        col_name = item.get("COLUMN_NAME")
        col_id = item.get("REL_ATTR_REF")
        # Find the PK record this FK reference points at (matched by LONG_ID).
        for key in all_key_list:
            # key_schema = key.get("TABLE_SCHEMA")
            key_table_name = key.get("TABLE_NAME")
            if key_table_name is None:
                continue
            key_table_comment = key.get("TABLE_COMMENT")
            key_name = key.get("COLUMN_NAME")
            key_id = key.get("LONG_ID")
            if key_id == col_id:
                item["REF_TABLE_NAME"] = key_table_name
                item["REF_TABLE_COMMENT"] = key_table_comment
                item["REF_COLUMN_NAME"] = key_name
                # Resolve both endpoints to stored entity-field metadata.
                entity_fields_from = entity_dict.get(key_table_name)
                frm_field = get_entity_field_by_name(entity_fields_from, key_name)
                entity_fields_to = entity_dict.get(table_name)
                to_field = get_entity_field_by_name(entity_fields_to, col_name)
                if frm_field is None or to_field is None:
                    continue
                entity_dict1 = {}
                entity_dict1["rel_type"] = "1:N"
                frm_f = frm_field.get("md_fields_id")
                to_f = to_field.get("md_fields_id")
                entity_dict1["to_entity_id"] = to_field.get("md_entity_id")
                entity_dict1["to_field_id"] = to_f
                entity_dict1["from_entity_id"] = frm_field.get("md_entity_id")
                entity_dict1["from_field_id"] = frm_f
                rel_name = to_field.get("md_entity_name") + " AND " + frm_field.get("md_entity_name") + " RELATION"
                entity_dict1["md_entity_rel_desc"] = rel_name
                entity_dict1["active_flag"] = "Y"
                # Deduplicate on the "fromId-toId" key.
                s1 = str(frm_f) if frm_f is not None else ""
                s2 = "-" + str(to_f) if to_f is not None else ""
                s = s1 + s2
                if rel_key_list is not None and rel_key_list.count(s) > 0:
                    continue
                rel_key_list.append(s)
                rel_list.append(entity_dict1)
                logger.info("generate entity relation from erwin,rel_name={}".format(rel_name))
                break
    return rel_list
def get_entity_field_by_name(fields, field_name):
    """Return the first record in *fields* whose md_fields_name equals *field_name*.

    Returns None when *fields* is None, *field_name* is None, or nothing matches.
    """
    if fields is None or field_name is None:
        return None
    return next(
        (record for record in fields
         if record.get("md_fields_name") == field_name),
        None,
    )
def get_entity_mapping(tenant_id, table_names):
    """Group the tenant's entity-field records by table name (md_tables_name).

    Returns a dict mapping each md_tables_name to the list of its records.
    """
    entity_dict = {}
    for record in mdi.query_entity_by_tenant(tenant_id, table_names):
        entity_dict.setdefault(record.get("md_tables_name"), []).append(record)
    return entity_dict
def generate_Tables(scObj):
    """Build the table-level metadata for one erwin Entity object.

    Returns a 4-tuple (schema, table_name, table_comment, tables_dict);
    tables_dict is None when the physical name cannot be read, so callers
    can skip the entity.
    """
    schema = ""
    table_name = ""
    tables_dict = {}
    try:
        # Prefer the physical name; the logical 'Name' property is not used.
        # table_name = scObj.Properties('Name').Value
        table_name = scObj.Properties('Physical_Name').Value
    except Exception as ex:
        logger.warning("generate table name failed,obj={}".format(scObj))
        # BUG FIX: this previously returned a 3-tuple while the success path
        # returns 4 values, making the caller's 4-way unpacking raise
        # ValueError whenever an entity had no readable physical name.
        return (schema, table_name, None, None)
    # Read the Definition (used as the table comment) and the schema name;
    # both default to empty when the properties are absent.
    try:
        scDefineName = scObj.Properties('Definition').Value
        schema = scObj.Properties('Schema_Name').Value
    except Exception as ex:
        scDefineName = ''
    tables_dict["TABLE_NAME"] = table_name
    table_comment = scDefineName
    tables_dict["TABLE_COMMENT"] = table_comment
    tables_dict["TABLE_SCHEMA"] = schema
    # tables_list.append(tables_dict)
    return (schema, table_name, table_comment, tables_dict)
# 产生属性对象
def generate_attributes(scAttrObjects, schema, table_name, table_comment, key_list, rel_only=False):
    """Walk erwin attribute objects and build column dicts for one table.

    :param scAttrObjects: iterable of erwin attribute COM objects
    :param schema: owning schema name
    :param table_name: physical table name
    :param table_comment: table description
    :param key_list: list of primary-key column names, or None
    :param rel_only: when True, only collect FK relation refs into the
        module-global all_rel_ref_list and produce no column dicts
    :return: list of column dicts
    """
    global all_rel_ref_list
    columns_list = []
    fields_list = []
    # NOTE(review): is_null is initialized once outside the loop, so a value
    # read for one attribute can leak into the next iteration if that
    # iteration's property lookup fails early — confirm this is intended.
    is_null = None
    for scAttrObj in scAttrObjects:
        col_dict = {}
        try:
            # Prefer the physical name; fall back to the logical name when it
            # is blank or still the erwin placeholder "%AttName".
            scAttrName = scAttrObj.Properties('Physical_Name').Value
            if scAttrName is None or len(scAttrName.strip()) <= 0 or scAttrName == "%AttName":
                scAttrName = scAttrObj.Properties('Name').Value
        except Exception as ex:
            scAttrName = ''
            continue
        data_type, is_key, long_Id = None, None, None
        try:
            if rel_only:
                # Harvest the FK reference (parent relationship/attribute ids)
                # for later relation generation.
                if scAttrObj.Properties('Parent_Attribute_Ref') is not None:
                    parent_attr_ref = scAttrObj.Properties('Parent_Attribute_Ref').Value
                    parent_rel_ref = scAttrObj.Properties('Parent_Relationship_Ref').Value
                    rel_ref = generate_rel_infos(table_name, scAttrName, parent_rel_ref, parent_attr_ref)
                    all_rel_ref_list.append(rel_ref)
                # In rel_only mode no column data is produced for this attribute.
                if rel_only:
                    continue
        except:
            parent_attr_ref = ""
        try:
            scAttrDefineName = scAttrObj.Properties('Definition').Value
        except:
            scAttrDefineName = ""
        try:
            data_type = scAttrObj.Properties('Physical_Data_Type').Value
            # long_Id = scAttrObj.Properties('Long_Id').Value
            # Fall back to the logical type when the physical one is blank.
            if data_type is None or len(data_type.strip()) <= 0:
                data_type = scAttrObj.Properties('Logical_Data_Type').Value
            is_key = ''
            if key_list is not None:
                for pk_item in key_list:
                    if pk_item == scAttrName:
                        is_key = 'PRI'
                        break
            is_null = scAttrObj.Properties('Null_Option_Type').Value
        except Exception as ex:
            data_type = ""
            is_null = "Y"
            is_key = ""
            logger.info("generate_attributes exception,{}".format(ex))
        # Assemble the column record.
        # scAttrObj.Properties('Physical_Name').Value = scAttrName
        col_dict["TABLE_SCHEMA"] = schema
        col_dict["TABLE_NAME"] = table_name
        col_dict["TABLE_COMMENT"] = table_comment
        col_dict["COLUMN_NAME"] = scAttrName
        col_dict["COLUMN_COMMENT"] = scAttrDefineName
        new_type, precision, scale = convert_numberic(data_type)
        col_dict["DATA_TYPE"] = new_type
        col_dict["NUMERIC_PRECISION"] = precision
        col_dict["NUMERIC_SCALE"] = scale
        # col_dict["LONG_ID"] = long_Id
        # NOTE(review): Null_Option_Type == 1 is treated as NOT NULL here —
        # inferred from this mapping only; confirm against the erwin API.
        is_null_flag = 'Y'
        if is_null is not None and is_null == 1:
            is_null_flag = 'N'
        col_dict["IS_NULLABLE"] = is_null_flag
        col_dict["COLUMN_KEY"] = is_key
        columns_list.append(col_dict)
        fields_list.append(scAttrName)
    logger.info("fields mapping done,table_name={},fied_name_list={}".format(table_name, fields_list))
    return columns_list
def generate_rel_infos(table_name, attr_name, parent_rel_ref, parent_attr_ref):
    """Build a relation-reference record for a foreign-key attribute."""
    return {
        "TABLE_NAME": table_name,
        "COLUMN_NAME": attr_name,
        "REL_OBJ_REF": parent_rel_ref,
        "REL_ATTR_REF": parent_attr_ref,
    }
def convert_numberic(type):
    """Split a DDL type like "decimal(10,2)" into (base_type, precision, scale).

    :param type: raw type string, e.g. "varchar(255)", "decimal(10,2)",
        "int"; may be None. (The parameter shadows the builtin ``type`` —
        name kept for backward compatibility with keyword callers.)
    :return: (new_type, precision, scale); precision/scale are None when
        absent, and 0 when present but unparseable.
    """
    new_type = type
    precision = None
    scale = None
    if type is not None and type.count("(") > 0:
        icount = type.index("(")
        new_type = type[:icount]
        if type.count(",") > 0:
            i_d = type.index(",")
            # BUG FIX: the precision parse was unguarded in this branch while
            # the no-comma branch guarded it; input like "num(x,2)" raised.
            try:
                precision = int(type[icount + 1:i_d].strip())
            except Exception:
                precision = 0
            if type.count(")") > 0:
                i_e = type.index(")")
                try:
                    scale = int(type[i_d + 1:i_e].strip())
                except Exception:
                    scale = 0
        else:
            if type.count(")") > 0:
                i_e = type.index(")")
                try:
                    precision = int(type[icount + 1:i_e].strip())
                except Exception:
                    precision = 0
    return new_type, precision, scale
## 1. Initialize table and column metadata by reverse-engineering the erwin file
def reverse_tables_columns(user_id, tenant_id, database_name, schema, file_name):
    """Initialize table/column metadata by reverse-engineering an erwin file."""
    table_defs, column_defs = read_erwin_file(file_name)
    return mdr.intialize_md_tables_from_schema(
        user_id, tenant_id, database_name, schema, table_defs, column_defs)
## 2. Initialize entity relations by reverse-engineering the database foreign keys (precondition: entity/fields metadata already generated)
def reverse_constraint(user_id, tenant_id, schema, file_name):
    """Initialize entity relations from the foreign keys in an erwin file."""
    constraints = generate_relationship(tenant_id, file_name)
    return mdr.intialize_entity_rel_from_schema(
        user_id, tenant_id, schema, constraints, True)
if __name__ == '__main__':
    # Resolve the admin user's ids for all subsequent metadata calls.
    user = ur.get_user("admin")
    user_id = user.get("user_id")
    tenant_id = user.get("tenant_id")
    schema = "common"
    database_name = "mysql"
    # Database type: mysql/oracle/pb.
    db_type = 'mysql'
    # ##### 1. Initialize the table structure from an erwin file (note the
    # path style: d:/downloads/eSpace_File/xxx.erwin).
    file_name = "d:/downloads/eSpace_File/123-new.erwin"
    # re = reverse_tables_columns(user_id, tenant_id, database_name, schema, file_name)
    # logger.info("all tables in[{}],re={}".format(schema, re))
    # #### 4. Generate Neo4J graph data from the metadata objects.
    entity_code_list = ["xx_Lifecycle", "xx_Rel"]
    entity_catagory = 'XXX'  # coarse category label added to the graph nodes
    # entity_code_list = None  -> initialize ALL entities.
    # re = mdi.ini_entity_model_graph(tenant_id, entity_code_list, entity_catagory, schema)
|
from unittest import TestCase
try:
    # Python 3.4+: stdlib implementation.
    from functools import partialmethod
except ImportError:
    # Partial method for Python 2.7 - https://gist.github.com/carymrobbins/8940382
    from functools import partial
    # noinspection PyPep8Naming
    class partialmethod(partial):
        # Descriptor protocol: when accessed through an instance, prepend the
        # instance to the stored positional args (i.e. behave like a bound
        # method); when accessed on the class, return the raw partial.
        def __get__(self, instance, owner):
            if instance is None:
                return self
            return partial(self.func, instance,
                *(self.args or ()), **(self.keywords or {}))
class BaseUnitTest(TestCase):
def run_test(self, method, output, **kwargs):
actual = method(**kwargs)
self.assertEqual(output, actual)
class KwargsToOutputDynamicTestsMetaClass(type):
    """Metaclass that expands a 'tests' mapping into test_* methods.

    The class dict must provide 'tests' (a mapping of test name ->
    {'output': ..., 'kwargs': {...}}) and 'func' (the callable under test);
    'run_test' may override the runner inherited from BaseUnitTest.
    """

    def __new__(cls, name, bases, dict):
        method = dict.get('run_test', BaseUnitTest.run_test)
        # BUG FIX: the loop variable used to be called 'name', shadowing the
        # class-name parameter; the generated class ended up named after the
        # last test instead of *name*.
        for test_name, args in dict['tests'].items():
            dict['test_%s' % (test_name,)] = partialmethod(
                method, dict['func'], args['output'], **args['kwargs'])
        return type(name, bases, dict)
|
import judicious
# Point the client at a local judicious server; swap in the hosted URL
# below for production use.
judicious.register("http://127.0.0.1:5000")
# judicious.register("https://imprudent.herokuapp.com")
# Post an intertemporal-choice task (smaller-sooner = 1 vs larger-later = 2
# after a 1-day delay); presumably blocks until a human responds — confirm
# with the judicious API docs.
decision = judicious.intertemporal_choice(SS=1, LL=2, delay="1 day")
print(decision)
|
#!/usr/bin/env python3
import sys
import re
import json
from os import path
from lxml import etree
class AudioFileProcessor:
    """Build an LMMS <track> element for an AudioFileProcessor preset."""

    def __init__(self, inst):
        # Load the preset file and re-tag its settings node as a track.
        preset_tree = etree.parse(inst.preset)
        preset_track = preset_tree.xpath('/lmms-project/instrumenttracksettings')[0]
        preset_track.tag = 'track'
        preset_track.attrib['name'] = inst.name
        afp = preset_track.xpath('instrumenttrack/instrument/audiofileprocessor')[0]
        afp.attrib['src'] = inst.src
        self.presettrack = preset_track

    def track(self):
        """Return the prepared track element."""
        return self.presettrack

    def setparam(self, param):
        """Not implemented."""
        pass

    def getparam(self):
        """Not implemented."""
        pass
class Sf2Player:
    """Build an LMMS <track> element for an Sf2Player (SoundFont) preset."""
    params = {}

    def __init__(self, inst):
        # Load the preset file and re-tag its settings node as a track.
        preset_tree = etree.parse(inst.preset)
        preset_track = preset_tree.xpath('/lmms-project/instrumenttracksettings')[0]
        preset_track.tag = 'track'
        preset_track.attrib['name'] = inst.name
        sf2 = preset_track.xpath('instrumenttrack/instrument/sf2player')[0]
        sf2.attrib['src'] = inst.src
        sf2.attrib['patch'] = inst.patch
        self.presettrack = preset_track

    def track(self):
        """Return the prepared track element."""
        return self.presettrack

    def setparam(self, param):
        """Not implemented."""
        pass

    def getparam(self):
        """Not implemented."""
        pass
class Kicker:
    """Build an LMMS <track> element for a Kicker (drum synth) preset."""

    def __init__(self, inst):
        # Load the preset file and re-tag its settings node as a track.
        preset_tree = etree.parse(inst.preset)
        preset_track = preset_tree.xpath('/lmms-project/instrumenttracksettings')[0]
        preset_track.tag = 'track'
        preset_track.attrib['name'] = inst.name
        self.presettrack = preset_track

    def track(self):
        """Return the prepared track element."""
        return self.presettrack

    def setparam(self, param):
        """Not implemented."""
        pass

    def getparam(self):
        """Not implemented."""
        pass
class Lmms:
    """Programmatic builder for LMMS project (.mmp) XML files.

    Wraps a parsed project tree and provides helpers to add instrument
    tracks, beat/bassline tracks, patterns and notes.

    Timing model (ticks): ``unit`` per beat cell, ``steps`` cells per beat
    pattern, ``quarterlength`` per quarter note.
    """
    presets = {}        # class-level default; each instance gets its own copy
    unit = 12
    steps = 16
    quarterlength = 48
    beattracks = 1      # number of beat/bassline tracks in the song

    def __init__(self, project):
        # BUG FIX: 'presets' was only a class attribute, so every Lmms
        # instance shared (and mutated) the same dict; shadow it per instance.
        self.presets = {}
        self.project = etree.parse(project)

    def write(self, file):
        """Serialize the project tree to *file*."""
        self.project.write(file, encoding='utf-8', xml_declaration=True)

    def collectpresets(self, file):
        """Index preset files listed (one path per line) in *file*.

        The key is the path below data/presets/ with '/' -> '.' and the
        .xpf/.xiz extension stripped; the value is the absolute path.
        """
        with open(file) as f:
            for l in f:
                l = l.strip()
                s = re.sub('.*data/presets/', '', l)
                s = re.sub('(.xpf|.xiz)', '', s)
                nm = re.sub('/', '.', s)
                absp = path.abspath(l)
                self.presets[nm] = absp

    def listpresets(self):
        """Return the known preset names."""
        return list(self.presets.keys())

    def addpreset(self, track, preset):
        """Append the instrumenttrack element of *preset* to *track*.

        NOTE(review): this method is shadowed by the no-op addpreset(preset)
        defined near the end of the class (the later definition wins).
        """
        tree = etree.parse(self.presets.get(preset))
        pt = tree.xpath(
            '/lmms-project/instrumenttracksettings/instrumenttrack')[0]
        track.append(pt)
        return pt

    def addbeatpreset(self, preset, name):
        """Add *preset* as a new sub-track of the first beat/bassline track."""
        tracks = self.project.xpath('/lmms-project/song/trackcontainer/track[@type="1"]')
        self.beattracks = len(tracks)
        track = tracks[0]
        t = track.xpath('bbtrack/trackcontainer')[0]
        tree = etree.parse(self.presets.get(preset))
        pt = tree.xpath('/lmms-project/instrumenttracksettings')[0]
        pt.tag = 'track'
        pt.attrib['name'] = name
        t.append(pt)
        return pt

    def addtrack(self, name):
        """Append a new song-level instrument track named *name*."""
        parent = self.project.xpath('/lmms-project/song/trackcontainer')[0]
        # BUG FIX: 'track' was never created here (NameError). Build it the
        # same way addbeattrack does, but with type '0' (instrument track).
        track = etree.Element('track', type='0', name=name, muted='0', solo='0')
        parent.append(track)
        return track

    def addbeatinstrument(self, inst):
        """Append instrument element *inst* to the first beat track's container."""
        track = self.project.xpath('/lmms-project/song/trackcontainer/track[@type="1"]')[0]
        subtrack = track.xpath('bbtrack/trackcontainer')[0]
        subtrack.append(inst)
        # BUG FIX: the original returned the undefined name 'pt'.
        return inst

    def findtrack(self, name):
        """Return the first song track whose name attribute equals *name*."""
        track = self.project.xpath(
            '/lmms-project/song/trackcontainer/track[@name="{}"]'.format(name))[0]
        return track

    def _addpattern(self, track, _type, pos):
        """Append a <pattern> of *_type* at tick *pos*, named after the track."""
        pattern = etree.Element('pattern', pos=pos, muted='0', steps='16')
        pattern.attrib['name'] = track.attrib['name']
        pattern.attrib['type'] = _type
        track.append(pattern)
        return pattern

    def addpattern(self, track):
        """Add a melody pattern (type 1) at position 0."""
        return self._addpattern(track, '1', '0')

    def addbeatpattern(self, track):
        """Add one beat pattern (type 0) per beat track, one pattern apart.

        Returns None.
        """
        pos = 0
        for i in range(self.beattracks):
            self._addpattern(track, '0', str(int(pos)))
            pos = pos + self.steps * self.unit

    def _addbeatpattern(self, track):
        """Add the beat-pattern slot belonging to the newest beat track."""
        pos = self.steps * self.unit * (self.beattracks - 1)
        return self._addpattern(track, '0', str(int(pos)))

    def addnotes(self, pattern, notes, pos, pitch, vol):
        """Append parsed notes/chords to *pattern*.

        :param notes: dicts with 'type' in {'Note', 'Chord', 'Measure'}
        :param pos: starting offset, in whole patterns
        :param pitch: semitone transposition added to every key
        :param vol: note volume (scaled by 10 for plain notes; chords use 140)
        """
        offset = self.steps * self.unit * pos
        for n in notes:
            if n['type'] == 'Note':
                elem = etree.Element('note', key="0", pan="0", len="0", vol="0", pos="0")
                elem.attrib['key'] = str(n['key'] + pitch)
                elem.attrib['pos'] = str(int(offset + self.unit * n['pos'] * 4))
                elem.attrib['len'] = str(int(self.unit * n['len'] * 4))
                elem.attrib['vol'] = str(vol * 10)
                pattern.append(elem)
            elif n['type'] == 'Chord':
                for k in n['keys']:
                    elem = etree.Element('note', key="0", pan="0", len="0", vol="140", pos="0")
                    elem.attrib['key'] = str(k + pitch)
                    elem.attrib['pos'] = str(int(offset + self.unit * n['pos'] * 4))
                    elem.attrib['len'] = str(int(self.unit * n['len'] * 4))
                    pattern.append(elem)
            elif n['type'] == 'Measure':
                # A measure marker advances the running offset by one pattern.
                offset = offset + (self.unit * self.steps)

    def addbeatnote(self, pattern, offset):
        """Append a drum hit at *offset* quarter notes."""
        elem = etree.Element('note', pos="0", len="-192", key="57", vol="100", pan="0")
        elem.attrib['pos'] = str(int(self.quarterlength * offset))
        pattern.append(elem)

    def addbeatnotes(self, pattern, notes):
        """Append a hit for every '1' in the step string *notes*."""
        offset = 0
        for n in notes:
            if n == '1':
                elem = etree.Element('note', pos="0", len="-192", key="57", vol="100", pan="0")
                elem.attrib['pos'] = str(int(offset))
                pattern.append(elem)
            offset = offset + self.unit

    def _addbbtrack(self, track):
        """Attach an empty <bbtrack> element to *track*."""
        t = etree.Element('bbtrack')
        #self._addbbtrackcontainer(t)
        track.append(t)

    def _addbbtrackcontainer(self, track):
        """Attach a bbtrackcontainer window element to *track* (unused)."""
        t = etree.Element('trackcontainer', width="640", x="610", y="5", maximized="0",
                          height="400", visible="0", type="bbtrackcontainer", minimized="0")
        track.append(t)

    def addbeattrack(self, name):
        """Append a new beat/bassline track (type 1) named *name*."""
        parent = self.project.xpath('/lmms-project/song/trackcontainer')[0]
        track = etree.Element(
            'track', type='1', name=name, muted='0', solo='0')
        self._addbbtrack(track)
        parent.append(track)
        return track

    def findbeattrack(self, name):
        """Return the first track (of any type) named *name*."""
        track = self.project.xpath(
            '/lmms-project/song/trackcontainer/track[@name="{}"]'.format(name))[0]
        return track

    def addbbtco(self, track, offset, count):
        """Place *count* consecutive bassline segments starting at *offset* patterns."""
        for i in range(count):
            pos = str(int((i + offset) * self.unit * self.steps))
            bbtco = etree.Element(
                'bbtco', color="4286611584", pos=pos, name="", muted="0", len="192", usestyle="1")
            track.append(bbtco)

    def addautomationtrack(self, name):
        """Not implemented."""
        pass

    def findautomationtrack(self, name):
        """Not implemented."""
        pass

    def addautomationpattern(self, name):
        """Not implemented."""
        pass

    def findautomationpattern(self, name):
        """Not implemented."""
        pass

    def removetrack(self, track):
        """Remove *track* from the root element.

        NOTE(review): tracks live under song/trackcontainer, not directly
        under the root, so this likely raises for ordinary tracks — confirm
        intended usage.
        """
        root = self.project.getroot()
        root.remove(track)

    def changesteps(self, track, steps):
        """Record a new step count (applies globally, not to *track*)."""
        self.steps = steps

    def muted(self, track, val):
        """Set the muted attribute of *track*."""
        track.attrib['muted'] = str(val)

    def changebpm(self, bpm):
        """Set the project tempo."""
        head = self.project.xpath('/lmms-project/head')[0]
        head.attrib['bpm'] = str(bpm)

    def changevol(self, vol):
        """Set the master volume."""
        head = self.project.xpath('/lmms-project/head')[0]
        head.attrib['mastervol'] = str(vol)

    def changepitch(self, pitch):
        """Set the master pitch."""
        head = self.project.xpath('/lmms-project/head')[0]
        head.attrib['masterpitch'] = str(pitch)

    def getbeatpattern(self, beattrack, presettrack):
        """Return the pattern of *presettrack* matching *beattrack*'s index."""
        tracks = self.project.xpath('/lmms-project/song/trackcontainer/track[@type="1"]')
        index = tracks.index(beattrack)
        patterns = presettrack.xpath('pattern')
        return patterns[index]

    def getdefaultbeattrack(self):
        """Return the first beat/bassline track."""
        return self.project.xpath('/lmms-project/song/trackcontainer/track[@type="1"]')[0]

    def _updatebeatpatterns(self):
        """Append the newest beat-pattern slot to every beat sub-track."""
        dftrack = self.getdefaultbeattrack()
        tracks = dftrack.xpath('bbtrack/trackcontainer/track')
        pt = []
        for track in tracks:
            pt.append(self._addbeatpattern(track))
        return pt

    def addbeatbaseline(self, name):
        """Add a new beat/bassline track and extend existing sub-tracks."""
        parent = self.project.xpath('/lmms-project/song/trackcontainer')[0]
        track = etree.Element('track', type='1', name=name, muted='0', solo='0')
        self._addbbtrack(track)
        parent.append(track)
        self.beattracks = self.beattracks + 1
        # update patterns
        patterns = self._updatebeatpatterns()
        return (track, patterns)

    def addinstrument(self, inst):
        """Add an instrument described by *inst*; return its track and pattern."""
        plug = None
        if inst.plugin == 'Sf2Player':
            plug = Sf2Player(inst)
        elif inst.plugin == 'AudioFileProcessor':
            plug = AudioFileProcessor(inst)
        elif inst.plugin == 'Kicker':
            plug = Kicker(inst)
        attrib = {}
        track = plug.track()
        if inst.beats:
            parent = self.project.xpath('/lmms-project/song/trackcontainer/track/bbtrack/trackcontainer')[0]
            # NOTE(review): addbeatpattern returns None, so 'pattern' is None
            # on this branch — confirm callers expect that.
            pattern = self.addbeatpattern(track)
        else:
            parent = self.project.xpath('/lmms-project/song/trackcontainer')[0]
            pattern = self.addpattern(track)
        parent.append(track)
        attrib['track'] = track
        attrib['pattern'] = pattern
        return attrib

    def addpreset(self, preset):
        # NOTE(review): shadows addpreset(track, preset) above — the later
        # definition wins; kept as-is to preserve current (no-op) behavior.
        pass

    def addsample(self, sample):
        """Not implemented."""
        pass
|
import torch.nn as nn
import math
from .derive_blocks import derive_blocks
from .operations import Conv1_1
class ImageNetModel(nn.Module):
    """ImageNet classifier assembled from a searched block configuration.

    A 1x1 conv lifts the last block's channels to 1280, followed by global
    average pooling and a 1000-way linear classifier.
    """

    def __init__(self, net_config):
        super(ImageNetModel, self).__init__()
        self.blocks, parsed_net_config = derive_blocks(net_config)
        self.blocks.append(Conv1_1(parsed_net_config[-1][0][1], 1280))
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Linear(1280, 1000)
        self.init_model()
        self.set_bn_param(0.1, 1e-3)

    def forward(self, x, stat=None):
        """Run the block stack, pool to 1x1 and classify. *stat* is unused."""
        for block in self.blocks:
            x = block(x)
        pooled = self.global_pooling(x)
        return self.classifier(pooled.view(pooled.size(0), -1))

    def init_model(self, model_init='he_fout', init_div_groups=True):
        """He-initialize convs; unit-init batchnorms; zero linear biases.

        :param model_init: 'he_fout' (fan-out) or 'he_fin' (fan-in)
        :param init_div_groups: divide the fan by the conv's group count
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if model_init == 'he_fout':
                    fan = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                elif model_init == 'he_fin':
                    fan = m.kernel_size[0] * m.kernel_size[1] * m.in_channels
                else:
                    raise NotImplementedError
                if init_div_groups:
                    fan /= m.groups
                m.weight.data.normal_(0, math.sqrt(2. / fan))
            elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                if m.bias is not None:
                    m.bias.data.zero_()

    def set_bn_param(self, bn_momentum, bn_eps):
        """Apply momentum/eps to every BatchNorm2d in the network."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.momentum = bn_momentum
                m.eps = bn_eps
        return
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.model import no_value_fields
from frappe.translate import set_default_language
from frappe.utils import cint, today
from frappe.utils.momentjs import get_all_timezones
from frappe.twofactor import toggle_two_factor_auth
class SystemSettings(Document):
    """Controller for the single "System Settings" DocType."""

    def validate(self):
        """Validate settings before save.

        Checks password-policy consistency, session-expiry format and
        two-factor-auth prerequisites, and records whether a forced password
        reset was just switched on (consumed in on_update).
        """
        # Password policy: a minimum score is mandatory when the policy is on.
        enable_password_policy = cint(self.enable_password_policy) and True or False
        minimum_password_score = cint(getattr(self, 'minimum_password_score', 0)) or 0
        if enable_password_policy and minimum_password_score <= 0:
            frappe.throw(_("Please select Minimum Password Score"))
        elif not enable_password_policy:
            self.minimum_password_score = ""
        # Session expiry values must look like "hh:mm" with a non-zero part.
        for key in ("session_expiry", "session_expiry_mobile"):
            if self.get(key):
                parts = self.get(key).split(":")
                if len(parts)!=2 or not (cint(parts[0]) or cint(parts[1])):
                    frappe.throw(_("Session Expiry must be in format {0}").format("hh:mm"))
        if self.enable_two_factor_auth:
            # SMS-based 2FA requires a configured SMS gateway.
            if self.two_factor_method=='SMS':
                if not frappe.db.get_value('SMS Settings', None, 'sms_gateway_url'):
                    frappe.throw(_('Please setup SMS before setting it as an authentication method, via SMS Settings'))
            toggle_two_factor_auth(True, roles=['All'])
        else:
            # NOTE: 'retricted' is the actual stored fieldname (sic) — do not
            # "fix" the spelling without a schema migration.
            self.bypass_2fa_for_retricted_ip_users = 0
            self.bypass_restrict_ip_check_if_2fa_enabled = 0
        frappe.flags.update_last_reset_password_date = False
        # Only stamp users' reset dates when the flag flips from off to on.
        if (self.force_user_to_reset_password and
            not cint(frappe.db.get_single_value("System Settings", "force_user_to_reset_password"))):
            frappe.flags.update_last_reset_password_date = True

    def on_update(self):
        """Persist changed fields as system defaults and invalidate caches."""
        for df in self.meta.get("fields"):
            if df.fieldtype not in no_value_fields and self.has_value_changed(df.fieldname):
                frappe.db.set_default(df.fieldname, self.get(df.fieldname))
        if self.language:
            set_default_language(self.language)
        # Drop cached settings so subsequent reads see the new values.
        frappe.cache().delete_value('system_settings')
        frappe.cache().delete_value('time_zone')
        frappe.local.system_settings = {}
        if frappe.flags.update_last_reset_password_date:
            update_last_reset_password_date()
def update_last_reset_password_date():
    """Stamp today's date on every user lacking a password-reset date."""
    frappe.db.sql(""" UPDATE `tabUser`
        SET
            last_password_reset_date = %s
        WHERE
            last_password_reset_date is null""", today())
@frappe.whitelist()
def load():
    """Return timezone list and Select/Data field defaults for the settings UI.

    Only users holding the System Manager role may call this.
    """
    if "System Manager" not in frappe.get_roles():
        frappe.throw(_("Not permitted"), frappe.PermissionError)
    all_defaults = frappe.db.get_defaults()
    defaults = {
        df.fieldname: all_defaults.get(df.fieldname)
        for df in frappe.get_meta("System Settings").get("fields")
        if df.fieldtype in ("Select", "Data")
    }
    return {
        "timezones": get_all_timezones(),
        "defaults": defaults
    }
|
# Generates names from different languages using a transformer.
# The data loading part of this code was copied and adapted from
# https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html
# The transformer code was based on:
# http://peterbloem.nl/blog/transformers
# %%
from __future__ import unicode_literals, print_function, division
from IPython import get_ipython
from io import open
import glob
import os
import unicodedata
import string
import random
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.distributions import Categorical
get_ipython().run_line_magic('matplotlib', 'inline')
# %%
all_letters = string.ascii_letters + " .,;'-"  # printable name characters
START = len(all_letters)  # index of the start-of-sequence token
END = len(all_letters) + 1  # index of the end-of-sequence token
n_letters = len(all_letters) + 2  # vocabulary size including START/END
def findFiles(path): return glob.glob(path)
# Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    """Strip accents from *s* and drop characters outside all_letters."""
    decomposed = unicodedata.normalize('NFD', s)
    kept = [ch for ch in decomposed
            if unicodedata.category(ch) != 'Mn' and ch in all_letters]
    return ''.join(kept)
# Read a file and split into lines
def readLines(filename):
    """Read *filename* (UTF-8) and return its ASCII-normalized lines."""
    # BUG FIX: the original left the file handle open; use a context manager.
    with open(filename, encoding='utf-8') as f:
        content = f.read()
    return [unicodeToAscii(line) for line in content.strip().split('\n')]
# Build the category_lines dictionary, a list of lines per category
category_lines = {}  # category name -> list of names in that category
all_categories = []
# Each *.txt file is one category (language); its lines are names.
for filename in findFiles('/opt/data/pytorch-tutorial-data/names/*.txt'):
    category = os.path.splitext(os.path.basename(filename))[0]
    all_categories.append(category)
    lines = readLines(filename)
    category_lines[category] = lines
n_categories = len(all_categories)
if n_categories == 0:
    raise RuntimeError('Data not found. Make sure that you downloaded data '
        'from https://download.pytorch.org/tutorial/data.zip and extract it to '
        'the current directory.')
print('# categories:', n_categories, all_categories)
print(unicodeToAscii("O'Néàl"))
# %%
# Random item from a list
def randomChoice(l):
    """Return a uniformly random element of sequence *l*."""
    idx = random.randint(0, len(l) - 1)
    return l[idx]
# Get a random category and random line from that category
def randomTrainingPair():
    """Pick a random category, then a random line from that category."""
    category = randomChoice(all_categories)
    return category, randomChoice(category_lines[category])
def categoryTensor(category):
    """Encode *category* as a scalar tensor of its index in all_categories."""
    idx = all_categories.index(category)
    return torch.tensor(idx)
def line2tensor(line):
    """Encode *line* as [START, letter indices..., END]."""
    indices = [all_letters.find(letter) for letter in line]
    return torch.tensor([START] + indices + [END])
def randomTrainingExample():
    """Return (category tensor, input tokens, next-token targets)."""
    category, line = randomTrainingPair()
    tokens = line2tensor(line)
    # Input drops the trailing END; the target is shifted by one position.
    return categoryTensor(category), tokens[0:-1], tokens[1:]
def make_batch(batch_sz):
    """Assemble *batch_sz* random training examples, padded with END.

    :return: (categories [b], inputs [t, b], targets [t, b])
    """
    samples = [randomTrainingExample() for _ in range(batch_sz)]
    max_len = torch.tensor([len(s[1]) for s in samples]).max()
    batch_cat = torch.cat([s[0].unsqueeze(0) for s in samples])
    batch_input = torch.full((max_len, batch_sz), END, dtype=torch.long)
    batch_target = torch.full((max_len, batch_sz), END, dtype=torch.long)
    for col, (_, inp, tgt) in enumerate(samples):
        batch_input[0:len(inp), col] = inp
        batch_target[0:len(tgt), col] = tgt
    return batch_cat, batch_input, batch_target
# %%
class SelfAttention(nn.Module):
    """Multi-head self-attention over [t, b, k] inputs."""

    def __init__(self, k, nheads=1, causal_mask=False):
        super().__init__()
        self.k = k
        self.nheads = nheads
        self.causal_mask = causal_mask
        self.key = nn.Linear(k, k * nheads)
        self.query = nn.Linear(k, k * nheads)
        self.values = nn.Linear(k, k * nheads)
        self.out = nn.Linear(k * nheads, k)

    def forward(self, x):
        # x is [t, b, k]
        t, b = x.shape[0:2]
        h = self.nheads
        # BUG FIX: 'k' was previously read from an unrelated module-level
        # global (the script's k=128), only working by coincidence; bind the
        # embedding width from this module instead.
        k = self.k
        # x is [b, t, k]
        x = x.transpose(0, 1).contiguous()
        # [b, t, h * k] -> [b * h, t, k]
        key = self.key(x).view(b, t, h, k).transpose(1, 2).contiguous().view(-1, t, k)
        query = self.query(x).view(b, t, h, k).transpose(1, 2).contiguous().view(-1, t, k)
        values = self.values(x).view(b, t, h, k).transpose(1, 2).contiguous().view(-1, t, k)
        # NOTE(review): logits are key @ query^T (queries on the second axis),
        # the transpose of the usual convention; kept as-is to preserve behavior.
        raw_att = torch.bmm(key, query.transpose(1, 2)) / (self.k ** 0.5)
        if self.causal_mask:
            # Forbid attending to strictly-future positions.
            mask_inds = torch.triu_indices(t, t, offset=1)
            raw_att[:, mask_inds[0, :], mask_inds[1, :]] = float('-inf')
        att = nn.functional.softmax(raw_att, dim=2)
        out = torch.bmm(att, values)
        # [b * h, t, k] -> [b, h, t, k] -> [b, t, h, k]
        out = out.view(b, h, t, k).transpose(1, 2)
        # [b, t, h, k] -> [t, b, h, k] -> [t, b, h * k]
        out = out.transpose(0, 1).contiguous().view(t, b, -1)
        out = self.out(out)
        return out
class TransformerBlock(nn.Module):
    """Post-norm transformer block: attention then MLP, each with a residual
    connection followed by LayerNorm."""

    def __init__(self, k, nheads=1, hidden_factor=4, causal_mask=False):
        super().__init__()
        self.sa = SelfAttention(k, nheads=nheads, causal_mask=causal_mask)
        self.ln1 = nn.LayerNorm(k)
        self.mlp = nn.Sequential(
            nn.Linear(k, k * hidden_factor),
            nn.ReLU(),
            nn.Linear(k * hidden_factor, k),
        )
        self.ln2 = nn.LayerNorm(k)

    def forward(self, x):
        # x is [t, b, k]
        x = self.ln1(x + self.sa(x))
        x = self.ln2(x + self.mlp(x))
        return x
class Transformer(nn.Module):
    """Character-level transformer LM conditioned on a category id."""

    def __init__(self, ncats, ntokens, nblocks=2, nheads=1, max_len=20, k=32,
                 hidden_factor=4, causal_mask=False):
        super().__init__()
        self.max_len = max_len
        self.pos_embed = nn.Embedding(max_len, k)
        self.cat_embed = nn.Embedding(ncats, k)
        self.token_embed = nn.Embedding(ntokens, k)
        blocks = [
            TransformerBlock(k, nheads, hidden_factor, causal_mask=causal_mask)
            for _ in range(nblocks)
        ]
        self.transformer_blocks = nn.Sequential(*blocks)
        self.classifier = nn.Linear(k, ntokens)

    def forward(self, cat, input):
        # cat is [b]; input is [t, b]
        seq_len, batch_sz = input.shape[0], input.shape[1]
        if seq_len > self.max_len:
            raise Exception('Input is longer than max_len')
        # Sum position, category and token embeddings per [t, b] slot.
        pos = torch.arange(0, seq_len).unsqueeze(1).repeat(1, batch_sz)  # [t, b]
        cat = cat.unsqueeze(0).repeat(seq_len, 1)  # [t, b]
        x = self.pos_embed(pos) + self.cat_embed(cat) + self.token_embed(input)
        return self.classifier(self.transformer_blocks(x))
# %%
# Model/hyperparameter configuration.
ncats = n_categories
ntokens = n_letters
nblocks = 8
nheads = 8
max_len = 30
k = 128
model = Transformer(
    ncats, ntokens, nblocks=nblocks, nheads=nheads, max_len=max_len, k=k,
    causal_mask=True)
model.train()
nsteps = 10000
log_every = 100
batch_sz = 4
lr = 1e-3
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
sum_loss = 0.0
# Next-token cross-entropy training on randomly sampled batches.
for step in range(nsteps):
    model.zero_grad()
    cat, input, target = make_batch(batch_sz)
    output = model(cat, input)
    loss = nn.functional.cross_entropy(output.view(-1, ntokens), target.view(-1))
    loss.backward()
    optimizer.step()
    sum_loss += loss.item()
    # Report the mean loss over the last log_every steps.
    if step != 0 and step % log_every == 0:
        print(f'step: {step} / loss: {sum_loss / log_every}')
        sum_loss = 0.0
model = model.eval()
# %%
def get_sample(model, cat):
    """Sample one name from *model* for category *cat*.

    Tokens are drawn from the model's softmax distribution until END is
    produced; the START token is stripped from the returned string.
    """
    cat = categoryTensor(cat)
    input = [START]
    with torch.no_grad():
        while True:
            output = model(cat.unsqueeze(0), torch.tensor(input).unsqueeze(1))
            # Explicit dim: softmax over the last step's vocabulary logits
            # (the implicit-dim form is deprecated; same result on a 1-D input).
            output_dist = nn.functional.softmax(output[-1, 0, :], dim=0)
            token = Categorical(output_dist).sample()
            if token == END:
                break
            input.append(token)
    # BUG FIX (cleanup): a dead 'sample = input[1:]' assignment inside the
    # loop was removed; the value was always recomputed here.
    sample = input[1:]
    return ''.join([all_letters[s.item()] for s in sample])
def get_samples(model, cat, nsamples):
    """Draw *nsamples* independent samples for category *cat*."""
    return [get_sample(model, cat) for _ in range(nsamples)]
def print_samples():
    """Print 10 generated names for every known category."""
    for cat in all_categories:
        generated = get_samples(model, cat, 10)
        print(cat)
        print(generated)
        print()
# %%
|
"""I/O utilities for SSSOM."""
import logging
from pathlib import Path
from typing import Optional, TextIO, Union
from .context import (
get_default_metadata,
set_default_license,
set_default_mapping_set_id,
)
from .parsers import get_parsing_function, read_sssom_table, split_dataframe
from .typehints import Metadata
from .util import raise_for_bad_path, read_metadata
from .writers import get_writer_function, write_table, write_tables
def convert_file(
    input_path: str,
    output: TextIO,
    output_format: Optional[str] = None,
) -> None:
    """Convert a file.

    :param input_path: The path to the input SSSOM tsv file
    :param output: The path to the output file. If none is given, will default to using stdout.
    :param output_format: The format to which the the SSSOM TSV should be converted.
    """
    raise_for_bad_path(input_path)
    mapping_set_doc = read_sssom_table(input_path)
    write_func, serialisation = get_writer_function(
        output_format=output_format, output=output
    )
    # TODO cthoyt figure out how to use protocols for this
    write_func(mapping_set_doc, output, serialisation=serialisation)  # type:ignore
def parse_file(
    input_path: str,
    output: TextIO,
    input_format: Optional[str] = None,
    metadata_path: Optional[str] = None,
    prefix_map_mode: Optional[str] = None,
    clean_prefixes: bool = True,
) -> None:
    """Parse an SSSOM metadata file and write to a table.

    :param input_path: The path to the input file in one of the legal formats, eg obographs, aligmentapi-xml
    :param output: The path to the output file.
    :param input_format: The string denoting the input format.
    :param metadata_path: The path to a file containing the sssom metadata (including prefix_map)
        to be used during parse.
    :param prefix_map_mode: Defines whether the prefix map in the metadata should be extended or replaced with
        the SSSOM default prefix map. Must be one of metadata_only, sssom_default_only, merged
    :param clean_prefixes: If True (default), records with unknown prefixes are removed from the SSSOM file.
    """
    raise_for_bad_path(input_path)
    metadata = get_metadata_and_prefix_map(
        metadata_path=metadata_path, prefix_map_mode=prefix_map_mode
    )
    metadata = set_default_mapping_set_id(metadata)
    metadata = set_default_license(metadata)
    parse_func = get_parsing_function(input_format, input_path)
    # BUG FIX: the prefix map was also passed as the 'meta' argument; the
    # parser must receive the metadata dictionary there instead.
    doc = parse_func(
        input_path, prefix_map=metadata.prefix_map, meta=metadata.metadata
    )
    if clean_prefixes:
        # We do this because we got a lot of prefixes from the default SSSOM prefixes!
        doc.clean_prefix_map()
    write_table(doc, output)
def validate_file(input_path: str) -> bool:
    """Validate the incoming SSSOM TSV according to the SSSOM specification.

    :param input_path: The path to the input file in one of the legal formats, eg obographs, aligmentapi-xml
    :returns: True if valid SSSOM, false otherwise.
    """
    try:
        read_sssom_table(file_path=input_path)
        return True
    except Exception:
        # BUG FIX: logging.exception takes %-style message args; passing the
        # exception object produced a formatting error. The traceback is
        # attached automatically inside an except block.
        logging.exception("The file is invalid")
        return False
def split_file(input_path: str, output_directory: Union[str, Path]) -> None:
    """Split an SSSOM TSV by prefixes and relations.

    :param input_path: The path to the input file in one of the legal formats, eg obographs, aligmentapi-xml
    :param output_directory: The directory to which the split file should be exported.
    """
    raise_for_bad_path(input_path)
    mapping_set_df = read_sssom_table(input_path)
    write_tables(split_dataframe(mapping_set_df), output_directory)
def get_metadata_and_prefix_map(
    metadata_path: Optional[str] = None, prefix_map_mode: Optional[str] = None
) -> Metadata:
    """
    Load SSSOM metadata from a file, and then augments it with default prefixes.

    :param metadata_path: The metadata file in YAML format
    :param prefix_map_mode: one of metadata_only, sssom_default_only, merged
    :return: a Metadata object carrying the prefix map and metadata dicts
    """
    if metadata_path is None:
        return get_default_metadata()
    if prefix_map_mode is None:
        prefix_map_mode = "metadata_only"
    prefix_map, metadata = read_metadata(metadata_path)
    if prefix_map_mode != "metadata_only":
        # BUG FIX: get_default_metadata() returns a Metadata object (see the
        # early return above); the old two-name unpack swapped its fields and
        # used the metadata dict as a prefix map. Use attribute access.
        default_metadata = get_default_metadata()
        if prefix_map_mode == "sssom_default_only":
            prefix_map = default_metadata.prefix_map
        elif prefix_map_mode == "merged":
            # Extend the user's map with defaults, without overriding it.
            for prefix, uri_prefix in default_metadata.prefix_map.items():
                if prefix not in prefix_map:
                    prefix_map[prefix] = uri_prefix
    return Metadata(prefix_map=prefix_map, metadata=metadata)
|
from django.apps import AppConfig
class PappuConfig(AppConfig):
    """Django application configuration for the 'pappu' app."""
    name = 'pappu'
|
import sqlite3

# Recreate the tweet-analysis tables from scratch.
db = "tweet-data.db"
conn = sqlite3.connect(db)
c = conn.cursor()
schemas = (
    "CREATE TABLE lang_data (language TEXT, top_lang TEXT, datetime TEXT)",
    "CREATE TABLE trend_data (trend TEXT, trend_id1 TEXT, trend_id2 TEXT, trend_id3 TEXT, datetime TEXT)",
    "CREATE TABLE twt_data (top_tweet TEXT, datetime TEXT)",
    "CREATE TABLE country_data (country TEXT, datetime TEXT)",
    "CREATE TABLE love_data (love_words INT, swear_words INT, datetime TEXT)",
    "CREATE TABLE pro_lang_data (pro_lang TEXT, datetime TEXT)",
)
for ddl in schemas:
    # BUG FIX: the original wrapped all drops in one try/except, so the
    # first missing table aborted every remaining drop and the following
    # CREATEs then failed on leftover tables. DROP TABLE IF EXISTS drops
    # each table independently and never raises for a missing one.
    table_name = ddl.split()[2]
    c.execute("DROP TABLE IF EXISTS " + table_name)
    c.execute(ddl)
conn.commit()
conn.close()
|
"""Define Jinja2 filters used in the templates compilation."""
from pyrobuf import parse_proto
from schematics.types import StringType, IntType, ModelType
from protobuf_schematics.types import BytesType, EnumType
class FieldConverter(object):
    """Convert Pyrobuf-parsed protobuf fields into Schematics declarations."""

    # Protobuf scalar/composite type -> Schematics field class.
    PROTOBUF_FIELD_TO_SCHEMATICS_FIELD = {
        'string': StringType,
        'uint32': IntType,
        'uint64': IntType,
        'bytes': BytesType,
        'message': ModelType,
        'enum': EnumType,
    }

    @classmethod
    def convert(cls, field):  # type: (parse_proto.Parser.Field) -> str
        """Convert a field to it's Schematics representation.

        :raises ValueError: for unsupported token types
        """
        token_type_to_handler = {
            'MAP_FIELD': cls._map_field_converter,
            'FIELD': cls._regular_field_converter,
        }
        # Look up the handler before calling it so a KeyError raised inside
        # the handler is not mistaken for an unknown token type.
        handler = token_type_to_handler.get(field.token_type)
        if handler is None:
            raise ValueError('Unsupported Pyrobuf field token type: {}'.format(field.token_type))
        return handler(field)

    @classmethod
    def _get_pyrobuf_field_repr(cls, field):
        """Return the Schematics string representation of a Pyrobuf field type."""
        try:
            schematics_type = cls.PROTOBUF_FIELD_TO_SCHEMATICS_FIELD[field.type]
        except KeyError:
            raise ValueError('Unsupported Pyrobuf field type: {}. Field name: {}'.format(field.type, field.name))
        type_name = schematics_type.__name__
        arg = ''
        # Message and enum types need their target model/enum as argument.
        if schematics_type == ModelType:
            arg = field.message_name
        elif schematics_type == EnumType:
            arg = field.enum_def.name
        return '{}({})'.format(type_name, arg)

    @classmethod
    def _regular_field_converter(cls, field):  # type: (parse_proto.Parser.Field) -> str
        """Convert regular field to it's Schematics representation."""
        field_repr = cls._get_pyrobuf_field_repr(field)
        if field.modifier == 'repeated':
            field_repr = 'ListType({})'.format(field_repr)
        return '{} = {}'.format(field.name, field_repr)

    @classmethod
    def _map_field_converter(cls, field):  # type: (parse_proto.Parser.MapField) -> str
        """Convert map field to it's Schematics representation."""
        value = cls._get_pyrobuf_field_repr(field)
        if field.key_type == 'string':
            key = 'str'
        elif field.key_type == 'uint32' or field.key_type == 'uint64':
            key = 'int'
        elif field.key_type == 'bytes':
            key = 'str'
        else:
            # BUG FIX: the message complained about the key type but
            # formatted the value type (field.type).
            raise ValueError(
                'Unsupported type for map field key. Key type: {}, Field name: {}'.format(field.key_type, field.name)
            )
        # NOTE(review): emitted as DictType(<value repr>, <key name>) — verify
        # this argument order against the template that consumes the string.
        return '{} = DictType({}, {})'.format(field.name, value, key)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Push Verisure climate readings (temperature, optional humidity) into Domoticz
# virtual sensors, only when the Verisure reading is newer than Domoticz's.
import sys
import json
import pytz
import urllib.request
sys.path.insert(0, '/opt/python-verisure/')
import verisure
import pickle
from datetime import datetime
from tzlocal import get_localzone
debug = False
if debug:
    print("Start testing temp hum dev")
# Load site configuration (email, verisurepass, baseurl, auth, climate idx map,
# mypagesSession path). execfile() only exists on Python 2; fall back to exec().
try:
    execfile("/etc/domoticz/scripts.conf")
except:
    exec(open("/etc/domoticz/scripts.conf").read())
# Reuse a pickled Verisure session if one exists; otherwise log in and cache it.
try:
    f = open(mypagesSession, 'rb')
    myPages = pickle.load(f)
    f.close()
except:
    myPages = verisure.Session(email, verisurepass)
    myPages.login()
    f = open(mypagesSession, 'wb')
    pickle.dump(myPages, f)
    f.close()
    if debug:
        print("Loading file failed.")
#Get overview
try:
    dev = myPages.get_overview()
except:
    # Cached session expired: log in again, refresh the cache, retry once.
    myPages = verisure.Session(email, verisurepass)
    myPages.login()
    f = open(mypagesSession, 'wb')
    pickle.dump(myPages, f)
    f.close()
    dev = myPages.get_overview()
    if debug:
        print("Session was timed out")
#Climate
for i in dev['climateValues']:
    if debug:
        print("time: " + i['time'] )
        print("location: " + i['deviceArea'] )
        print("serial: " + i['deviceLabel'] )
        print("temperature: " + str(i['temperature']))
    if 'humidity' in i:
        if debug:
            print("humidity: " + str(i['humidity']))
        # Map humidity to the Domoticz comfort level (0=normal, 1=comfortable,
        # 2=dry, 3=wet).
        if i['humidity'] < 20:
            comf = 2
        if i['humidity'] >= 20 and i['humidity'] <= 35:
            comf = 0
        if i['humidity'] > 35 and i['humidity'] <= 75:
            comf = 1
        if i['humidity'] > 75:
            comf = 3
        # Bug fix: the original URLs contained the mis-encoded sequence
        # '¶m=' ('&para' collapsed into the pilcrow character); the Domoticz
        # JSON API expects '?type=command&param=udevice&...'.
        url = baseurl + "?type=command&param=udevice&idx=" + climate[i['deviceArea']] + "&nvalue=0&svalue=" + str(i['temperature']) + ";" + str(i['humidity']) + ";" + str(comf)
    else:
        url = baseurl + "?type=command&param=udevice&idx=" + climate[i['deviceArea']] + "&nvalue=0&svalue=" + str(i['temperature'])
    if debug:
        print('IDX: ' + climate[i['deviceArea']])
        print('URL: ' + url)
    # Ask Domoticz when this sensor was last updated.
    request = urllib.request.Request(baseurl + "?type=devices&rid=" + climate[i['deviceArea']])
    request.add_header('Authorization', 'Basic ' + auth)
    r = urllib.request.urlopen(request)
    dev = json.loads(r.read().decode(r.info().get_param('charset') or 'utf-8'))
    # Domoticz timestamps are naive local time; Verisure timestamps are UTC.
    domlastupdate = datetime.strptime(dev['result'][0]['LastUpdate'], '%Y-%m-%d %H:%M:%S')
    domlastupdate = domlastupdate.replace(tzinfo=get_localzone())
    verilastupdate = datetime.strptime(i['time'][:-5], '%Y-%m-%dT%H:%M:%S')
    verilastupdate = verilastupdate.replace(tzinfo=pytz.UTC)
    verilastupdate = verilastupdate.astimezone(get_localzone())
    if debug:
        print("dom: " + str(domlastupdate))
        print("ver: " + str(verilastupdate))
    # Only push when Verisure has fresher data than Domoticz.
    if verilastupdate > domlastupdate:
        if debug:
            print("update domoticz")
        request = urllib.request.Request(url)
        request.add_header('Authorization', 'Basic ' + auth)
        r = urllib.request.urlopen(request)
        if debug:
            print("Status code: " + str(r.getcode()))
        if r.getcode() != 200:
            print("Error updating temp in Domoticz. HTTP code: " + str(r.getcode()))
if debug:
    print("End testing temp hum dev")
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------------------------------------------------------------------------------------
# COPERNICUS STUDY PROJECT #
# SUMMER SEMESTER 2017 #
# CREATED BY TEAM B - Cristhian Eduardo Murcia Galeano, Diego Armando Morales Cepeda, Jeison Orlando Londoño Espinosa, Mina Karamesouti and Sangeetha Shankar#
# INSTITUTE FOR GEOINFORMATICS (IFGI) #
# UNIVERSITY OF MUENSTER, GERMANY #
# LAST UPDATED ON 28 AUGUST 2017 #
# ------------------------------------------------------------------------------------------------------------------------------------------------------------
import json
import os
import subprocess
import sys
import traceback
import boto3
import time
def connect():
    """Configure the local AWS CLI with credentials/region from the project config file.

    Runs `aws configure set ...` for the access key, secret key and default region.
    """
    configuration = getDefaultConfigurationFile()
    # NOTE(review): os.system with string concatenation — assumes config values
    # contain no shell metacharacters; subprocess.run with an argv list would be safer.
    os.system("aws configure set AWS_ACCESS_KEY_ID " + configuration["AWS_ACCESS_KEY_ID"])
    os.system("aws configure set AWS_SECRET_ACCESS_KEY " + configuration["AWS_SECRET_ACCESS_KEY"])
    os.system("aws configure set default.region " + configuration["default.region"])
def getDefaultConfigurationFile():
    """Load the project configuration from its fixed Windows path."""
    default_config_path = "C:/grpB_scripts/configuration.json"
    return getConfigurationFile(default_config_path)
def getConfigurationFile(jsonPath):
    """Parse the JSON file at *jsonPath* and return the resulting object."""
    with open(jsonPath, 'r') as config_file:
        return json.load(config_file)
def getNotificationIDAndResourceName():
    """Pop one notification from the water-detection SQS queue.

    Returns a tuple (receipt_handle, filename, attempts) where *filename* is the
    S3 object key from the first record in the message body and *attempts* is the
    retry counter embedded in the record (0 when absent).

    NOTE(review): if the queue is empty, response has no 'Messages' key and this
    raises KeyError — confirm callers handle that.
    """
    sqs = boto3.client('sqs')
    queue_url = 'https://sqs.eu-central-1.amazonaws.com/837005286527/detect_water_bodies_queue'
    #water-detection-image-queue
    # Receive message from SQS queue
    response = sqs.receive_message(
        QueueUrl=queue_url,
        AttributeNames=[
            'SentTimestamp'
        ],
        MaxNumberOfMessages=1,
        MessageAttributeNames=[
            'All'
        ],
        # VisibilityTimeout=0 means the message stays visible to other consumers.
        VisibilityTimeout=0,
        WaitTimeSeconds=0
    )
    message = response['Messages'][0]
    receipt_handle = message['ReceiptHandle']
    # Message body is an S3 event notification; only the first record is used.
    records = json.loads(message["Body"])["Records"]
    record = records[0]
    filename = record["s3"]["object"]["key"]
    attempts = 0
    if "attempts" in record["s3"]["object"]:
        attempts = int(record["s3"]["object"]["attempts"])
    return receipt_handle, filename, attempts
# this function is used while converting Img files (zipped) to Tif files
def getNextFilename(bucketName):
    """Return the key of the next unprocessed zip under 'preprocessed-images/', or None.

    A file is considered unprocessed when its S3 user metadata lacks
    'meta_data_status_key' and its size is below 5 GiB.
    """
    configuration = getDefaultConfigurationFile()
    # NOTE(review): loaded but never used — the literal 'meta_data_status_key'
    # below is hard-coded; confirm the config value matches.
    META_DATA_STATUS_KEY = configuration["META_DATA_STATUS_KEY"]
    s3 = boto3.resource("s3")
    bucket = s3.Bucket(bucketName)
    for key in bucket.objects.all():
        newKey=key.key
        foldername = newKey.split('/',1)[0]
        # Only keys inside 'preprocessed-images/' (skip the folder marker itself).
        if (newKey.split('/',1)[1]!="" and foldername=="preprocessed-images"):
            file = s3.Object(bucketName, key.key)
            if ('meta_data_status_key' not in file.metadata and key.size<5368709120):
                # We skip files larger than 5 GigaBytes since metadata of these files cannot be updated due to some unknown reasons (the script crashes)
                return file.key
    return None
# this function is used in the water detection process
def getNextTifFilename(bucketName):
    """Return the key of the next tif to process under 'preprocessed-images-tif/', or None.

    Eligible files either have no 'meta_data_status_key' metadata yet or are
    flagged 'Retry' (reset by ResetProcessingStatus).
    """
    configuration = getDefaultConfigurationFile()
    # NOTE(review): loaded but never used — the literal key below is hard-coded.
    META_DATA_STATUS_KEY = configuration["META_DATA_STATUS_KEY"]
    s3 = boto3.resource("s3")
    bucket = s3.Bucket(bucketName)
    for key in bucket.objects.all():
        newKey=key.key
        foldername = newKey.split('/',1)[0]
        if (newKey.split('/',1)[1]!="" and foldername=="preprocessed-images-tif"):
            file = s3.Object(bucketName, key.key)
            META = file.metadata
            if ('meta_data_status_key' not in file.metadata or META['meta_data_status_key']=='Retry'):
                return file.key
    return None
def deleteMessage(receipt_handle):
    """Remove a handled notification from the water-detection SQS queue."""
    queue_url = 'https://sqs.eu-central-1.amazonaws.com/837005286527/detect_water_bodies_queue'
    sqs_client = boto3.client('sqs')
    sqs_client.delete_message(
        QueueUrl=queue_url,
        ReceiptHandle=receipt_handle
    )
def downloadZipFile(bucketName, FolderForInput, NameOfZipFile):
    """Download the named zip from the bucket's 'preprocessed-images' prefix into *FolderForInput*."""
    bucket = boto3.resource('s3').Bucket(bucketName)
    for obj in bucket.objects.all():
        if not obj.key.startswith('preprocessed-images'):
            continue
        basename = obj.key.rsplit('/',1)[1]
        if basename == NameOfZipFile:
            print(NameOfZipFile)
            bucket.download_file(obj.key, FolderForInput+NameOfZipFile)
def downloadTifFile(bucketName, FolderForInput, NameOfTifFile):
    """Download every object under 'preprocessed-images-tif/<NameOfTifFile>' into *FolderForInput*.

    Returns the list of downloaded object keys.
    """
    bucket = boto3.resource('s3').Bucket(bucketName)
    downloaded_keys = []
    key_prefix = 'preprocessed-images-tif/'+NameOfTifFile
    for obj in bucket.objects.all():
        if obj.key.startswith(key_prefix):
            basename = obj.key.rsplit('/',1)[1]
            bucket.download_file(obj.key, FolderForInput+basename)
            downloaded_keys.append(obj.key)
    return downloaded_keys
def uploadProcessedData(FolderForOutput,bucketName):
    """Upload all 'p_'-prefixed result files (skipping '*lock' files) to 'processed-images/'."""
    bucket = boto3.resource('s3').Bucket(bucketName)
    local_paths = [FolderForOutput+name for name in os.listdir(FolderForOutput)]
    for local_path in local_paths:
        basename = local_path.rsplit('/',1)[1]
        if basename.startswith('p_') and not basename.endswith('lock'):
            bucket.upload_file(local_path, "processed-images/"+basename)
def uploadConvertedData(FolderName,bucketName):
    """Upload converted VH and VV tif files to 'preprocessed-images-tif/'.

    *FolderName* must end with a path separator; its 'VH' and 'VV' subfolders
    are scanned and every file they contain is uploaded under its basename.
    (Refactor: the original duplicated the same loop for VH and VV verbatim.)
    """
    s3 = boto3.resource('s3')
    mybucket = s3.Bucket(bucketName)
    for polarisation in ("VH", "VV"):
        subfolder = FolderName+polarisation
        for name in os.listdir(subfolder):
            mybucket.upload_file(subfolder+"/"+name, "preprocessed-images-tif/"+name)
def getFileMetadata(bucketName, filename, key):
    """Return the value stored under *key* in the S3 object's user metadata, or None if absent.

    Bug fix: the original tested ``key is not file.metadata`` — an identity
    comparison between a string and the metadata dict, which is always True —
    so the function unconditionally returned None. The intended membership
    test is ``key not in file.metadata``.
    """
    s3 = boto3.resource("s3")
    file = s3.Object(bucketName, filename)
    if key not in file.metadata:
        return None
    return file.metadata[key]
def updateFileMetadata(bucketName, filename, metadata=None):
    """Merge *metadata* into the S3 object's user metadata.

    S3 metadata is immutable in place, so the object is copied onto itself with
    MetadataDirective="REPLACE" carrying the merged metadata.
    (Fix: the mutable-default ``metadata={}`` is replaced by the None sentinel;
    behaviour for all existing callers is unchanged.)
    """
    s3 = boto3.resource("s3")
    file = s3.Object(bucketName, filename)
    file.metadata.update(metadata or {})
    file.copy_from(CopySource={"Bucket": bucketName, "Key": filename}, Metadata=file.metadata,
                   MetadataDirective="REPLACE")
def deleteErrorFiles(bucketName):
    """Delete tifs flagged 'Out of AOI' plus the source zips they were converted from.

    First pass removes each flagged tif under 'preprocessed-images-tif/' and
    collects the corresponding zip key; a second pass deletes the (deduplicated)
    zips under 'preprocessed-images/'.
    """
    configuration = getDefaultConfigurationFile()
    # NOTE(review): loaded but never used — the literal key below is hard-coded.
    META_DATA_STATUS_KEY = configuration["META_DATA_STATUS_KEY"]
    s3 = boto3.resource("s3")
    bucket = s3.Bucket(bucketName)
    ZipFileToDelete = []
    for key in bucket.objects.all():
        newKey=key.key
        foldername = newKey.split('/',1)[0]
        if (newKey.split('/',1)[1]!="" and foldername=="preprocessed-images-tif"):
            file = s3.Object(bucketName, newKey)
            META = file.metadata
            if 'meta_data_status_key' in file.metadata:
                if META['meta_data_status_key'] == 'Out of AOI':
                    # Reconstruct the originating zip name: strip the extension
                    # and the trailing '_<suffix>' from the tif basename.
                    valueToBeAdded = ((newKey.split('/',1)[1]).split('.',1)[0]).rsplit('_',1)[0]
                    valueToBeAdded = "preprocessed-images/"+valueToBeAdded+".zip"
                    ZipFileToDelete.append(valueToBeAdded)
                    bucket.delete_objects(Delete={'Objects': [{'Key': newKey}]})
                    print("\nDeleted "+str(newKey))
    # Several tifs map to one zip; deduplicate before deleting.
    ZipFileToDelete = set(ZipFileToDelete)
    for i in ZipFileToDelete:
        bucket.delete_objects(Delete={'Objects': [{'Key': i}]})
        print("\nDeleted "+str(i))
    return None
def ResetProcessingStatus(bucketName):
    """Flip every tif stuck in 'Processing' back to 'Retry' so it gets picked up again."""
    configuration = getDefaultConfigurationFile()
    # NOTE(review): loaded but never used — the literal key below is hard-coded.
    META_DATA_STATUS_KEY = configuration["META_DATA_STATUS_KEY"]
    s3 = boto3.resource("s3")
    bucket = s3.Bucket(bucketName)
    for key in bucket.objects.all():
        newKey=key.key
        foldername = newKey.split('/',1)[0]
        if (newKey.split('/',1)[1]!="" and foldername=="preprocessed-images-tif"):
            file = s3.Object(bucketName, newKey)
            META = file.metadata
            if 'meta_data_status_key' in file.metadata:
                if META['meta_data_status_key'] == 'Processing':
                    print(newKey)
                    updateFileMetadata(bucketName, newKey, {"meta_data_status_key": "Retry"})
    return None
def getNumberOfFiles(bucketName):
    """Print progress statistics for the processing pipeline.

    Counts objects in the 'preprocessed-images', 'preprocessed-images-tif' and
    'processed-images' folders, lists keys still lacking status metadata, and
    counts files above the 5 GiB metadata-update limit.
    """
    configuration = getDefaultConfigurationFile()
    # NOTE(review): loaded but never used — the literal key below is hard-coded.
    META_DATA_STATUS_KEY = configuration["META_DATA_STATUS_KEY"]
    s3 = boto3.resource("s3")
    bucket = s3.Bucket(bucketName)
    preprocessedZipCount = 0
    preprocessedTifCount = 0
    waterDetectedTifCount = 0
    BigFiles = 0
    print("\nYet to be processed : \n")
    for key in bucket.objects.all():
        newKey=key.key
        foldername = newKey.split('/',1)[0]
        if (newKey.split('/',1)[1]!="" and foldername=="preprocessed-images"):
            preprocessedZipCount = preprocessedZipCount + 1
            file = s3.Object(bucketName, newKey)
            if 'meta_data_status_key' not in file.metadata:
                print(newKey)
                # 5368709120 bytes = 5 GiB, the metadata-update size limit.
                if key.size>5368709120:
                    BigFiles = BigFiles + 1
        if (newKey.split('/',1)[1]!="" and foldername=="preprocessed-images-tif"):
            preprocessedTifCount = preprocessedTifCount + 1
            file = s3.Object(bucketName, newKey)
            if 'meta_data_status_key' not in file.metadata:
                print(newKey)
        if (newKey.split('/',1)[1]!="" and foldername=="processed-images"):
            waterDetectedTifCount = waterDetectedTifCount + 1
    print("\nNumber of Pre-processed Zip Files : "+str(preprocessedZipCount))
    # Each zip yields 6 tifs and each processed scene 5 outputs — hence the divisors.
    # NOTE(review): assumption inferred from these divisors only; confirm.
    print("\nNumber of Pre-processed Tif Files : "+str(preprocessedTifCount/6))
    print("\nNumber of Processed Tif Files : "+str(waterDetectedTifCount/5))
    print("\nNumber of BigFiles : "+ str(BigFiles))
    return None
def extractDateFromFileName(key):
    """Return the 8-character date slice (positions 37-44) embedded in a zip key."""
    date_start = 37
    return key[date_start:date_start + 8]
def extractDateFromTifFileName(key):
    """Return the 8-character date slice (positions 41-48) embedded in a tif key."""
    date_start = 41
    return key[date_start:date_start + 8]
|
#!/bin/python
# Python module to generate regional plot for Regional Mitigation paper
# All on one plot
# ./MITIGATION_Paper_Regions_plot.py N All 0:6:11:12:16 4 7 2
# Split into several plots
# ./MITIGATION_Paper_Regions_plot_CH4_emissions.py N RCP19 26:1:23:25:2:22:17:10:11:21:20:3 0:6:11:12:16 4 3 2
# ./MITIGATION_Paper_Regions_plot_CH4_emissions.py N RCP19 8:7:13:24:4:9:5:19:6:0:15:18 0:6:11:12:16 4 3 2
# ./MITIGATION_Paper_Regions_plot_CH4_emissions.py N RCP19 8:7:13:24:4:9:5:19:6:14:16:12 0:6:11:12:16 4 3 2
# ESD Paper, April 2020
# ./MITIGATION_Paper_Regions_plot_CH4_emissions.py N RCP19 26:1:23:9:17:3:0:9:5:15:18:12 0:6:11:12:16 4 3 2
# Latex version, January 2021
# ./MITIGATION_Paper_Regions_plot_CH4_emissions.py N RCP19 26:1:23:9:17:3:0:9:5:15:18:12 0:6:11:12:16 4 3 2 pdf 20210102
# Garry Hayman
# Centre for Ecology and Hydrology
# June 2018
# Contains
import os
import sys
import numpy as np
import datetime
import plot_functions
# Positional command-line arguments (see the usage examples in the header).
DEBUG = sys.argv[1]          # 'Y' enables diagnostic output
RCP_OPT = sys.argv[2]        # scenario selector: 'RCP26' or 'RCP19'
REGIONS = sys.argv[3]        # 'All' or colon-separated IMAGE region codes
SECTORS = sys.argv[4]        # 'All' or colon-separated sector codes
NROWS = int(sys.argv[5])     # subplot grid rows
NCOLUMNS = int(sys.argv[6])  # subplot grid columns
PLOT_OPT = sys.argv[7]       # option passed through to plot_functions
FILE_EXT = sys.argv[8]       # output image format
sDATE = sys.argv[9]          # date stamp used in the output filename
HOME_DIR = '/users/eow/garr/Work/Projects/CLIFFTOP/Paper_Synthesis/PLOTS_Methane/'
MISS_DATA = -999.9           # missing-data sentinel
LEGEND_POS = -1              # legend position flag for plot_functions
if FILE_EXT not in ['png','jpg','eps','pdf']:
    # Fix: converted the lone Python 2 print *statement* to a call, consistent
    # with every other print in this script (and required on Python 3).
    print("Invalid file format selected - will default to jpg")
    FILE_EXT = 'jpg'
# Figure geometry and axis settings. WIDTH0/HEIGHT0 are the per-subplot size
# (inches); XMIN/XMAX/XINC define the x-axis (year) range and tick spacing.
# NOTE(review): FONTSIZES is passed straight to plot_functions — presumably
# [ticks, legend, subtitle, title]; confirm against plot_functions.
# Original settings
#WIDTH0,HEIGHT0 = 4.0, 5.0
#XMIN,XMAX,XINC = 2000,2100,10
#FONTSIZES = [ 8,8,8,10 ]
# Settings for Figures in Synthesis Paper - submitted (tight_layout commented out)
#WIDTH0,HEIGHT0 = 4.0, 5.0
#XMIN,XMAX,XINC = 2000,2100,20
#FONTSIZES = [ 10,10,10,12 ]
#LINE_WIDTH = 1.0
#DPI = 600
# Settings for Figures in Synthesis Paper - revised: same size with increased font size & linewidth
WIDTH0,HEIGHT0 = 4.0, 5.0
XMIN,XMAX,XINC = 2000,2100,20
FONTSIZES = [ 12,12,14,14 ]
LINE_WIDTH = 2.0
DPI = 300
# Settings for Figures in Synthesis Paper - revised: test smaller figure setting
#WIDTH0,HEIGHT0 = 2.0, 2.5
#XMIN,XMAX,XINC = 2000,2100,20
#FONTSIZES = [ 6, 6, 6, 8 ]
#LINE_WIDTH = 1.0
# All CSV input and plot output paths are relative to HOME_DIR.
os.chdir(HOME_DIR)
# Information on JULES offline runs
# FILES_CH4 rows: [index label, legend prefix, CSV filename].
# NOTE(review): if RCP_OPT is neither 'RCP26' nor 'RCP19', FILES_CH4 is never
# defined and the script fails later with NameError — confirm intended usage.
if RCP_OPT == 'RCP26':
    FILES_CH4 = [ \
#        [' 0:','RCP2.6:', 'SSP2_CH4emissions_Region_RCP26.csv' ], \
#        [' 1:','Ref:', 'SSP2_CH4emissions_Region_Ref.csv' ] \
        [' 0:','SSP2 RCP2.6:', 'SSP2_CH4emissions_Region_RCP26.csv' ], \
        [' 1:','SSP2 Baseline:', 'SSP2_CH4emissions_Region_Ref.csv' ] \
    ]
elif RCP_OPT == 'RCP19':
    FILES_CH4 = [ \
        [' 0:','SSP2 RCP1.9:', 'SSP2_CH4emissions_Region_RCP19.csv' ], \
        [' 1:','SSP2 Baseline:', 'SSP2_CH4emissions_Region_Ref.csv' ] \
    ]
# DATA_REGIONS: region code -> [abbreviation, plot label, y-axis maximum, y tick increment]
# (indices [1], [2], [3] are used below for the subplot title and y-axis scaling).
DATA_REGIONS = dict([ \
    ( '1', [ 'CAN', 'Canada', 10, 1 ]), \
    ( '23', [ 'USA', 'USA', 60, 10 ]), \
    ( '10', [ 'MEX', 'Mexico', 10, 1 ]), \
    ( '13', [ 'RCAM', 'Central America', 10, 1 ]), \
    ( '0', [ 'BRA', 'Brazil', 40, 5 ]), \
    ( '15', [ 'RSAM', 'Rest of S. America', 50, 5 ]), \
    ( '11', [ 'NAF', 'Northern Africa', 10, 1 ]), \
    ( '24', [ 'WAF', 'Western Africa', 50, 5 ]), \
    ( '4', [ 'EAF', 'Eastern Africa', 50, 5 ]), \
    ( '18', [ 'SAF', 'South Africa', 50, 5 ]), \
    ( '14', [ 'RSAF', 'Rest of S. Africa', 10, 1 ]), \
    ( '25', [ 'WEU', 'Western Europe', 50, 5 ]), \
    ( '2', [ 'CEU', 'Central Europe', 10, 1 ]), \
    ( '21', [ 'TUR', 'Turkey', 10, 1 ]), \
    ( '22', [ 'UKR', 'Ukraine Region', 10, 1 ]), \
    ( '20', [ 'STAN', 'Central Asia', 20, 5 ]), \
    ( '17', [ 'RUS', 'Russia Region', 50, 5 ]), \
    ( '9', [ 'ME', 'Middle East', 50, 5 ]), \
    ( '5', [ 'INDIA','India', 100, 10 ]), \
    ( '8', [ 'KOR', 'Korea Region', 10, 1 ]), \
    ( '3', [ 'CHN', 'China', 100, 10 ]), \
    ( '19', [ 'SEAS', 'Southeastern Asia', 25, 5 ]), \
    ( '6', [ 'INDO', 'Indonesia', 20, 5 ]), \
    ( '7', [ 'JAP', 'Japan', 10, 1 ]), \
    ( '16', [ 'RSAS', 'Rest of S. Asia', 20, 5 ]), \
    ( '12', [ 'OCE', 'Oceania', 30, 5 ]), \
    ( '26', [ 'World','World', 500, 50 ]) \
])
# DATA_SECTORS rows: [code, short legend label, CSV variable name].
DATA_SECTORS = [ \
    [ ' 0', 'Total' ,'Emissions|CH4' ], \
    [ ' 1', 'Energy Ind' ,'Emissions|CH4|Energy Demand|Industry'], \
    [ ' 2', 'Energy Res' ,'Emissions|CH4|Energy Demand|Residential and Commercial'], \
    [ ' 3', 'Energy Tra' ,'Emissions|CH4|Energy Demand|Transportation'], \
    [ ' 4', 'Energy Tra' ,'Emissions|CH4|Energy Demand|Transportation|Ground Transportation'], \
    [ ' 5', 'Energy Sup' ,'Emissions|CH4|Energy Supply'], \
    [ ' 6', 'Energy' ,'Emissions|CH4|Energy Supply and Demand'], \
    [ ' 7', 'Land Use' ,'Emissions|CH4|Land Use'], \
    [ ' 8', 'Agric Burn' ,'Emissions|CH4|Land Use|Agricultural Waste Burning'], \
    [ ' 9', 'Agric' ,'Emissions|CH4|Land Use|Agriculture'], \
    [ '10', 'Agric Waste' ,'Emissions|CH4|Land Use|Agriculture|AWM'], \
    [ '11', 'Agric Cattle','Emissions|CH4|Land Use|Agriculture|Enteric Fermentation'], \
    [ '12', 'Agric Rice' ,'Emissions|CH4|Land Use|Agriculture|Rice'], \
    [ '13', 'Forest Burn' ,'Emissions|CH4|Land Use|Forest Burning'], \
    [ '14', 'Savanna Burn','Emissions|CH4|Land Use|Savannah Burning'], \
    [ '15', 'Other' ,'Emissions|CH4|Other'], \
    [ '16', 'Waste' ,'Emissions|CH4|Waste'] \
]
# Number of decadal time points per series (2000-2100 inclusive at 10-year steps).
nTIMES = 11
nFILES_CH4 = len(FILES_CH4)
nREGIONS_ALL = len(DATA_REGIONS)
nSECTORS_ALL = len(DATA_SECTORS)
if REGIONS == 'All':
    nREGIONS = nREGIONS_ALL
#    REGIONS_NUM = [ str(iREGION) for iREGION in range(nREGIONS) ]
    # Fixed presentation order for the 'All' case (World first, then by continent).
    REGIONS_NUM = [ '26','1','23','25','2','22','17','10','11','21','20','3','8', \
        '7','13','24','4','9','5','19','6','0','15','18','14','16','12' ]
else:
    REGIONS_NUM = REGIONS.split(':')
    nREGIONS = len(REGIONS_NUM)
print('Region codes: ',REGIONS_NUM)
if SECTORS == 'All':
    nSECTORS = nSECTORS_ALL
    SECTORS_NUM = [ str(iSECTOR) for iSECTOR in range(nSECTORS) ]
else:
    SECTORS_NUM = SECTORS.split(':')
    nSECTORS = len(SECTORS_NUM)
print('Sector codes: ',SECTORS_NUM)
# Every selected region needs its own subplot cell.
if nREGIONS > NROWS*NCOLUMNS:
    print('Invalid plot configuration: nREGIONS <= nROWS*nCOLUMNS')
    quit()
# Loop over and input from CH4 files
INPUT_CH4_ALL = []
SCENARIO_CH4 = []
for iCH4 in range(nFILES_CH4):
    INPUT_CH4_FILE = []
    SCENARIO_CH4.append(FILES_CH4[iCH4][1])
    FILE_CSV = FILES_CH4[iCH4][2]
    print('Opening file: '+FILE_CSV)
    # Read the CSV by hand, stripping line endings; each row becomes a list of fields.
    for LINE in open(FILE_CSV):
        LINE_SPLIT = LINE.replace('\n','').replace('\r','').split(',')
        INPUT_CH4_FILE.append(LINE_SPLIT)
        if DEBUG == 'Y': print(len(LINE_SPLIT),LINE_SPLIT)
    INPUT_CH4_ALL.append(INPUT_CH4_FILE)
# Extract information - each file: nREGIONS_ALL * nSECTORS_ALL + 1
# Assume regions and sectors in order in REGIONS_DATA and SECTORS_DATA
WIDTH,HEIGHT = WIDTH0*NCOLUMNS,HEIGHT0*NROWS
NXTICKS = int((XMAX-XMIN)/XINC)+1
XTICKS = XMIN+XINC*np.arange(NXTICKS)
XLABEL = 'Year'
YLABEL = 'CH$_4$ Emissions (Mt CH$_4$ yr$^{-1}$)'
# Per-subplot accumulators passed to plot_functions (one entry per region).
XPLOT_A,YPLOT_A = [],[]
XMIN_A,XMAX_A,XTICKS_A,XLABEL_A = [],[],[],[]
YMIN_A,YMAX_A,YTICKS_A,YLABEL_A = [],[],[],[]
NDATASETS = []
SUBTITLES = []
LEGEND = []
PLOT_CODES = []
LINE_CODES = []
PLOT_TEXT = []
PLOT_TITLE = 'CH$_4$ Emissions by IMAGE Region'
FILE_PLOT = HOME_DIR+'Plot_Methane_Emissions_Image_Region_'+sDATE+'.'+FILE_EXT
# Line style per scenario (dotted = mitigation file, solid = baseline file).
LINE_CODES_BAS = [':','-']
PLOT_COLOURS = ['black','darkorange','blue' ,'green' ,'magenta' ,'yellow' ,'cyan' ]
# Build one subplot per selected region: for each region, extract the selected
# sectors' time series from both CH4 files and record axis/style settings.
for sREGION in REGIONS_NUM:
    iREGION = int(sREGION)
    xFIRST = True
    XPLOT,YPLOT = [],[]
    XMIN_A.append(XMIN)
    XMAX_A.append(XMAX)
    XTICKS_A.append(XTICKS)
    XLABEL_A.append(XLABEL)
    # Region-specific y-axis range from DATA_REGIONS ([2]=max, [3]=increment).
    YMIN,YMAX,YINC = 0,int(DATA_REGIONS[sREGION][2]),int(DATA_REGIONS[sREGION][3])
    NYTICKS = int((YMAX-YMIN)/YINC)+1
    YTICKS = YMIN+YINC*np.arange(NYTICKS)
    YMIN_A.append(YMIN)
    YMAX_A.append(YMAX)
    YTICKS_A.append(YTICKS)
    YLABEL_A.append(YLABEL)
    SUBTITLES.append(DATA_REGIONS[sREGION][1])
    LINE_CODES_TMP = []
    PLOT_CODES_TMP = []
    iCODE = 0
    # First file (mitigation scenario): rows are laid out as
    # iSECTOR + iREGION*nSECTORS_ALL + 1 (row 0 holds the year header).
    for sSECTOR in SECTORS_NUM:
        iSECTOR = int(sSECTOR)
        iLINE = iSECTOR+iREGION*nSECTORS_ALL+1
        TEMP_X = []
        TEMP_Y = []
        # Data columns start at offset 5 in each CSV row.
        for iTIME in range(nTIMES):
            TEMP_X.append(float(INPUT_CH4_ALL[0][0][iTIME+5]))
            TEMP_Y.append(float(INPUT_CH4_ALL[0][iLINE][iTIME+5]))
        if xFIRST:
            XPLOT.append(TEMP_X)
            xFIRST = False
        YPLOT.append(TEMP_Y)
        LINE_CODES_TMP.append(LINE_CODES_BAS[0])
        PLOT_CODES_TMP.append(PLOT_COLOURS[iCODE])
        iCODE += 1
    iCODE = 0
    # Second file (baseline scenario): same rows, solid line style.
    for sSECTOR in SECTORS_NUM:
        iSECTOR = int(sSECTOR)
        iLINE = iSECTOR+iREGION*nSECTORS_ALL+1
        TEMP_X = []
        TEMP_Y = []
        for iTIME in range(nTIMES):
            TEMP_X.append(float(INPUT_CH4_ALL[1][0][iTIME+5]))
            TEMP_Y.append(float(INPUT_CH4_ALL[1][iLINE][iTIME+5]))
        YPLOT.append(TEMP_Y)
        LINE_CODES_TMP.append(LINE_CODES_BAS[1])
        PLOT_CODES_TMP.append(PLOT_COLOURS[iCODE])
        iCODE += 1
    LINE_CODES.append(LINE_CODES_TMP)
    PLOT_CODES.append(PLOT_CODES_TMP)
    XPLOT_A.append(np.array(XPLOT).squeeze())
    # Shape becomes (nTIMES, nSECTORS*2) as plot_functions expects.
    YPLOT_A.append(np.swapaxes(np.array(YPLOT),0,1))
    NDATASETS.append(nSECTORS*2)
# Names of sectors
for sSECTOR in SECTORS_NUM:
    iSECTOR = int(sSECTOR)
    LEGEND.append(SCENARIO_CH4[0]+' '+DATA_SECTORS[iSECTOR][1])
for sSECTOR in SECTORS_NUM:
    iSECTOR = int(sSECTOR)
    LEGEND.append(SCENARIO_CH4[1]+' '+DATA_SECTORS[iSECTOR][1])
#DEBUG = 'Y'
if DEBUG == 'Y':
    print('XPLOT_A: ',XPLOT_A)
    print('YPLOT_A: ',YPLOT_A)
    print('PLOT_CODES: ',PLOT_CODES)
    print('LEGEND: ',LEGEND_POS,LEGEND)
# Hand everything to the project plotting helper (writes FILE_PLOT).
plot_functions.Plot_General_MultiPlot_general2( \
    NROWS,NCOLUMNS,NDATASETS,XPLOT_A,YPLOT_A, \
    XMIN_A,XMAX_A,XTICKS_A,XTICKS_A,XLABEL_A, \
    YMIN_A,YMAX_A,YTICKS_A,YTICKS_A,YLABEL_A, \
    WIDTH,HEIGHT,FONTSIZES,PLOT_TITLE,SUBTITLES,PLOT_TEXT, \
    LEGEND,LEGEND_POS,PLOT_CODES,PLOT_OPT,FILE_PLOT,DEBUG,DPI,LINE_WIDTH,LINE_CODES)
# End of Program
|
# 650. 2 Keys Keyboard
# Runtime: 52 ms, faster than 61.81% of Python3 online submissions for 2 Keys Keyboard.
# Memory Usage: 14.2 MB, less than 59.32% of Python3 online submissions for 2 Keys Keyboard.
class Solution:
    # Prime Factorization
    def minSteps(self, n: int) -> int:
        """Return the minimum number of Copy-All/Paste steps to reach n 'A's.

        Producing a run of p copies costs p steps (1 copy + p-1 pastes), so the
        optimum is the sum of n's prime factors (with multiplicity).
        """
        ans = 0
        d = 2
        while n > 1:
            while n % d == 0:
                ans += d
                # Fix: use floor division — the original `n /= d` turned n into
                # a float on Python 3, risking precision loss for large n.
                n //= d
            d += 1
        return ans
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unnecessary-comprehension
"""
Simulate the imported model in x86.
"""
import logging
import tarfile
import tempfile
import os
import yaml
import time
import numpy as np
import tvm
from tvm import runtime
from tvm.contrib import graph_executor
from tvm.contrib import hhb_runtime
from core.frontend_manage import import_model
from core.common import hhb_register_parse, print_top5, HHBException, ensure_dir, AttributeDict
from core.common import generate_config_file, ALL_ARGUMENTS_DESC, collect_arguments_info
from core.arguments_manage import (
add_preprocess_argument,
add_common_argument,
add_simulate_argument,
add_postprocess_argument,
add_import_argument,
add_optimize_argument,
add_quantize_argument,
add_codegen_argument,
ArgumentFilter,
)
from core.hhbir_manage import (
HHBRelayIR,
HHBQNNIR,
HHBFloatCodegenIR,
HHBX86QnnCodegenIR,
get_input_info_from_relay,
get_output_info_from_relay,
)
from core.quantization_manage import (
collect_quantization_config,
set_quantize_params_by_board,
get_quantize_config,
)
from core.preprocess_manage import (
collect_preprocess_config,
set_preprocess_params,
DatasetLoader,
)
from core.codegen_manage import collect_codegen_config
# pylint: disable=invalid-name
logger = logging.getLogger("HHB")
@hhb_register_parse
def add_benchmark_parser(subparsers):
    """ Include parser for 'benchmark' subcommand """
    parser = subparsers.add_parser("benchmark")
    parser.set_defaults(func=driver_benchmark)
    parser.add_argument(
        "--reference-label", metavar="", type=str, help="The true labels of test dataset."
    )
    parser.add_argument(
        "--print-interval",
        metavar="",
        type=int,
        default=100,
        help="Print log every time how many images are inferred",
    )
    parser.add_argument("--save-temps", action="store_true", help="Save temp files.")
    parser.add_argument(
        "--no-quantize", action="store_true", help="If set, don't quantize the model."
    )
    # Shared HHB argument groups (import/quantize/simulate/pre-/post-process etc.).
    add_import_argument(parser)
    add_quantize_argument(parser)
    add_simulate_argument(parser)
    add_preprocess_argument(parser)
    add_postprocess_argument(parser)
    add_common_argument(parser)
    add_optimize_argument(parser)
    add_codegen_argument(parser)
    parser.add_argument("-v", "--verbose", action="count", default=0, help="Increase verbosity")
    parser.add_argument(
        "-o",
        "--output",
        default="hhb_out",
        help="The directory that holds the result files.",
    )
    parser.add_argument(
        "FILE", nargs="+", help="Path to the input model file, can pass multi files"
    )
    # Record the parser's options for the global help/description table.
    ALL_ARGUMENTS_DESC["benchmark"] = collect_arguments_info(parser._actions)
def driver_benchmark(args_filter: ArgumentFilter):
    """Driver main command.

    Imports the model, optionally quantizes it, codegens for the x86 reference
    board, then runs the simulate dataset and reports top-1/top-5 accuracy
    against --reference-label.
    """
    args = args_filter.filtered_args
    args.output = ensure_dir(args.output)
    if args.generate_config:
        generate_config_file(os.path.join(args.output, "cmd_benchmark_params.yml"))
    mod, params = import_model(
        args.FILE, args.model_format, args.input_name, args.input_shape, args.output_name
    )
    relay_ir = HHBRelayIR()
    relay_ir.set_model(mod, params)
    input_name_list, input_shape_list, _ = get_input_info_from_relay(mod, params)
    # filter arguments and prepare all needed args
    all_filters = [
        collect_preprocess_config,
        set_preprocess_params,
        collect_quantization_config,
        set_quantize_params_by_board,
        collect_codegen_config,
    ]
    extra_args = AttributeDict()
    extra_args.input_shape = input_shape_list
    args_filter.filter_argument(all_filters, extra=extra_args)
    args = args_filter.filtered_args
    if not args.no_quantize:
        # Calibration data is optional; an empty list is passed otherwise.
        dataset_list = []
        if args.calibrate_dataset:
            logger.info("get calibrate dataset from %s", args.calibrate_dataset)
            dl = DatasetLoader(
                args.calibrate_dataset, args.preprocess_config, input_shape_list, input_name_list
            )
            dataset = dl.get_data()
            for d in dataset:
                dataset_list.append(d)
        qconfig = get_quantize_config(args.quantize_config)
        qnn_ir = HHBQNNIR()
        qnn_ir.convert((mod, params), qconfig, dataset_list)
    # Only the x86 reference board can be simulated here.
    if args.board == "x86_ref":
        if args.no_quantize:
            x86_codegen_ir = HHBFloatCodegenIR()
            x86_codegen_ir.convert((mod, params), args.board, args.opt_level)
        else:
            x86_codegen_ir = HHBX86QnnCodegenIR()
            config_dict = get_quantize_config(args.quantize_config)
            x86_codegen_ir.convert(
                qnn_ir.get_model(), args.board, args.opt_level, args.output, config_dict
            )
    else:
        raise HHBException("can not simulate anole, light or c860.")
    ctx = tvm.cpu(0)
    if args.no_quantize:
        m = graph_executor.GraphModule(x86_codegen_ir.get_model()["default"](ctx))
    else:
        m = hhb_runtime.create(
            x86_codegen_ir.get_model(), qnn_ir.get_model()[0], ctx, output_dir=args.output
        )
        m.set_params(os.path.join(args.output, x86_codegen_ir.params_name))
    dl = DatasetLoader(
        args.simulate_data,
        args.preprocess_config,
        input_shape_list,
        input_name_list,
    )
    dataset = dl.get_data()
    # prepare inference labels
    # if args.simulate_data is None:
    #     raise HHBException("Please specify validate dataset directory.")
    # Reference file format: "<image name> ... <integer label>" per line.
    imgname2label = {}
    if not args.reference_label:
        raise HHBException("Please specify validate label.")
    with open(args.reference_label, "r") as f:
        for line in f:
            tmp = line.strip().split(" ")
            imgname = tmp[0]
            label = int(tmp[-1])
            imgname2label[imgname] = label
    inter_log_list = ["Filename \t\t\t\t\t\t\t Top1 \t\t\t Top5\n"]
    top1 = 0
    top5 = 0
    index = 0
    t_total_start = time.time()
    t_mid_start = t_total_start
    t_total = 0
    for data in dataset:
        inter_log = ""
        m.run(**data)
        # NOTE(review): asnumpy() is deprecated in newer TVM in favour of
        # numpy() — confirm against the pinned TVM version.
        output = m.get_output(0).asnumpy()
        output = np.squeeze(output)
        idx = np.argsort(output)
        idx = idx[::-1]
        label = imgname2label[dl.all_file_path[index].strip().split("/")[-1]]
        # 1001-class outputs include a background class at index 0; shift labels.
        if output.size == 1001:
            label += 1
        inter_log += os.path.basename(dl.all_file_path[index]) + " \t\t "
        if idx[0] == label:
            top1 += 1
            inter_log += "true" + " \t\t\t "
        else:
            inter_log += "false" + " \t\t\t "
        if label in idx[:5]:
            top5 += 1
            inter_log += "true\n"
        else:
            inter_log += "false\n"
        inter_log_list.append(inter_log)
        index += 1
        if index % args.print_interval == 0:
            t_mid_end = time.time()
            print(
                "num-{} top1:{}, top5:{}, time cost: {}s".format(
                    index, (top1 / index), (top5 / index), (t_mid_end - t_mid_start)
                )
            )
            # NOTE(review): t_total only accumulates at print-interval
            # boundaries, so time after the last full interval is not counted.
            t_total += t_mid_end - t_mid_start
            t_mid_start = t_mid_end
    if args.save_temps:
        args.output = ensure_dir(args.output)
        with open(os.path.join(args.output, "inter_results.log"), "w") as f:
            f.writelines(inter_log_list)
    print("Total time cost: {}s".format(t_total))
|
from logging import (
StreamHandler
)
from pystashlog.connection import (
Connection,
INVALID_MESSAGE_TYPE_ERROR,
)
from pystashlog.secure_connection import SecureConnection
from pystashlog.exceptions import (
StashError,
ConnectionError,
TimeoutError,
)
from pystashlog.logger import logger as log
class Stash(StreamHandler):
    """Logging handler that ships formatted records to a Logstash server over a
    socket (plain or SSL), using pystashlog's Connection classes.
    """

    def __init__(self, host='localhost', port=5000, socket_type=0,
                 socket_timeout=None, socket_connect_timeout=None,
                 socket_keepalive_options=None, retry_on_timeout=False,
                 socket_keepalive=False, ssl=False, ssl_keyfile=None,
                 ssl_certfile=None, ssl_cert_reqs='required', ssl_ca_certs=None,
                 ssl_check_hostname=False,
                 health_check_interval=0):
        # call parent's constructor
        super(Stash, self).__init__()
        # NOTE(review): socket_type is accepted but never forwarded to the
        # connection — confirm whether it should be part of kwargs.
        self.ssl = ssl
        # Arguments forwarded verbatim to Connection/SecureConnection.
        self.kwargs = {
            'host': host,
            'port': port,
            'socket_connect_timeout': socket_connect_timeout,
            'socket_keepalive': socket_keepalive,
            'socket_keepalive_options': socket_keepalive_options,
            'socket_timeout': socket_timeout,
            'retry_on_timeout': retry_on_timeout,
            'health_check_interval': health_check_interval
        }
        if ssl:
            log.info('stash uses SSL')
            self.kwargs.update({
                'ssl_keyfile': ssl_keyfile,
                'ssl_certfile': ssl_certfile,
                'ssl_cert_reqs': ssl_cert_reqs,
                'ssl_ca_certs': ssl_ca_certs,
                'ssl_check_hostname': ssl_check_hostname,
            })
        self.connection = None
        self._create_connection()

    # create socket connection
    def _create_connection(self):
        """Open the (secure) connection to the logstash server."""
        log.info('stash creating connection')
        # check if the connection uses SSL
        if self.ssl:
            self.connection = SecureConnection(**self.kwargs)
        else:
            # default connection without SSL
            self.connection = Connection(**self.kwargs)
        # call connect function from Connection
        self.connection.connect()
        log.info('stash client connected to the logstash server')

    # write message to socket
    def write(self, message):
        """Send a str or bytes payload; raise StashError for anything else."""
        if isinstance(message, str):
            self.connection.write_str(message)
        elif isinstance(message, bytes):
            self.connection.write_bytes(message)
        else:
            log.error('error: writing message to logstash server')
            raise StashError(INVALID_MESSAGE_TYPE_ERROR)

    # override emit from logging.StreamHandler
    def emit(self, record):
        """Format the record and push it to logstash immediately."""
        log_entry = self.format(record)
        self.write(log_entry)
        # flush
        self.flush()

    # override close from logging.StreamHandler -> logging.Handler
    def close(self):
        """Release the socket, then run logging.Handler's own teardown."""
        self.disconnect()
        # Fix: the original never called super().close(), so the handler was
        # not removed from logging's internal handler list at shutdown.
        super(Stash, self).close()

    # disconnect socket
    def disconnect(self):
        """Close and drop the connection; socket errors during close are ignored."""
        log.info('releasing stash connection')
        if self.connection is None:
            return
        try:
            self.connection.close()
        except OSError:
            # Best-effort close: the socket may already be gone.
            pass
        log.info('releasing stash connection succeed')
        self.connection = None
# -*- coding: utf-8 -*-
import unittest
from openprocurement.auctions.core.tests.base import snitch
from openprocurement.auctions.geb.tests.base import (
BaseWebTest,
)
from openprocurement.auctions.geb.tests.blanks.draft import (
phase_commit,
check_generated_rectification_period,
check_generated_tender_period,
check_generated_enquiry_period,
invalid_phase_commit,
phase_commit_invalid_auctionPeriod
)
from openprocurement.auctions.geb.tests.helpers import (
change_machine_state
)
from openprocurement.auctions.geb.tests.states import (
ProcedureMachine
)
from openprocurement.auctions.geb.tests.fixtures.draft import (
AUCTION_WITH_INVALID_AUCTON_PERIOD,
)
class StatusDraftTest(BaseWebTest):
    """Checks for auctions in the 'draft' status (phase commit and generated periods)."""
    # Each test body lives in tests/blanks/draft.py; snitch() binds it as a method.
    test_phase_commit = snitch(phase_commit)
    test_check_generated_rectification_period = snitch(check_generated_rectification_period)
    test_check_generated_tender_period = snitch(check_generated_tender_period)
    test_check_generated_enquiry_period = snitch(check_generated_enquiry_period)
    test_invalid_phase_commit = snitch(invalid_phase_commit)

    def setUp(self):
        """Create a draft-state auction snapshot and the API entrypoints the blanks use."""
        super(StatusDraftTest, self).setUp()
        procedure = ProcedureMachine()
        procedure.set_db_connector(self.db)
        change_machine_state(procedure, 'draft')
        context = procedure.snapshot()
        auction = context['auction']
        entrypoints = {}
        entrypoints['get_auction'] = '/auctions/{}'.format(auction['data']['id'])
        entrypoints['patch_auction'] = '/auctions/{}?acc_token={}'.format(auction['data']['id'],
                                                                          auction['access']['token'])
        self.auction = auction
        self.ENTRYPOINTS = entrypoints
class StatusDraftInvalidAuctionPeriodTest(BaseWebTest):
    """Checks committing a draft auction whose auctionPeriod is invalid."""

    test_phase_commit_invalid_auctionPeriod = snitch(phase_commit_invalid_auctionPeriod)

    def setUp(self):
        """Create a draft auction from the invalid-auctionPeriod fixture."""
        super(StatusDraftInvalidAuctionPeriodTest, self).setUp()
        procedure = ProcedureMachine()
        procedure.set_db_connector(self.db)
        change_machine_state(procedure, 'draft')
        context = procedure.snapshot(fixture=AUCTION_WITH_INVALID_AUCTON_PERIOD)
        auction = context['auction']
        entrypoints = {}
        entrypoints['patch_auction'] = '/auctions/{}?acc_token={}'.format(auction['data']['id'],
                                                                          auction['access']['token'])
        self.auction = auction
        self.ENTRYPOINTS = entrypoints
def suite():
    """Assemble the draft-status test cases into one test suite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(StatusDraftTest))
    tests.addTest(unittest.makeSuite(StatusDraftInvalidAuctionPeriodTest))
    return tests


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
def number_triangle(rows=4):
    """Return the triangle of consecutive integers as a newline-joined string.

    Row i (1-based) contains the next i consecutive integers, so the
    default ``rows=4`` yields '1', '23', '456', '78910'.  The original
    hard-coded 4 rows; *rows* generalizes that while keeping the default.
    """
    lines = []
    num = 1
    for i in range(1, rows + 1):
        lines.append("".join(str(num + k) for k in range(i)))
        num += i
    return "\n".join(lines)


# Preserve the original script behavior: print the 4-row triangle.
print(number_triangle())
|
import numpy as np
import pandas as pd
from tiro_fhir import ValueSet
@pd.api.extensions.register_series_accessor("code")
class CodeOperations:
    """``Series.code`` accessor with helpers for coded (FHIR-style) values."""

    def __init__(self, pandas_obj):
        self._obj: pd.Series = pandas_obj

    def isin(self, vs: "ValueSet"):
        """Element-wise membership: True where the element is contained in *vs*.

        Returns a bool-dtype Series aligned with the original index.
        (Annotation is a string so the type is resolved lazily.)
        """
        return self._obj.apply(lambda c: c in vs).astype("bool")

    def unique(self):
        """Concatenate the ``coding`` lists of every element into one array.

        NOTE(review): despite the name, no de-duplication is performed —
        confirm whether callers rely on that.
        """
        flattened = []
        # BUG FIX: Series.iteritems() was removed in pandas 2.0;
        # Series.items() is the drop-in replacement.
        for _, element in self._obj.items():
            flattened.extend(element.coding)
        return np.array(flattened)
|
#! /usr/bin/env python
"""
RASPA file format and default parameters.
"""
# Header rows preceding GENERIC_PSEUDO_ATOMS in a RASPA pseudo_atoms.def
# file: the atom-count line ('29' matches the 29 rows below) and the
# column-title line.
# NOTE(review): the title line names 13 columns while each data row has
# 14 fields ('B-factor radii ' seems to cover two) — verify against the
# RASPA pseudo_atoms.def format.
GENERIC_PSEUDO_ATOMS_HEADER = [
    ['# of pseudo atoms'],
    ['29'],
    ['#type ', 'print ', 'as ', 'chem ', 'oxidation ', 'mass ', 'charge ', 'polarization ', 'B-factor radii ', 'connectivity ', 'anisotropic ', 'anisotropic-type ', 'tinker-type ']
]
# Default pseudo-atom definitions, one row per atom type, in the column
# order given by GENERIC_PSEUDO_ATOMS_HEADER.
GENERIC_PSEUDO_ATOMS = [
    ['He ' ,'yes' ,'He' ,'He' ,'0' ,'4.002602' ,' 0.0 ' ,'0.0' ,'1.0' ,'1.0 ' ,'0' ,'0' ,'relative' ,'0'],
    ['CH4_sp3' ,'yes' ,'C ' ,'C ' ,'0' ,'16.04246' ,' 0.0 ' ,'0.0' ,'1.0' ,'1.00 ' ,'0' ,'0' ,'relative' ,'0'],
    ['CH3_sp3' ,'yes' ,'C ' ,'C ' ,'0' ,'15.03452' ,' 0.0 ' ,'0.0' ,'1.0' ,'1.00 ' ,'0' ,'0' ,'relative' ,'0'],
    ['CH2_sp3' ,'yes' ,'C ' ,'C ' ,'0' ,'14.02658' ,' 0.0 ' ,'0.0' ,'1.0' ,'1.00 ' ,'0' ,'0' ,'relative' ,'0'],
    ['CH_sp3 ' ,'yes' ,'C ' ,'C ' ,'0' ,'13.01864' ,' 0.0 ' ,'0.0' ,'1.0' ,'1.00 ' ,'0' ,'0' ,'relative' ,'0'],
    ['C_sp3 ' ,'yes' ,'C ' ,'C ' ,'0' ,'12.0 ' ,' 0.0 ' ,'0.0' ,'1.0' ,'1.00 ' ,'0' ,'0' ,'relative' ,'0'],
    ['H_h2 ' ,'yes' ,'H ' ,'H ' ,'0' ,'1.00794 ' ,' 0.468 ' ,'0.0' ,'1.0' ,'0.7 ' ,'0' ,'0' ,'relative' ,'0'],
    ['H_com ' ,'no ' ,'H ' ,'H ' ,'0' ,'0.0 ' ,'-0.936 ' ,'0.0' ,'1.0' ,'0.7 ' ,'0' ,'0' ,'relative' ,'0'],
    ['C_co2 ' ,'yes' ,'C ' ,'C ' ,'0' ,'12.0 ' ,' 0.70 ' ,'0.0' ,'1.0' ,'0.720' ,'0' ,'0' ,'relative' ,'0'],
    ['O_co2 ' ,'yes' ,'O ' ,'O ' ,'0' ,'15.9994 ' ,'-0.35 ' ,'0.0' ,'1.0' ,'0.68 ' ,'0' ,'0' ,'relative' ,'0'],
    ['O_o2 ' ,'yes' ,'O ' ,'O ' ,'0' ,'15.9994 ' ,'-0.112 ' ,'0.0' ,'1.0' ,'0.7 ' ,'0' ,'0' ,'relative' ,'0'],
    ['O_com ' ,'no ' ,'O ' ,'- ' ,'0' ,'0.0 ' ,' 0.224 ' ,'0.0' ,'1.0' ,'0.7 ' ,'0' ,'0' ,'relative' ,'0'],
    ['N_n2 ' ,'yes' ,'N ' ,'N ' ,'0' ,'14.00674' ,'-0.4048' ,'0.0' ,'1.0' ,'0.7 ' ,'0' ,'0' ,'relative' ,'0'],
    ['N_com ' ,'no ' ,'N ' ,'- ' ,'0' ,'0.0 ' ,' 0.8096' ,'0.0' ,'1.0' ,'0.7 ' ,'0' ,'0' ,'relative' ,'0'],
    ['Ar ' ,'yes' ,'Ar' ,'Ar' ,'0' ,'39.948 ' ,' 0.0 ' ,'0.0' ,'1.0' ,'0.7 ' ,'0' ,'0' ,'relative' ,'0'],
    ['Ow ' ,'yes' ,'O ' ,'O ' ,'0' ,'15.9994 ' ,' 0.0 ' ,'0.0' ,'1.0' ,'0.5 ' ,'2' ,'0' ,'relative' ,'0'],
    ['Hw ' ,'yes' ,'H ' ,'H ' ,'0' ,'1.00794 ' ,' 0.241 ' ,'0.0' ,'1.0' ,'1.00 ' ,'1' ,'0' ,'relative' ,'0'],
    ['Lw ' ,'no ' ,'L ' ,'H ' ,'0' ,'0.0 ' ,'-0.241 ' ,'0.0' ,'1.0' ,'1.00 ' ,'1' ,'0' ,'relative' ,'0'],
    ['C_benz ' ,'yes' ,'C ' ,'C ' ,'0' ,'12.0 ' ,'-0.095 ' ,'0.0' ,'1.0' ,'0.70 ' ,'0' ,'0' ,'relative' ,'0'],
    ['H_benz ' ,'yes' ,'H ' ,'H ' ,'0' ,'1.00794 ' ,' 0.095 ' ,'0.0' ,'1.0' ,'0.320' ,'0' ,'0' ,'relative' ,'0'],
    ['N_dmf ' ,'yes' ,'N ' ,'N ' ,'0' ,'14.00674' ,'-0.57 ' ,'0.0' ,'1.0' ,'0.50 ' ,'0' ,'0' ,'relative' ,'0'],
    ['Co_dmf ' ,'yes' ,'C ' ,'C ' ,'0' ,'12.0 ' ,' 0.45 ' ,'0.0' ,'1.0' ,'0.52 ' ,'0' ,'0' ,'relative' ,'0'],
    ['Cm_dmf ' ,'yes' ,'C ' ,'C ' ,'0' ,'12.0 ' ,' 0.28 ' ,'0.0' ,'1.0' ,'0.52 ' ,'0' ,'0' ,'relative' ,'0'],
    ['O_dmf ' ,'yes' ,'O ' ,'O ' ,'0' ,'15.9994 ' ,'-0.50 ' ,'0.0' ,'1.0' ,'0.78 ' ,'0' ,'0' ,'relative' ,'0'],
    ['H_dmf ' ,'yes' ,'H ' ,'H ' ,'0' ,'1.00794 ' ,' 0.06 ' ,'0.0' ,'1.0' ,'0.22 ' ,'0' ,'0' ,'relative' ,'0'],
    ['Na ' ,'yes' ,'Na' ,'Na' ,'0' ,'22.98977' ,' 1.0 ' ,'0.0' ,'1.0' ,'1.00 ' ,'0' ,'0' ,'relative' ,'0'],
    ['Cl ' ,'yes' ,'Cl' ,'Cl' ,'0' ,'35.453 ' ,'-1.0 ' ,'0.0' ,'1.0' ,'1.00 ' ,'0' ,'0' ,'relative' ,'0'],
    ['Kr ' ,'yes' ,'Kr' ,'Kr' ,'0' ,'83.798 ' ,' 0.0 ' ,'0.0' ,'1.0' ,'1.00 ' ,'0' ,'0' ,'relative' ,'0'],
    ['Xe ' ,'yes' ,'Xe' ,'Xe' ,'0' ,'131.293 ' ,' 0.0 ' ,'0.0' ,'1.0' ,'1.00 ' ,'0' ,'0' ,'relative' ,'0'],
]
# Header rows preceding GENERIC_FF_MIXING in a RASPA force_field_mixing_rules.def file.
# NOTE(review): this header declares 55 interactions while GENERIC_FF_MIXING
# below lists 23 rows — presumably framework-specific entries are appended
# elsewhere before writing; verify.
GENERIC_FF_MIXING_HEADER = [
    ['# general rule for shifted vs truncated '],
    ['shifted '],
    ['# general rule tailcorrections '],
    ['no '],
    ['# number of defined interactions '],
    ['55 '],
    ['# type interaction, parameters. IMPORTANT: define shortest matches first, so that more specific ones overwrites these '],
]
# Default self-interactions: one row per pseudo atom type, giving the
# interaction kind and (for lennard-jones) the epsilon/sigma parameters.
GENERIC_FF_MIXING = [
    ['He ' , 'lennard-jones' ,'10.9 ' ,'2.64 '],
    ['CH4_sp3 ' , 'lennard-jones' ,'158.5 ' ,'3.72 '],
    ['CH3_sp3 ' , 'lennard-jones' ,'108.0 ' ,'3.76 '],
    ['CH2_sp3 ' , 'lennard-jones' ,'56.0 ' ,'3.96 '],
    ['CH_sp3 ' , 'lennard-jones' ,'17.0 ' ,'4.67 '],
    ['C_sp3 ' , 'lennard-jones' ,' 0.8 ' ,'6.38 '],
    ['H_com ' , 'lennard-jones' ,'36.7 ' ,'2.958 '],
    ['H_h2 ' , 'none ' ,' ' ,' '],
    ['O_co2 ' , 'lennard-jones' ,'79.0 ' ,'3.05 '],
    ['C_co2 ' , 'lennard-jones' ,'27.0 ' ,'2.80 '],
    ['C_benz ' , 'lennard-jones' ,'30.70 ' ,'3.60 '],
    ['H_benz ' , 'lennard-jones' ,'25.45 ' ,'2.36 '],
    ['N_n2 ' , 'lennard-jones' ,'36.0 ' ,'3.31 '],
    ['N_com ' , 'none ' ,' ' ,' '],
    ['Ow ' , 'lennard-jones' ,'89.633' ,'3.097 '],
    ['N_dmf ' , 'lennard-jones' ,'80.0 ' ,'3.2 '],
    ['Co_dmf ' , 'lennard-jones' ,'50.0 ' ,'3.7 '],
    ['Cm_dmf ' , 'lennard-jones' ,'80.0 ' ,'3.8 '],
    ['O_dmf ' , 'lennard-jones' ,'100.0 ' ,'2.96 '],
    ['H_dmf ' , 'lennard-jones' ,'8.0 ' ,'2.2 '],
    ['Ar ' , 'lennard-jones' ,'119.8 ' ,'3.34 '],
    ['Kr ' , 'lennard-jones' ,'166.4 ' ,'3.636 '],
    ['Xe ' , 'lennard-jones' ,'221.0 ' ,'4.1 '],
]
# Footer rows closing the mixing-rules file: the cross-interaction rule.
GENERIC_FF_MIXING_FOOTER = [
    ['# general mixing rule for Lennard-Jones '],
    ['Lorentz-Berthelot '],
]
|
'''
Project Euler #6: find the difference between the square of the sum and
the sum of the squares of the first one hundred natural numbers.
(For the first ten: (1+...+10)^2 - (1^2+...+10^2) = 3025 - 385 = 2640.)
'''


def square_sum_difference(n=100):
    """Return (1 + ... + n)**2 minus (1**2 + ... + n**2).

    The original hard-coded n=100; the parameter generalizes it while
    keeping the default, so the printed result is unchanged.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = sum(range(1, n + 1)) ** 2
    return square_of_sum - sum_of_squares


print(square_sum_difference())
|
"""Change ID type to BigInteger
Revision ID: 1fb4a98e3a4d
Revises: cd9f3f359db1
Create Date: 2022-02-08 14:33:53.746979
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1fb4a98e3a4d'
down_revision = 'cd9f3f359db1'
branch_labels = None
depends_on = None
def upgrade():
    """Widen the integer ID/chat columns on ``games`` to BIGINT."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('games', 'id',
                    existing_type=sa.INTEGER(),
                    type_=sa.BigInteger(),
                    existing_nullable=False,
                    autoincrement=True)
    # The two chat-id columns take identical treatment (no autoincrement).
    for column in ('setter_chat_id', 'chat_id'):
        op.alter_column('games', column,
                        existing_type=sa.INTEGER(),
                        type_=sa.BigInteger(),
                        existing_nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert the ``games`` ID/chat columns back to plain INTEGER."""
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse order of upgrade(): chat columns first, then the primary key.
    for column in ('chat_id', 'setter_chat_id'):
        op.alter_column('games', column,
                        existing_type=sa.BigInteger(),
                        type_=sa.INTEGER(),
                        existing_nullable=False)
    op.alter_column('games', 'id',
                    existing_type=sa.BigInteger(),
                    type_=sa.INTEGER(),
                    existing_nullable=False,
                    autoincrement=True)
    # ### end Alembic commands ###
|
# Copyright (c) Microsoft Corporation
# Licensed under the MIT License.
class DuplicateManagerConfigException(Exception):
    """Raised when a duplicate configuration is detected in any of the
    RAI managers.

    :param exception_message: A message describing the error.
    :type exception_message: str
    """

    _error_code = 'Duplicate RAI configuration detected.'
class UserConfigValidationException(Exception):
    """Raised when some user configuration is not valid.

    :param exception_message: A message describing the error.
    :type exception_message: str
    """

    _error_code = 'Invalid config'
|
from seabreeze.pyseabreeze.features._base import SeaBreezeFeature
# Definition
# ==========
#
# TODO: This feature needs to be implemented for pyseabreeze
#
class SeaBreezeI2CMasterFeature(SeaBreezeFeature):
    """I2C master bus feature stub.

    TODO: this feature still needs a pyseabreeze implementation; every
    method below must be overridden by a concrete subclass.
    """

    identifier = 'i2c_master'

    def get_number_of_buses(self):
        """Return the number of I2C buses. Subclasses must override."""
        raise NotImplementedError("implement in derived class")

    def read_bus(self, bus_index, slave_address, buffer_length=1024):
        """Read from *slave_address* on a bus. Subclasses must override."""
        raise NotImplementedError("implement in derived class")

    def write_bus(self, bus_index, slave_address, data):
        """Write *data* to *slave_address* on a bus. Subclasses must override."""
        raise NotImplementedError("implement in derived class")
|
import csv


def load_data(path):
    """Read *path* and return a list of [condition, letter, password] rows,
    skipping blank lines."""
    rows = []
    with open(path, "r") as csvfile:
        for row in csv.reader(csvfile):
            if row:
                rows.append(row[0].split())
    return rows


def count_valid(entries):
    """Count entries where exactly one of the two 1-based positions named
    in the condition holds the given letter (XOR rule, AoC 2020 day 2 part 2).

    Each entry is [condition, letter, password], e.g. ["1-3", "a:", "abcde"].
    """
    valid = 0
    for cond, letter, password in entries:
        # Convert the 1-based positions in "i-j" to 0-based indices.
        first, second = (int(c) - 1 for c in cond.split("-"))
        target = letter[0]  # drop the trailing ':'
        # Valid iff exactly one of the two positions matches.
        if (password[first] == target) ^ (password[second] == target):
            valid += 1
    return valid


if __name__ == "__main__":
    # Decomposed from the original inline script; same output.
    print(count_valid(load_data("input1.txt")))
|
# Demo script: build a small list of integers and display it.
l = [1, 4, 5, 6, 7, 8]  # noqa: E741 — single-letter name kept as the module interface
print(l)
|
import re


def classify(answer):
    """Classify a yes/no *answer*: 'Agreed' for y/yes, 'Not agreed' for n/no.

    Returns None for anything else.  Both branches ignore case — the
    original applied re.IGNORECASE only to the "yes" branch, so "No"/"N"
    were silently unrecognised; that inconsistency is fixed here.
    """
    if re.search(r"^y(es)?$", answer, re.IGNORECASE):
        return "Agreed"
    if re.search(r"^n(o)?$", answer, re.IGNORECASE):
        return "Not agreed"
    return None


if __name__ == "__main__":
    # Guarded so importing this module no longer blocks on stdin.
    verdict = classify(input("Do you agree?\n"))
    if verdict is not None:
        print(verdict)
#Project by Maame Yaa Osei, Maame Efua Boham & Nana Ama Parker
#Music Player
#Importing libraries
import pygame
from mutagen.id3 import ID3
from mutagen.mp3 import MP3
import os
from tkinter.filedialog import *
from tkinter import *
from HashTable import *
# Create the Tkinter main window, set its minimum size and title.
root = Tk()
root.minsize(500, 500)
root.title("A8 MUSIC PLAYER")

# Label (backed by a StringVar) that shows the currently playing song;
# packed into the window later, after the controls.
label = StringVar()
songLabel = Label(root, textvariable=label, width=70)
class Artiste:
    """An artiste with a name, a list of albums and a list of songs.

    Accessors:
    - getName: return the artiste's name
    - getAlbums: return the artiste's list of albums
    - getSongs: return the artiste's list of songs
    Mutators:
    - addSong: append a song to the artiste's songs
    - addAlbum: append an album to the artiste's albums
    __str__ returns the artiste's name.
    """

    def __init__(self, name):
        self.name = name
        self.album = []  # albums this artiste appears on
        self.song = []   # songs by this artiste

    def addSong(self, song):
        self.song.append(song)

    def addAlbum(self, album):
        self.album.append(album)

    def getName(self):
        return self.name

    def getAlbums(self):
        # BUG FIX: the original returned from inside a for loop, yielding
        # only the FIRST album (or None when empty).  Return the whole
        # list, as the documented contract ("returns the albums") states.
        return self.album

    def getSongs(self):
        # BUG FIX: same return-in-loop bug as getAlbums — return all songs.
        return self.song

    def __str__(self):
        return self.name
class Song:
    """A song with a title plus artiste, album and length metadata.

    Accessors: getTitle, getArtiste, getAlbum, getLength (formatted as
    '<seconds rounded to 2 dp> seconds').
    Mutators: setLength, setArtiste, setAlbum.
    __str__ returns the song's title.
    """

    def __init__(self, title):
        self.title = title
        self.artiste = None  # set later via setArtiste
        self.length = ""     # seconds; set later via setLength
        self.album = None    # set later via setAlbum

    def getTitle(self):
        return self.title

    def getArtiste(self):
        return self.artiste

    def getAlbum(self):
        return self.album

    def getLength(self):
        # Human-readable duration, rounded to two decimal places.
        return str(round(self.length, 2)) + " seconds"

    def setLength(self, length):
        self.length = length

    def setArtiste(self, artiste):
        self.artiste = artiste

    def setAlbum(self, album):
        self.album = album

    def __str__(self):
        return self.title
class Album:
    """An album with a title, an artiste and a list of songs.

    Accessors:
    - getTitle: return the album's title
    - getArtiste: return the album's artiste
    - getSongs: return the album's list of songs
    Mutators:
    - setTitle: replace the album's title
    - addArtiste: set the album's artiste
    - addSong: append a song to the album
    __str__ returns the album's title.
    """

    def __init__(self, title):
        self.title = title
        self.songs = []      # songs on this album
        self.artiste = None  # set later via addArtiste

    def getTitle(self):
        return self.title

    def getSongs(self):
        # BUG FIX: the original returned from inside a for loop, yielding
        # only the FIRST song (or None when empty) — return the list.
        return self.songs

    def getArtiste(self):
        return self.artiste

    def setTitle(self, newTitle):
        # BUG FIX: the original assigned self.Title (capital T), creating a
        # new attribute and leaving self.title — which every accessor
        # reads — unchanged.
        self.title = newTitle

    def addArtiste(self, newArtiste):
        self.artiste = newArtiste

    def addSong(self, song):
        self.songs.append(song)

    def __str__(self):
        return self.title
# Bradley N. Miller, David L. Ranum
# Introduction to Data Structures and Algorithms in Python
# Copyright 2005
# queue.py
class Queue:
    """FIFO queue backed by a list; the front of the queue is the END of
    the list (enqueue inserts at index 0, dequeue pops from the end)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        return not self.items

    def enqueue(self, item):
        self.items.insert(0, item)

    def dequeue(self):
        return self.items.pop()

    def size(self):
        return len(self.items)
# Bradley N. Miller, David L. Ranum
# Introduction to Data Structures and Algorithms in Python
# Copyright 2005
# stack.py
class Stack:
    """LIFO stack backed by a list; the top of the stack is the end."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        return not self.items

    def push(self, item):
        self.items.append(item)

    def pop(self):
        return self.items.pop()

    def peek(self):
        # Top element without removing it; IndexError when empty,
        # matching the original len()-1 indexing.
        return self.items[-1]

    def size(self):
        return len(self.items)
playQueue = Queue()  # files still to be played
prevStack = Stack()  # files already played (enables 'previous song')
pygame.mixer.init()  # initialize the pygame audio mixer

# Parallel lists of per-track metadata stripped from the mp3 tags.
songNames = []
songArtistes = []
songLength = []
songAlbum = []

# Index of the currently playing entry in songNames, used by the label.
index = 0
"""
A function that accesses a selected directory,
retrieves all songs with '.mp3' format, extracts the metadata using mutagen,
and enqueues the .mp3 files
"""
def getMusicData():
myDirectory = askdirectory()
os.chdir(myDirectory)
for files in os.listdir(myDirectory):
if files.endswith(".mp3"):
newDirectory = os.path.realpath(files)
audio = ID3(newDirectory)
audio_ = MP3(newDirectory)
songNames.append(audio['TIT2'].text[0])
songArtistes.append(audio['TPE1'].text[0])
songAlbum.append(audio["TALB"].text[0])
songLength.append(audio_.info.length)
playQueue.enqueue(files)
getMusicData()
# A function that uses the global index to
# update the label with the song which is playing.
def updateLabel():
    global index
    label.set("Now Playing: " + songNames[index])


# Button callback: dequeue the next file, remember it for 'previous',
# load it into pygame, start playback and refresh the label.
def playSong(event):
    song = playQueue.dequeue()
    prevStack.push(song)
    pygame.mixer.music.load(song)
    pygame.mixer.music.play()
    updateLabel()
# Button callback: play the next song in the queue, as long as the queue
# is not empty, and update the label.
# NOTE(review): the global index is incremented BEFORE the dequeue, so a
# failed dequeue (empty queue) leaves the label index out of step with
# the queue — confirm intended.
def nextSong(event):
    try:
        global index
        index += 1
        nextSong = (playQueue.dequeue())  # note: shadows this function name locally
        prevStack.push(nextSong)
        pygame.mixer.music.load(nextSong)
        pygame.mixer.music.play()
        updateLabel()
    except IndexError:
        # Queue exhausted: stop playback and tell the user.
        pygame.mixer.music.stop()
        label.set("Queue is empty")
# Button callback: play the previous song by popping it off the
# previously-played stack and updating the label.
def prevSong(event):
    try:
        global index
        index -= 1
        prevSong = prevStack.pop()  # note: shadows this function name locally
        pygame.mixer.music.load(prevSong)
        pygame.mixer.music.play()
        updateLabel()
    except IndexError:
        # Nothing on the stack: stop playback and tell the user.
        pygame.mixer.music.stop()
        label.set("No previous song")
# Button callback: pause playback and clear the label.
def pauseSong(event):
    pygame.mixer.music.pause()
    label.set("")


# Button callback: resume paused playback and restore the label.
# NOTE(review): "Now Playing:" here lacks the space updateLabel() uses —
# kept as-is since it is a user-visible string; confirm intended.
def continueSong(event):
    pygame.mixer.music.unpause()
    global index
    label.set("Now Playing:" + songNames[index])


# Button callback: stop playback entirely and clear the label.
def stopSong(event):
    pygame.mixer.music.stop()
    label.set("")
# Listbox displaying the queue of songs in the Tkinter window.
listBox = Listbox(root, width=50)
listBox.pack()

# Insert titles at position 0 in reverse so they display in queue order,
# then restore songNames to its original order.
songNames.reverse()
for names in songNames:
    listBox.insert(0, names)
songNames.reverse()

# Playback control buttons, labelled and packed into the window.
playButton = Button(root, text='Start Playing!')
playButton.pack()
nextButton = Button(root, text='Next Song')
nextButton.pack()
previousButton = Button(root, text='Previous Song')
previousButton.pack()
pauseButton = Button(root, text='Pause')
pauseButton.pack()
continueButton = Button(root, text='Continue')
continueButton.pack()
stopButton = Button(root, text='Stop Music')
stopButton.pack()

# Wire each button's left-click to its handler so it runs on click.
playButton.bind("<Button-1>", playSong)
nextButton.bind("<Button-1>", nextSong)
previousButton.bind("<Button-1>", prevSong)
pauseButton.bind("<Button-1>", pauseSong)
continueButton.bind("<Button-1>", continueSong)
stopButton.bind("<Button-1>", stopSong)

# Label showing what song is currently playing.
songLabel.pack()

songs = []     # Song objects, parallel to songNames
artistes = []  # Artiste objects, parallel to songNames
albums = []    # Album objects, parallel to songNames
songMap = HashTable()     # song title   -> Song
albumMap = HashTable()    # album title  -> Album
artisteMap = HashTable()  # artiste name -> Artiste
'''
Loops through list of song names and creates instances of an artiste,
an album and a song for each song. Appends each object to its respective
list of objects. Adds songs for a particular artiste to that artiste's
list of songs. Adds songs that belong to a particular album to that album's
list of songs.
'''
def sortSongs():
    """Build Song/Artiste/Album objects for every loaded track and index
    them by name in songMap/artisteMap/albumMap.

    NOTE(review): a fresh Artiste and Album object is created for EVERY
    song, so an artiste with several songs becomes several separate
    objects; the two equality checks below compare an object against the
    values it was just built from and therefore always hold — confirm
    this is intended.
    """
    for i in range(len(songNames)):
        artiste = Artiste(songArtistes[i])
        artistes.append(artiste)
        album = Album(songAlbum[i])
        albums.append(album)
        song = Song(songNames[i])
        song.setArtiste(artistes[i])
        song.setAlbum(albums[i])
        song.setLength(songLength[i])
        songs.append(song)
        songMap.put(songNames[i], songs[i])
        if songs[i].getArtiste().getName() == artistes[i].getName():
            artistes[i].addSong(songs[i].getTitle())
            artistes[i].addAlbum(songs[i].getAlbum())
            artisteMap.put(songArtistes[i], artistes[i])
        if songs[i].getAlbum().getTitle() == albums[i].getTitle():
            albums[i].addSong(songs[i].getTitle())
            albums[i].addArtiste(songs[i].getArtiste())
            albumMap.put(songAlbum[i], albums[i])


sortSongs()
# Entry box for typing search queries.
searchBox = Entry(root, width=50)
searchBox.pack()
searchBox.focus_force()

# Search button.
searchButton = Button(root, text='Search')
searchButton.pack()

# Labels used to display details of a matching song, album or artiste.
label2 = StringVar()
label3 = StringVar()
label4 = StringVar()
songLabel2 = Label(root, textvariable=label2, width=70)
songLabel2.pack()
songLabel3 = Label(root, textvariable=label3, width=70)
songLabel3.pack()
songLabel4 = Label(root, textvariable=label4, width=70)
songLabel4.pack()


# Button callback: look the entered term up in songMap, then albumMap,
# then artisteMap, and fill the three detail labels accordingly.
# NOTE(review): each branch calls .get() several times and also checks
# membership in the table's ``data`` attribute — presumably a container
# of stored values in HashTable; confirm against HashTable's API.
def search(event):
    # Text the user typed into the searchbox.
    searchTerm = searchBox.get()
    if songMap.get(searchTerm) != None and songMap.get(searchTerm) in songMap.data:
        label2.set("Artiste: " + str(songMap.get(searchTerm).getArtiste()))
        label3.set("Length: " + str(songMap.get(searchTerm).getLength()))
        label4.set("Album: " + str(songMap.get(searchTerm).getAlbum()))
    elif albumMap.get(searchTerm) != None and albumMap.get(searchTerm) in albumMap.data:
        label2.set("Title: " + str(albumMap.get(searchTerm).getTitle()))
        label3.set("Artiste: " + str(albumMap.get(searchTerm).getArtiste()))
        label4.set("Songs: " + str(albumMap.get(searchTerm).getSongs()))
    elif artisteMap.get(searchTerm) != None and artisteMap.get(searchTerm) in artisteMap.data:
        label2.set("Name: " + str(artisteMap.get(searchTerm).getName()))
        label3.set("Songs: " + str(artisteMap.get(searchTerm).getSongs()))
        label4.set("Albums: " + str(artisteMap.get(searchTerm).getAlbums()))
    else:
        label2.set(searchTerm + " does not exist in directory")
        label3.set("")
        label4.set("")


# Bind the search button to its handler and enter the Tk main loop.
searchButton.bind("<Button-1>", search)
root.mainloop()
|
import copy
import os
from importlib import import_module
from importlib.util import find_spec as importlib_find
def import_string(dotted_path):
    """
    Import a dotted module path and return the attribute/class designated by
    the last name in the path.  Raise ImportError if the import failed.
    """
    try:
        # rsplit splits at most once from the right, separating the module
        # path from the final attribute name.
        module_path, class_name = dotted_path.rsplit('.', 1)
    except ValueError as err:
        # No dot at all — cannot be a module path.
        raise ImportError("%s doesn't look like a module path" % dotted_path) from err

    # Import the module part programmatically.
    module = import_module(module_path)

    try:
        return getattr(module, class_name)
    except AttributeError as err:
        raise ImportError('Module "%s" does not define a "%s" attribute/class' % (
            module_path, class_name)
        ) from err
def autodiscover_modules(*args, **kwargs):
    """
    Auto-discover INSTALLED_APPS modules and fail silently when
    not present. This forces an import on them to register any admin bits they
    may want.

    You may provide a register_to keyword parameter as a way to access a
    registry. This register_to object must have a _registry instance variable
    to access it.
    """
    from django.apps import apps

    register_to = kwargs.get('register_to')
    for app_config in apps.get_app_configs():
        for module_to_search in args:
            # Attempt to import the app's module.
            try:
                if register_to:
                    # Snapshot the registry so it can be restored if the
                    # import fails partway through registration.
                    before_import_registry = copy.copy(register_to._registry)
                import_module('%s.%s' % (app_config.name, module_to_search))
            except Exception:
                # Reset the registry to the state before the last import
                # as this import will have to reoccur on the next request and
                # this could raise NotRegistered and AlreadyRegistered
                # exceptions (see #8245).
                if register_to:
                    register_to._registry = before_import_registry

                # Decide whether to bubble up this error. If the app just
                # doesn't have the module in question, we can ignore the error
                # attempting to import it, otherwise we want it to bubble up.
                if module_has_submodule(app_config.module, module_to_search):
                    raise
def module_has_submodule(package, module_name):
    """Return True if *package* contains a submodule named *module_name*."""
    # Only real packages carry __path__; plain modules (and non-modules)
    # cannot contain submodules.
    package_path = getattr(package, '__path__', None)
    if package_path is None:
        return False

    full_module_name = package.__name__ + '.' + module_name
    try:
        return importlib_find(full_module_name, package_path) is not None
    except ModuleNotFoundError:
        # An invalid dotted path makes find_spec raise ModuleNotFoundError.
        return False
def module_dir(module):
    """
    Find the name of the directory that contains a module, if possible.
    Raise ValueError otherwise, e.g. for namespace packages that are split
    over several directories.
    """
    # __path__ may not support indexing, so materialize it first.
    paths = list(getattr(module, '__path__', []))
    if len(paths) == 1:
        return paths[0]
    # Fall back to the file the module was loaded from.
    filename = getattr(module, '__file__', None)
    if filename is not None:
        return os.path.dirname(filename)
    raise ValueError("Cannot determine directory containing %s" % module)
|
import logging
import os
from itertools import chain
from markdown import Markdown
from more_itertools import chunked
from pelican import signals, Readers, PagesGenerator
from pelican.contents import Page
from pelican.readers import BaseReader
from pelican.themes.webosbrew import pagination_data
from repogen import funding
from repogen.common import parse_package_info
log = logging.getLogger(__name__)
class PackageInfoReader(BaseReader):
    """Pelican reader turning a repogen package-info file (.yml/.py) into a
    hidden per-app detail page."""

    enabled = True
    file_extensions = ['yml', 'py']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Markdown renderer configured from Pelican's MARKDOWN setting.
        self._md = Markdown(**self.settings['MARKDOWN'])

    def read(self, filename):
        """Parse the package info and return (rendered description, metadata).

        Remote lookups are skipped (offline mode) when running under CI.
        """
        info = parse_package_info(filename, offline='CI' not in os.environ)
        metadata = {
            'title': info['title'],
            'override_save_as': f'apps/{info["id"]}.html',
            'template': 'app',
            'status': 'hidden',  # keep off the regular page listings
            'modified': info['lastmodified'],
            'manifest': info['manifest'],
            # Prefer the detail icon, falling back to the regular icon.
            'detailIcon': info.get('detailIconUri', info['iconUri']),
            'sponsor_links': funding.parse_links(info.get('funding', None)),
            'package_info': info
        }
        return self._md.convert(info['description']), metadata
def readers_init(readers: Readers):
    """Register PackageInfoReader for both package-info source extensions."""
    for extension in ('yml', 'py'):
        readers.reader_classes[extension] = PackageInfoReader
def add_app_indices(generator: PagesGenerator):
    """Build paginated, hidden 'Apps' index pages from every generated page
    that carries package_info metadata, sorted by title case-insensitively."""
    packages = list(
        sorted(filter(lambda x: x is not None, map(lambda page: page.metadata.get('package_info', None),
                                                   chain(generator.pages, generator.hidden_pages))),
               key=lambda info: info['title'].lower()))
    # One index page per DEFAULT_PAGINATION-sized chunk of packages.
    pages = list(chunked(packages, generator.settings['DEFAULT_PAGINATION']))
    pages_count = len(pages)
    for index, items in enumerate(pages):
        metadata = {
            'title': 'Apps',
            # First page lives at apps/index.html; later ones at apps/page/N.
            'override_save_as': 'apps/index.html' if index == 0 else f'apps/page/{index + 1}.html',
            'template': 'apps',
            'status': 'hidden',
            'packages': items,
            # Pagination widget data only when there is more than one page.
            'pagination': pagination_data(index + 1, pages_count, apps_list_href) if pages_count > 1 else None,
        }
        generator.hidden_pages.append(Page('', metadata=metadata, settings=generator.settings,
                                           source_path=f'apps-page-{index + 1}.html', context=generator.context))
def apps_list_href(page):
    """Return the href for page *page* (1-based) of the apps listing."""
    if page <= 1:
        return '/apps'
    return f'/apps/page/{page}'
def register():
    """Pelican plugin entry point: hook up the custom readers and the
    generated app index pages.

    (Fix: removed the dead trailing ``pass`` statement.)
    """
    signals.readers_init.connect(readers_init)
    signals.page_generator_finalized.connect(add_app_indices)
|
import sys
def load_data(path):
    """Read the puzzle input: one line of tab-separated integers."""
    with open(path, "r") as handle:
        contents = handle.read()
    return [int(token) for token in contents.strip().split("\t")]
def redistribute(memory_banks):
    """One redistribution cycle, in place: empty the fullest bank and deal
    its blocks one at a time to the following banks, wrapping around.
    Ties are broken by the lowest index (list.index finds the first max).
    """
    n_banks = len(memory_banks)
    blocks = max(memory_banks)
    start = memory_banks.index(blocks)
    memory_banks[start] = 0
    offset = 1
    while offset <= blocks:
        memory_banks[(start + offset) % n_banks] += 1
        offset += 1


def count_redistributions(memory_banks):
    """Run cycles until a configuration repeats (AoC 2017 day 6).

    Returns (cycles until a repeat is detected, cycle number at which the
    repeated configuration was first seen); their difference is the loop
    length.  Mutates *memory_banks* in place.
    """
    seen = {}
    steps = 0
    while True:
        redistribute(memory_banks)
        steps += 1
        state = tuple(memory_banks)
        if state in seen:
            return steps, seen[state]
        seen[state] = steps
if __name__ == "__main__":
memory_banks = load_data(sys.argv[1])
count, last_seen = count_redistributions(memory_banks)
print(f"Part 1: {count}")
print(f"Part 2: {count - last_seen}")
|
from sys import stdin


def max_subarray_sum(values):
    """Kadane's algorithm: maximum sum over all non-empty contiguous
    subarrays of *values* (which must be non-empty), in O(n)."""
    best = current = values[0]
    for value in values[1:]:
        # Either extend the running subarray or restart at this element.
        current = max(value, current + value)
        best = max(best, current)
    return best


if __name__ == "__main__":
    # Guarded so importing the module no longer blocks reading stdin.
    n = int(stdin.readline())
    numbers = [int(token) for token in stdin.readline().split()]
    print(max_subarray_sum(numbers[:n]))
|
from typing import Optional, Protocol
import requests
from montag.util.decorators import debug
class HttpResponse(Protocol):
    """Structural type for the subset of an HTTP response object we use
    (matched by ``requests.Response``)."""

    # Numeric HTTP status code of the response.
    status_code: int

    def json(self) -> dict:
        """Return the response body decoded as JSON."""
        ...

    @property
    def text(self) -> str:
        """Return the response body as text."""
        ...
class HttpAdapter:
    """Thin wrapper around ``requests`` whose methods return HttpResponse;
    each call is traced by the @debug decorator."""

    @debug
    def get(
        self,
        url: str,
        params: Optional[dict] = None,
        headers: Optional[dict] = None,
    ) -> HttpResponse:
        """Issue a GET request with optional query params and headers."""
        return requests.get(url, params=params, headers=headers)

    @debug
    def post(
        self,
        url: str,
        data: Optional[dict] = None,
        json: Optional[dict] = None,
        headers: Optional[dict] = None,
    ) -> HttpResponse:
        """Issue a POST request with form *data* and/or a *json* body."""
        return requests.post(url, data=data, json=json, headers=headers)

    @debug
    def put(
        self,
        url: str,
        data: Optional[dict] = None,
        json: Optional[dict] = None,
        headers: Optional[dict] = None,
    ) -> HttpResponse:
        """Issue a PUT request with form *data* and/or a *json* body."""
        return requests.put(url, data=data, json=json, headers=headers)
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
# base import
from gevent import monkey
# Must run before other imports so blocking stdlib calls are gevent-patched.
monkey.patch_all()
import time
import math
import os
import sys
sys.path.append("Src/")  # make project modules under Src/ importable
import logging
from flask import Flask
from gevent.pywsgi import WSGIServer
from Config import ConfigManager

# Access log file written by init_log().
ACCESS_LOG_PATH = "logs/app_access.log"

app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # allow non-ASCII characters in JSON responses
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True

# Root logger; a file handler is attached in init_log().
logger = logging.getLogger()
def init_log():
    """Attach a file handler for ACCESS_LOG_PATH to the root logger and
    return it.

    NOTE(review): calling this more than once stacks duplicate handlers,
    and the 'logs/' directory must already exist — confirm both.
    """
    file_handler = logging.FileHandler(ACCESS_LOG_PATH)
    logger.addHandler(file_handler)
    logger.setLevel(logging.INFO)
    return logger


def init_config():
    """Load config.py into the Flask app and build the MongoDB connection
    settings from the ConfigManager base configuration."""
    app.config.from_pyfile('config.py')
    app.config["MONGODB_SETTINGS"] = {
        'db': ConfigManager.base_config.setting.get("db_name"),
        'host': ConfigManager.base_config.setting.get("db_host"),
        'port': ConfigManager.base_config.setting.get("db_port"),
        'username': ConfigManager.base_config.setting.get("db_user"),
        'password': ConfigManager.base_config.setting.get("db_pass"),
    }
def init_app():
    """Initialise configuration and logging before serving."""
    init_config()
    init_log()


def start_app():
    """Register the admin/api blueprints and serve forever with gevent."""
    # Imported here so the Flask app is fully configured first.
    from Web.admin import admin
    from Web.api import api
    admin.init_app(app)
    api.init_app(app)
    http_server = WSGIServer((ConfigManager.base_config.setting.get("web_bind_host"), ConfigManager.base_config.setting.get("web_bind_port")), app, log=logger, error_log=logger)
    http_server.serve_forever()


def run():
    """Entry point: initialise the app, then block serving HTTP."""
    init_app()
    start_app()


if __name__ == '__main__':
    run()
import os
import django
# Configure Django before importing any project models.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SASTest.settings')
django.setup()
from main.models import MarginClass, CI050, CC050
import xml.etree.cElementTree as ET
import datetime
import random

# Demo clearing accounts and members used to fabricate report data
# (the two lists are zipped pairwise).
ACCOUNTS = ["SchmidFinancials", "MayerTraders", "MuellerEnterprise"]
CLEARING_MEMBER = ["Schmid", "Mayer", "Mueller"]
# Trading-day window applied to generated report timestamps.
START_TIME = datetime.datetime.strptime("1 May 2020 9 0 0", '%d %b %Y %H %M %S')
END_TIME = datetime.datetime.strptime("1 May 2020 17 0 0", '%d %b %Y %H %M %S')
def generate_correct_data(today):
    '''Write one consistent data set to both the XML report files and the database.'''
    today = today.replace(hour=START_TIME.hour, minute=START_TIME.minute, second=0)
    yesterday = (today - datetime.timedelta(days=1)).replace(
        hour=END_TIME.hour, minute=END_TIME.minute, second=0)
    # One XML root per report file.
    root_CI050_today = ET.Element("root")
    root_CC050_yesterday = ET.Element("root")
    root_CI050_yesterday = ET.Element("root")
    # Margin class names currently in the database.
    margin_classes = [mc.margin_class for mc in MarginClass.objects.all()]

    def build_rows(report_date, with_time):
        # One row per (margin class, account); CI050 rows also carry a report time.
        rows = []
        for margin_class in margin_classes:
            for account, clearing_member in zip(ACCOUNTS, CLEARING_MEMBER):
                row = {
                    "clearing_member": clearing_member,
                    "account": account,
                    "margin_class": margin_class,
                    "margin": 100,
                    "report_date": report_date,
                }
                if with_time:
                    row["report_time"] = report_date
                rows.append(row)
        return rows

    today_CI050_data = build_rows(today, True)
    yesterday_CI050_data = build_rows(yesterday, True)
    yesterday_CC050_data = build_rows(yesterday, False)

    def add_xml_row(root, row, with_time):
        # Serialise one row as an <entry> element under the given root.
        node = ET.SubElement(root, "entry")
        ET.SubElement(node, "Clearing_Member", name="clearing_member").text = row["clearing_member"]
        ET.SubElement(node, "Account", name="account").text = row["account"]
        ET.SubElement(node, "Margin_Class", name="margin_class").text = row["margin_class"]
        ET.SubElement(node, "Margin", name="margin").text = str(row["margin"])
        ET.SubElement(node, "Report_Date", name="report_date").text = str(row["report_date"])
        if with_time:
            ET.SubElement(node, "Report_Time", name="report_time").text = str(row["report_time"])

    for row in today_CI050_data:
        add_xml_row(root_CI050_today, row, True)
    for row in yesterday_CI050_data:
        add_xml_row(root_CI050_yesterday, row, True)
    for row in yesterday_CC050_data:
        add_xml_row(root_CC050_yesterday, row, False)

    # Persist the same rows in the database, skipping duplicates.
    for rows in (today_CI050_data, yesterday_CI050_data, yesterday_CC050_data):
        for row in rows:
            margin_class = MarginClass.objects.get(margin_class=row['margin_class'])
            if 'report_time' in row:
                existing = CI050.objects.filter(clearing_member=row['clearing_member'], account=row['account'],
                                                margin_class=margin_class, margin=int(row['margin']),
                                                report_date=row['report_date'],
                                                report_time=row['report_time'])
                if not existing.exists():
                    model = CI050.objects.create(clearing_member=row['clearing_member'], account=row['account'],
                                                 margin_class=margin_class, margin=int(row['margin']),
                                                 report_date=row['report_date'],
                                                 report_time=row['report_time'])
                    model.save()
            else:
                existing = CC050.objects.filter(clearing_member=row['clearing_member'], account=row['account'],
                                                margin_class=margin_class, margin=int(row['margin']),
                                                report_date=row['report_date'])
                if not existing.exists():
                    model = CC050.objects.create(clearing_member=row['clearing_member'], account=row['account'],
                                                 margin_class=margin_class, margin=int(row['margin']),
                                                 report_date=row['report_date'])
                    model.save()

    # Write the three report files.
    ET.ElementTree(root_CC050_yesterday).write(
        "../Reports/CC050-{}-{}-{}.xml".format(yesterday.day, yesterday.month, yesterday.year))
    ET.ElementTree(root_CI050_yesterday).write(
        "../Reports/CI050-{}-{}-{}.xml".format(yesterday.day, yesterday.month, yesterday.year))
    ET.ElementTree(root_CI050_today).write(
        "../Reports/CI050-{}-{}-{}.xml".format(today.day, today.month, today.year))
def generate_false_data(today):
    '''Write data where the CC050 XML report shows a different margin (200)
    than the database and the CI050 reports (100), to exercise mismatch detection.'''
    today = today.replace(hour=START_TIME.hour, minute=START_TIME.minute, second=0)
    yesterday = (today - datetime.timedelta(days=1)).replace(
        hour=END_TIME.hour, minute=END_TIME.minute, second=0)
    # One XML root per report file.
    root_CI050_today = ET.Element("root")
    root_CC050_yesterday = ET.Element("root")
    root_CI050_yesterday = ET.Element("root")
    # Margin class names currently in the database.
    margin_classes = [mc.margin_class for mc in MarginClass.objects.all()]

    def build_rows(report_date, with_time):
        # One row per (margin class, account); CI050 rows also carry a report time.
        rows = []
        for margin_class in margin_classes:
            for account, clearing_member in zip(ACCOUNTS, CLEARING_MEMBER):
                row = {
                    "clearing_member": clearing_member,
                    "account": account,
                    "margin_class": margin_class,
                    "margin": 100,
                    "report_date": report_date,
                }
                if with_time:
                    row["report_time"] = report_date
                rows.append(row)
        return rows

    today_CI050_data = build_rows(today, True)
    yesterday_CI050_data = build_rows(yesterday, True)
    yesterday_CC050_data = build_rows(yesterday, False)

    def add_xml_row(root, row, with_time, margin_text=None):
        # Serialise one row; margin_text overrides the row's margin in the XML only.
        node = ET.SubElement(root, "entry")
        ET.SubElement(node, "Clearing_Member", name="clearing_member").text = row["clearing_member"]
        ET.SubElement(node, "Account", name="account").text = row["account"]
        ET.SubElement(node, "Margin_Class", name="margin_class").text = row["margin_class"]
        ET.SubElement(node, "Margin", name="margin").text = margin_text if margin_text is not None else str(row["margin"])
        ET.SubElement(node, "Report_Date", name="report_date").text = str(row["report_date"])
        if with_time:
            ET.SubElement(node, "Report_Time", name="report_time").text = str(row["report_time"])

    for row in today_CI050_data:
        add_xml_row(root_CI050_today, row, True)
    for row in yesterday_CI050_data:
        add_xml_row(root_CI050_yesterday, row, True)
    for row in yesterday_CC050_data:
        # Deliberately wrong margin in the CC050 XML (DB still stores 100).
        add_xml_row(root_CC050_yesterday, row, False, margin_text=str(200))

    # Persist the (correct, margin=100) rows in the database, skipping duplicates.
    for rows in (today_CI050_data, yesterday_CI050_data, yesterday_CC050_data):
        for row in rows:
            margin_class = MarginClass.objects.get(margin_class=row['margin_class'])
            if 'report_time' in row:
                existing = CI050.objects.filter(clearing_member=row['clearing_member'], account=row['account'],
                                                margin_class=margin_class, margin=int(row['margin']),
                                                report_date=row['report_date'],
                                                report_time=row['report_time'])
                if not existing.exists():
                    model = CI050.objects.create(clearing_member=row['clearing_member'], account=row['account'],
                                                 margin_class=margin_class, margin=int(row['margin']),
                                                 report_date=row['report_date'],
                                                 report_time=row['report_time'])
                    model.save()
            else:
                existing = CC050.objects.filter(clearing_member=row['clearing_member'], account=row['account'],
                                                margin_class=margin_class, margin=int(row['margin']),
                                                report_date=row['report_date'])
                if not existing.exists():
                    model = CC050.objects.create(clearing_member=row['clearing_member'], account=row['account'],
                                                 margin_class=margin_class, margin=int(row['margin']),
                                                 report_date=row['report_date'])
                    model.save()

    # Write the three report files.
    ET.ElementTree(root_CC050_yesterday).write(
        "../Reports/CC050-{}-{}-{}.xml".format(yesterday.day, yesterday.month, yesterday.year))
    ET.ElementTree(root_CI050_yesterday).write(
        "../Reports/CI050-{}-{}-{}.xml".format(yesterday.day, yesterday.month, yesterday.year))
    ET.ElementTree(root_CI050_today).write(
        "../Reports/CI050-{}-{}-{}.xml".format(today.day, today.month, today.year))
if __name__ == '__main__':
    # Seed date for report generation; the correct-data variant is kept for reference.
    date = datetime.datetime.strptime("16 May 2020", '%d %b %Y')
    # generate_correct_data(date)
    generate_false_data(date)
|
#!/usr/bin/env python
"""
isodate.py
Functions for manipulating a subset of ISO8601 date, as specified by
<http://www.w3.org/TR/NOTE-datetime>
Exposes:
- parse(s)
s being a conforming (regular or unicode) string. Raises ValueError for
invalid strings. Returns a float (representing seconds from the epoch;
see the time module).
- parse_datetime(s) # if datetime module is available
s being a conforming (regular or unicode) string. Raises ValueError for
invalid strings. Returns a datetime instance.
- asString(i)
i being an integer or float. Returns a conforming string.
TODO:
- Precision? it would be nice to have an interface that tells us how
precise a datestring is, so that we don't make assumptions about it;
e.g., 2001 != 2001-01-01T00:00:00Z.
Thanks to Andrew Dalke for datetime support.
"""
__license__ = """
Copyright (c) 2002-2005 Mark Nottingham <mnot@pobox.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Note: datetime support added by Andrew Dalke <dalke@dalkescientific.com>.
All copyrightable changes by Andrew Dalke are released into the public domain.
No copyright protection is asserted.
"""
import sys, time, re, operator
from types import IntType, FloatType
from calendar import timegm
try:
import datetime
except ImportError:
_has_datetime = 0
else:
_has_datetime = 1
__version__ = "0.7"
date_parser = re.compile(r"""^
(?P<year>\d{4,4})
(?:
-
(?P<month>\d{1,2})
(?:
-
(?P<day>\d{1,2})
(?:
T
(?P<hour>\d{1,2})
:
(?P<minute>\d{1,2})
(?:
:
(?P<second>\d{1,2})
(?:
\.
(?P<dec_second>\d+)?
)?
)?
(?:
Z
|
(?:
(?P<tz_sign>[+-])
(?P<tz_hour>\d{1,2})
:
(?P<tz_min>\d{2,2})
)
)
)?
)?
)?
$""", re.VERBOSE)
def parse(s):
    """ parse a string and return seconds since the epoch.

    Raises ValueError for non-conforming strings.
    """
    # Accepts str or unicode (this module targets Python 2).
    assert isinstance(s, basestring)
    r = date_parser.search(s)
    try:
        a = r.groupdict('0')
    except AttributeError:
        # search() returned None: not a conforming date string.
        # (Was a bare `except:` with Py2-only raise syntax.)
        raise ValueError('invalid date string format')
    # Non-matching groups default to '0'; month/day floor to 1 so a bare
    # year parses as Jan 1st.
    d = timegm(( int(a['year']),
                 int(a['month']) or 1,
                 int(a['day']) or 1,
                 int(a['hour']),
                 int(a['minute']),
                 int(a['second']),
                 0,
                 0,
                 0
              ))
    # Subtract the timezone offset; the sign character is applied textually
    # to the offset expressed in seconds.
    return d - int("%s%s" % (
        a.get('tz_sign', '+'),
        ( int(a.get('tz_hour', 0)) * 60 * 60 ) +
        ( int(a.get('tz_min', 0)) * 60 ))
    )
if _has_datetime:
    def parse_datetime(s):
        """ parse a string and return a datetime object.

        Raises ValueError for non-conforming strings.
        """
        assert isinstance(s, basestring)
        r = date_parser.search(s)
        try:
            a = r.groupdict('0')
        except AttributeError:
            # search() returned None: not a conforming date string.
            # (Was a bare `except:` with Py2-only raise syntax.)
            raise ValueError('invalid date string format')
        dt = datetime.datetime(int(a['year']),
                               int(a['month']) or 1,
                               int(a['day']) or 1,
                               # If not given these will default to 00:00:00.0
                               int(a['hour']),
                               int(a['minute']),
                               int(a['second']),
                               # Convert into microseconds.
                               # NOTE(review): assumes a single fractional digit;
                               # two or more digits overflow the microsecond range — confirm inputs.
                               int(a['dec_second'])*100000,
                               )
        tz_hours_offset = int(a['tz_hour'])
        tz_mins_offset = int(a['tz_min'])
        # Normalise to UTC by undoing the encoded offset.
        if a.get('tz_sign', '+') == "-":
            return dt + datetime.timedelta(hours = tz_hours_offset,
                                           minutes = tz_mins_offset)
        else:
            return dt - datetime.timedelta(hours = tz_hours_offset,
                                           minutes = tz_mins_offset)
def asString(i):
    """ given seconds since the epoch, return a dateTime string.

    Emits the shortest conforming form: trailing all-default components
    (month=1, day=1, midnight) are omitted.
    """
    # Was `type(i) in [IntType, FloatType]`: an exact-type check relying on the
    # Py2-only `types` names; isinstance also accepts int/float subclasses and
    # works on Python 3.
    assert isinstance(i, (int, float))
    year, month, day, hour, minute, second, wday, jday, dst = time.gmtime(i)
    o = str(year)
    if (month, day, hour, minute, second) == (1, 1, 0, 0, 0): return o
    o = o + '-%2.2d' % month
    if (day, hour, minute, second) == (1, 0, 0, 0): return o
    o = o + '-%2.2d' % day
    if (hour, minute, second) == (0, 0, 0): return o
    o = o + 'T%2.2d:%2.2d' % (hour, minute)
    if second != 0:
        o = o + ':%2.2d' % second
    o = o + 'Z'
    return o
def _cross_test():
    """Check parse() and parse_datetime() agree on a set of sample strings."""
    samples = ("1997-07-16T19:20+01:00",
               "2001-12-15T22:43:46Z",
               "2004-09-26T21:10:15Z",
               "2004",
               "2005-04",
               "2005-04-30",
               "2004-09-26T21:10:15.1Z",
               "2004-09-26T21:10:15.1+05:00",
               "2004-09-26T21:10:15.1-05:00",
               )
    for iso in samples:
        via_epoch = datetime.datetime.utcfromtimestamp(parse(iso))
        direct = parse_datetime(iso)
        # parse() drops sub-second precision, so also compare with microseconds zeroed.
        matches = (via_epoch == direct or
                   via_epoch == direct.replace(microsecond=0))
        if not matches:
            raise AssertionError("Different: %r != %r" %
                                 (via_epoch, direct))
if __name__ == "__main__":
    # print() call form: identical output on Python 2 for a single argument,
    # and valid syntax on Python 3 (the bare print statement is not).
    print(parse("1997-07-16T19:20+01:00"))
    print(parse("2001-12-15T22:43:46Z"))
    print(parse("2004-09-26T21:10:15Z"))
    if _has_datetime:
        _cross_test()
        print(parse_datetime("1997-07-16T19:20+01:00"))
        print(parse_datetime("2001-12-15T22:43:46Z"))
        print(parse_datetime("2004-09-26T21:10:15Z"))
|
# Generated by Django 3.0.10 on 2021-01-13 11:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds an optional 'salutation' CharField to QandA."""

    dependencies = [
        ('agony', '0003_qanda_recommended'),
    ]

    operations = [
        migrations.AddField(
            model_name='qanda',
            name='salutation',
            # blank=True with an empty default keeps existing rows valid.
            field=models.CharField(blank=True, default='', max_length=200),
        ),
    ]
|
import os
import io
from dataclasses import dataclass
from typing import Any, Dict, List, Tuple, Union
from .read import AppBlockReader
from ... import structs
@dataclass(frozen=True)
class _FSTEntry:
    """Fields common to all FST entries (directories and files)."""
    # Entry name, resolved from the FST name table.
    name: str
    # True when the entry is flagged as deleted in the FST.
    deleted: bool
    # Secondary index as stored in the FST — exact semantics not visible here.
    secondary_index: int
@dataclass(frozen=True)
class FSTDirectory(_FSTEntry):
    """A directory entry together with its (recursively built) children."""
    children: 'List[Union[FSTDirectory, FSTFile]]'
@dataclass(frozen=True)
class FSTFile(_FSTEntry):
    """A file entry with its resolved byte offset and size."""
    # Offset in bytes (already multiplied by the offset factor when needed).
    offset: int
    size: int
class FSTProcessor:
    """Turns a parsed FST structure into a tree (or flat maps) of
    FSTDirectory/FSTFile entries.

    Traversal relies on a mutable cursor (self._curr_index) walking the flat
    entry list in order, so get_tree() is not reentrant.
    """

    def __init__(self, fst_struct: Any):
        # offset -> name lookup built from the FST name table
        self._name_map = self.__get_names(fst_struct)
        # multiplier applied to file offsets not already expressed in bytes
        self._offset_factor = fst_struct.offset_factor
        # add root entry to list
        self._entries = (fst_struct.root, *fst_struct.entries)

    @classmethod
    def try_load(cls, reader: AppBlockReader) -> 'FSTProcessor':
        '''
        Loads the FST from the provided reader

        Raises an exception if the data does not represent a valid FST
        '''
        # check first block before loading entire file
        block = reader.load_next_block()[1]
        if block[:4] != b'FST\0':
            raise RuntimeError('first input does not contain FST')
        # write previously read block to stream, then read remaining blocks
        fst_stream = io.BytesIO()
        fst_stream.write(block)
        reader.write_all(fst_stream)
        fst_stream.seek(0, os.SEEK_SET)
        # parse FST from stream
        return cls(structs.fst.parse_stream(fst_stream))

    def flatten(self) -> Tuple[Dict[str, FSTDirectory], Dict[str, FSTFile]]:
        '''
        Flattens the FST into two dictionaries containing complete paths
        and entries for directories and files respectively
        '''
        directories = {}  # type: Dict[str, FSTDirectory]
        files = {}  # type: Dict[str, FSTFile]

        def process_file(entry: FSTFile, parent_path: str) -> None:
            # each full path must be unique within the FST
            path = os.path.join(parent_path, entry.name)
            assert path not in files
            files[path] = entry

        def process_directory(entry: FSTDirectory, parent_path: str) -> None:
            path = os.path.join(parent_path, entry.name)
            assert path not in directories
            directories[path] = entry
            for child in entry.children:
                if isinstance(child, FSTDirectory):
                    process_directory(child, path)
                else:
                    process_file(child, path)

        process_directory(self.get_tree(), '')
        return directories, files

    def get_tree(self) -> FSTDirectory:
        '''
        Returns the directory tree of the provided FST
        '''
        # reset the traversal cursor; entry 0 is the root and must be a directory
        self._curr_index = 0
        assert self._entries[self._curr_index].type.directory
        return self.__process_directory()

    def __process_directory(self) -> FSTDirectory:
        '''
        Recursively reads the entries of the directory at the current index
        '''
        dir_entry = self._entries[self._curr_index]
        self._curr_index += 1

        children = []  # type: List[Union[FSTDirectory, FSTFile]]
        # iterate over following entries until end is reached
        while self._curr_index < dir_entry.next_entry_index:
            curr_entry = self._entries[self._curr_index]
            if curr_entry.type.directory:
                # directory
                children.append(self.__process_directory())
            else:
                # file
                children.append(self.__process_file())

        # make sure the current index equals the end index
        if self._curr_index > dir_entry.next_entry_index:
            raise RuntimeError('something went wrong, inner entry read more than it should have')

        return FSTDirectory(
            self._name_map[dir_entry.name_offset],
            dir_entry.type.deleted,
            dir_entry.secondary_index,
            children
        )

    def __process_file(self) -> FSTFile:
        '''
        Reads a file entry at the current index
        '''
        entry = self._entries[self._curr_index]
        self._curr_index += 1

        # calculate real offset: raw offsets are in units of the offset factor
        # unless the entry is flagged as already being in bytes
        offset = entry.offset_raw
        if not entry.flags.offset_in_bytes:
            offset *= self._offset_factor

        return FSTFile(
            self._name_map[entry.name_offset],
            entry.type.deleted,
            entry.secondary_index,
            offset,
            entry.size
        )

    def __get_names(self, fst_struct: Any) -> Dict[int, str]:
        '''
        Create mapping of 'offset -> name' based on list of names
        '''
        # names are stored back-to-back, NUL-terminated, so each entry's
        # offset is the running length so far
        offset = 0
        name_map = {}
        for name in fst_struct.names:
            name_map[offset] = name
            offset += len(name) + 1  # + trailing nullbyte
        return name_map
|
import dataclasses
import typing
import marshmallow
from .typing_json import Converter
def dataclass_json(obj=None, *, converter=Converter()):
    """Class decorator: build a marshmallow Schema from the dataclass fields of
    ``obj`` and attach an instance of it as ``obj.schema``.

    Works bare (``@dataclass_json``) or with arguments
    (``@dataclass_json(converter=...)``).

    NOTE(review): the default ``Converter()`` is evaluated once at definition
    time and shared by every decorated class — confirm Converter is stateless.
    """
    def inner(obj):
        schema = {}
        # resolve string/forward-reference annotations to real types
        annotations = typing.get_type_hints(obj)
        for field in dataclasses.fields(obj):
            metadata = {"metadata": field.metadata.copy()}
            # mirror dataclass defaults into the marshmallow field so
            # missing keys deserialize the same way the dataclass would
            if field.default_factory is not dataclasses.MISSING:
                metadata["missing"] = field.default_factory
                metadata["metadata"]["default_factory"] = field.default_factory
            elif field.default is not dataclasses.MISSING:
                metadata["missing"] = field.default
            if field.default is not dataclasses.MISSING:
                metadata["metadata"]["default"] = field.default
            schema[field.name] = converter.convert(
                annotations[field.name], metadata=metadata
            )
        SchemaClass = marshmallow.Schema.from_dict(schema, name=obj.__name__)

        class Schema(SchemaClass):
            # rebuild the dataclass instance after a successful load
            @marshmallow.post_load
            def __make_object(self, data, **kwargs):
                return obj(**data)

        obj.schema = Schema()
        return obj

    # support both @dataclass_json and @dataclass_json(...)
    if obj is not None:
        return inner(obj)
    return inner
|
#!/usr/bin/env python3
"""Expand Cisco-style ACL lines that use 'eq <port...>' into one line per port.

Usage: script.py <input_acl> <output_acl>

Lines with one ' eq ' clause are expanded to one output line per listed port;
lines with two ' eq ' clauses produce the cross product of both port lists;
all other lines are copied through unchanged.
"""
import copy
from sys import argv

file_r, file_w = argv[1:]

# Service names Cisco IOS accepts in place of numeric ports.
portnames = ['bgp', 'biff', 'bootpc', 'bootps', 'chargen', 'cmd', 'daytime', 'discard', 'dnsix', 'domain', 'drip', 'echo', 'exec', 'finger', 'ftp', 'ftp-data', 'gopher', 'hostname', 'ident', 'irc', 'isakmp', 'klogin', 'kshell', 'login', 'lpd', 'mobile-ip', 'nameserver', 'netbios-dgm', 'netbios-ns', 'netbios-ss', 'nntp', 'non500-isakmp', 'ntp', 'onep-plain', 'onep-tls', 'pim-auto-rp', 'pop2', 'pop3', 'rip', 'smtp', 'snmp', 'snmptrap', 'sunrpc', 'syslog', 'tacacs', 'talk', 'telnet', 'tftp', 'time', 'uucp', 'who', 'whois', 'www', 'xdmcp']

# Context managers guarantee both files are closed even if a line fails to
# parse (the originals were only closed on the success path).
with open(file_r, 'r') as file_opened, open(file_w, 'w') as file_write:
    for line in file_opened:
        portslist = []
        portslist1 = []
        portslist2 = []
        if line.count(' eq ') == 1:
            if 'range' in line:
                line = line.split()
                newline = copy.deepcopy(line)
                index_eq = line.index('eq')
                index_range = line.index('range')
                # Only collect ports from the 'eq' side, never from the 'range' side.
                if index_eq < index_range:
                    for i in line[:index_range]:
                        if i.isdigit() or i in portnames:
                            portslist.append(i)
                            newline.remove(i)
                else:
                    for i in line[index_eq:]:
                        if i.isdigit() or i in portnames:
                            portslist.append(i)
                            newline.remove(i)
            else:
                line = line.split()
                newline = copy.deepcopy(line)
                for i in line:
                    if i.isdigit() or i in portnames:
                        portslist.append(i)
                        newline.remove(i)
            # Re-insert each collected port at the position of the first one.
            firstport = line.index(portslist[0])
            for k in portslist:
                newline_copy = copy.deepcopy(newline)
                newline_copy.insert(firstport, k)
                new_command_string = ' ' + ' '.join(newline_copy) + '\n'
                file_write.write(new_command_string)
        elif line.count(' eq ') > 1:
            line = line.split()
            # Index of the second 'eq' token; the line is split there into a
            # source-port half and a destination-port half.
            occ2 = line[line.index('eq')+1:].index('eq') + line.index('eq') + 1
            newline1 = copy.deepcopy(line[:occ2])
            newline2 = copy.deepcopy(line[occ2:])
            for i in line[:occ2]:
                if i.isdigit() or i in portnames:
                    portslist1.append(i)
                    newline1.remove(i)
            for i in line[occ2:]:
                if i.isdigit() or i in portnames:
                    portslist2.append(i)
                    newline2.remove(i)
            firstport1 = line[:occ2].index(portslist1[0])
            firstport2 = line[occ2:].index(portslist2[0])
            # Cross product: one output line per (port1, port2) pair.
            for i in portslist1:
                newline1_copy = copy.deepcopy(newline1)
                newline1_copy.insert(firstport1, i)
                for y in portslist2:
                    newline2_copy = copy.deepcopy(newline2)
                    newline2_copy.insert(firstport2, y)
                    new_command_string = ' ' + ' '.join(newline1_copy) + ' ' + ' '.join(newline2_copy) + '\n'
                    file_write.write(new_command_string)
        else:
            file_write.write(line)
|
# Read two names and a delimiter, then print the names joined by the delimiter.
first_name = input()
second_name = input()
delimiter = input()
print(first_name, second_name, sep=delimiter)
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect
from django.urls import reverse
from .models import *
from .my_util import get_select_money
# Goods-list sort modes (note: DEFALUT_SORT name is a typo of DEFAULT, kept for compatibility)
DEFALUT_SORT = 0  # composite / default ordering
PRICE_SORT = 1  # order by price
SALENUM_SORT = 2  # order by sales volume
# Status codes returned in JSON API responses
SUCCESS = 1
NOT_LOGIN = 2
NO_GOODS = 3
# Create your views here.
def home(req):
    """Render the home page: carousel, nav, must-buy, shop tiles and showcase rows."""
    # top carousel entries (raw SQL against the wheel table)
    wheels = MyWheel.objects.raw("SELECT * FROM axf_wheel")
    # navigation entries
    navs = MyNav.objects.all()
    # "must buy" entries for the front end
    mustbuys = MustBuy.objects.all()
    # shop tile data, sliced below into the layout's row groups
    shops = MyShop.objects.all()
    # main showcase rows
    main_shows = MainShow.objects.all()
    data = {
        'title': '首页',
        'swipers': wheels,
        'navs': navs,
        'mustbuys': mustbuys,
        'shop_img': shops[0],
        'shop_1_3': shops[1:3],
        'shop_3_7': shops[3:7],
        'shop_7_11': shops[7:],
        'main_shows': main_shows
    }
    return render(req, 'home/home.html', data)
# 闪购的API
def market(req, c_id, sub_c_id, order_by_type):
# 拿到全部的分类数据
all_types = GoodsType.objects.all()
# 在商品表 去获取对应分类下的数据
goods_data = MyGoods.objects.filter(categoryid=int(c_id))
# 如果二级子分类的id 不是0 那么 我们需要在一级分类查询出的数据 再进行过滤
if (int(sub_c_id) != 0):
goods_data = goods_data.filter(childcid=int(sub_c_id))
# 一定是先根据一 二级分类排序以后 去做排序
order_by_type = int(order_by_type)
if order_by_type == PRICE_SORT:
goods_data = goods_data.order_by("price")
elif order_by_type == SALENUM_SORT:
# 根据销量排序
goods_data = goods_data.order_by("productnum")
else:
pass
# 拿当前用户点击的那个分类数据
current_type = all_types.get(typeid=c_id)
# 拿二级分类的字符串
# 低端
# sub_type_str = current_type.childtypenames
# sub_type_array = sub_type_str.split("#")
# res_types = []
# for i in sub_type_array:
# tmp = i.split(":")
# res_types.append(tmp)
# 稍微好一点的写法
res_types = [i.split(":") for i in current_type.childtypenames.split("#")]
print(res_types)
data = {
'title': "闪购",
'all_types': all_types,
'goods_data': goods_data,
'current_c_id': c_id, #返回用户点击那个分类id
'sub_types': res_types,
'current_sub_c_id': sub_c_id,# 返回用户当前点击的二级分类数据\
'sort_type': order_by_type
}
return render(req, 'market/market.html', data)
@login_required(login_url='/axf/login')
def cart(req):
    """Render the current user's shopping cart page."""
    # current user (authentication guaranteed by the decorator)
    user = req.user
    # this user's cart rows
    cart_items = Cart.objects.filter(user_id=user.id)
    # "select all" is on unless at least one row is unselected
    is_selected_all = True  # default: everything selected
    if cart_items.filter(is_select=False).exists():
        is_selected_all = False
    # total price of the selected rows is computed by get_select_money
    data = {
        'user': user,
        'cart_items': cart_items,
        'title': "购物车",
        'is_all_select': is_selected_all,
        'sum_money': get_select_money(user)
    }
    return render(req, 'cart/cart.html', data)
# @login_required(login_url="/axf/login")
def mine(req):
    """Render the profile ("mine") page; also works for anonymous users."""
    user = req.user
    is_login = False
    u_name = ""
    u_icon = ""
    # only a real MyUser has username/icon data
    if isinstance(user, MyUser):
        is_login = True
        u_name = user.username
        u_icon = user.icon.url
    data = {
        'title': '我的',
        'login_status': is_login,
        'u_name': u_name,
        # NOTE(review): for anonymous users this is just "/static/uploads/" —
        # confirm the template tolerates it
        'icon': "/static/uploads/" + u_icon
    }
    return render(req, 'mine/mine.html', data)
def register(req):
    """GET: render the registration form. POST: validate input and create the user."""
    if req.method == "GET":
        return render(req, 'user/register.html')
    else:
        param = req.POST
        # NOTE(review): raises KeyError when no avatar is uploaded — confirm the
        # form marks the file input as required
        icon_file = req.FILES['u_icon']
        u_name = param.get('u_name')
        pwd = param.get('pwd')
        confirm_pwd = param.get('confirm_pwd')
        email = param.get('email')
        # username > 3 chars and both password fields must match
        if u_name and pwd and pwd==confirm_pwd and len(u_name)>3:
            # .exists() is True when the username is already taken
            if MyUser.objects.filter(username=u_name).exists():
                return HttpResponse("该用户已经被注册")
            else:
                MyUser.objects.create_user(
                    username=u_name,
                    password=pwd,
                    email=email,
                    icon=icon_file
                )
                return redirect("axf:mine")
        return redirect("axf:mine")
def login_api(req):
    """GET: render the login form. POST: authenticate and start a session."""
    if req.method == "GET":
        return render(req, 'user/login.html')
    else:
        params = req.POST
        u_name = params.get("u_name")
        pwd = params.get("pwd")
        # basic format validation
        if u_name and pwd and len(u_name) > 3:
            # verify credentials
            user = authenticate(username=u_name, password=pwd)
            if user:
                # credentials OK: establish the session
                login(req, user)
                # jump to the profile page
                return redirect(reverse("axf:mine"))
            else:
                return redirect(reverse("axf:login"))
        else:
            # NOTE(review): the message says "password too short" but this branch
            # covers every validation failure, including a short username
            return HttpResponse("密码过短")
def logout_api(req):
    """Log the current user out and return to the profile page."""
    logout(req)
    return redirect(reverse("axf:mine"))
def cart_api(req):
    """AJAX endpoint: add/remove one unit of a goods item to/from the user's cart.

    Returns JSON {code, msg, data}; data is the login URL when not logged in,
    otherwise the resulting quantity of the goods in the cart.
    """
    user = req.user
    data = {}
    # anonymous users are redirected to the login page by the front end
    if not isinstance(user, MyUser):
        data["code"] = NOT_LOGIN
        data["msg"] = "未登录"
        data["data"] = "/axf/login"
        return JsonResponse(data)
    # request parameters (note: "opreate_type" is the key the front end sends)
    param = req.POST
    g_id = param.get("g_id")
    opreate_type = param.get("opreate_type")
    # the goods row being operated on
    goods = MyGoods.objects.get(pk=int(g_id))
    cart_item = Cart.objects.filter(
        user=user,
        goods=goods
    )
    # stock check
    if goods.storenums <= 0:
        data['code'] = NO_GOODS
        data['msg'] = "库存不足"
        data['data'] = ""
        return JsonResponse(data)
    goods_num = 0
    if opreate_type == 'add':
        if cart_item.exists():
            # bump the existing cart row
            my_cart_item = cart_item.first()
            my_cart_item.num += 1
            my_cart_item.save()
            goods_num = my_cart_item.num
        else:
            # first unit of this goods: create the cart row (num defaults to 1)
            Cart.objects.create(
                user=user,
                goods=goods
            )
            goods_num = 1
        data['code'] = SUCCESS
        data['msg'] = "ok"
        data['data'] = goods_num
        return JsonResponse(data)
    else:
        # decrement path
        # NOTE(review): assumes the cart row exists; .first() returns None
        # otherwise and the next line would raise — confirm callers guarantee it
        item = cart_item.first()
        # one fewer of this goods in the cart
        item.num -= 1
        item.save()
        goods_num = item.num
        if item.num <= 0:
            # quantity reached zero: remove the row entirely
            item.delete()
        data['code'] = SUCCESS
        data['msg'] = 'ok'
        data['data'] = goods_num
        return JsonResponse(data)
def cart_item_change(req):
    """AJAX endpoint: increment/decrement the quantity of one cart row by id."""
    # current user (used only for recomputing the selected total)
    user = req.user
    data = {}
    # operation type: "add" increments, anything else decrements
    o_type = req.POST.get("o_type")
    c_id = int(req.POST.get('c_id'))
    # the cart row being changed
    cart_item = Cart.objects.get(pk=c_id)
    if o_type == "add":
        # stock check
        if cart_item.goods.storenums < 1:
            data['code'] = NO_GOODS
            data['msg'] = "当前商品库存不足"
            data['data'] = ''
            return JsonResponse(data)
        cart_item.num += 1
        cart_item.save()
        # recompute the selected-items total
        sum_money = get_select_money(user)
        data['code'] = SUCCESS
        data['msg'] = 'ok'
        data['data'] = {
            'sum_money': sum_money,
            'current_item_num': cart_item.num
        }
        return JsonResponse(data)
    else:
        # decrement path
        cart_item.num -= 1
        cart_item.save()
        if cart_item.num == 0:
            # quantity reached zero: delete the row
            cart_item.delete()
        # recompute the selected-items total
        money = get_select_money(user)
        data['code'] = SUCCESS
        data['msg'] = 'ok'
        data['data'] = {
            'sum_money': money,
            'current_item_num': cart_item.num
        }
        return JsonResponse(data)
# Toggle selection of a single cart row
def select_cart_item(req):
    """AJAX endpoint: toggle one cart row's selected state; return new totals."""
    user = req.user
    c_id = int(req.POST.get("c_id"))
    # the cart row to toggle
    cart_item = Cart.objects.get(pk=c_id)
    cart_item.is_select = not cart_item.is_select
    cart_item.save()
    # recompute the selected-items total
    money = get_select_money(user)
    # recompute the "select all" checkbox state
    is_selected_all = True  # default: everything selected
    if Cart.objects.filter(user=user, is_select=False).exists():
        is_selected_all = False
    data = {}
    data['code'] = SUCCESS
    data['msg'] = 'ok'
    data['data'] = {
        'is_select': cart_item.is_select,
        'money': money,
        'is_all_select': is_selected_all
    }
    return JsonResponse(data)
def select_all(req):
    """AJAX endpoint: toggle "select all" — deselect everything when every row
    is selected, otherwise select the remaining unselected rows."""
    user = req.user
    # rows currently NOT selected
    un_select_items = Cart.objects.filter(
        user=user,
        is_select=False
    )
    is_select_all = True
    if un_select_items.count() == 0:
        # everything was selected: flip to all-unselected
        Cart.objects.filter(user=user).update(is_select=False)
        is_select_all = False
    else:
        # select the rest
        un_select_items.update(is_select=True)
        is_select_all = True
    money = get_select_money(user)
    data = {}
    data['code'] = SUCCESS
    data['msg'] = 'ok'
    data['data'] = {
        'money': money,
        'is_all_select': is_select_all
    }
    return JsonResponse(data)
|
class Square:
    """A grid cell with an identifier, (x, y) coordinates and an opaque payload."""
    def __init__(self, id, x, y, data):
        # NOTE(review): parameter `id` shadows the builtin; kept for API compatibility.
        self.id = id
        self.x = x
        self.y = y
        self.data = data
|
# #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Logging module.

The SHOW_ON_DISPLAY flag controls whether messages are also echoed to the screen.
"""
# pylint: disable=line-too-long
import os
import sys
from xml.etree.ElementInclude import include  # NOTE(review): unused import — candidate for removal
# flag: True - also echo log messages to stdout
SHOW_ON_DISPLAY = True
def get_script_path():
    """Return the directory containing the running script, with a trailing slash."""
    script = os.path.realpath(sys.argv[0])
    return os.path.dirname(script) + "/"
# Resolved once at import time; the log file lives next to the script.
DIRNAME = get_script_path()
FILENAME = DIRNAME + "pyss.log"
def reset():
    """Truncate the log file (opening in 'wb' mode discards existing content)."""
    with open(FILENAME, "wb"):
        pass
def printLine(msg=""):
    """Append msg plus newline to the log file; echo to stdout when SHOW_ON_DISPLAY."""
    with open(FILENAME, "a") as f:
        f.write(msg + "\n")
    if SHOW_ON_DISPLAY:
        display(msg)
def display(msg=""):
    """Write the stringified msg followed by a newline to stdout."""
    text = str(msg) + '\n'
    sys.stdout.write(text)
def info(msg):
    """Append msg to the log file; echo to stdout when SHOW_ON_DISPLAY.
    (Same behavior as printLine, but msg is required.)"""
    with open(FILENAME, "a") as f:
        f.write(msg + "\n")
    if SHOW_ON_DISPLAY:
        display(msg)
def warn(msg):
    """Append msg with a "WARN " prefix to the log file; echo when SHOW_ON_DISPLAY.

    NOTE(review): the on-screen echo omits the WARN prefix while the file gets
    it — confirm this asymmetry is intentional.
    """
    with open(FILENAME, "a") as f:
        f.write("WARN " + msg + "\n")
    if SHOW_ON_DISPLAY:
        display(msg)
def dump(obj, objName="obj", incl=None, excl=None):
    """Return a newline-joined "objName.attr = value" listing of obj's public attributes.

    :param obj: object whose attributes are listed
    :param objName: prefix used in each output line
    :param incl: when given, only attributes in this collection are listed
    :param excl: when given, attributes in this collection are skipped
    """
    parts = []
    for attr in sorted(dir(obj)):
        # skip private/dunder names
        if attr.startswith("_"):
            continue
        if incl is not None and attr not in incl:
            continue
        if excl is not None and attr in excl:
            continue
        parts.append("%s.%s = %s" % (objName, attr, getattr(obj, attr)))
    # join instead of repeated string concatenation
    return "\n".join(parts)
if __name__ == '__main__':
    def main():
        # print() call form: the bare print statement used previously is a
        # SyntaxError on Python 3; output is identical on Python 2.
        print("?")
    main()
|
#!/usr/bin/env python3
import sys
# Key/value separator used for Hadoop-streaming style output.
SEP = '\t'

class Mapper(object):
    """Streaming-style mapper: emits one "(word, line_number)" pair per token.

    Input lines are expected as '<key><sep><text>'; only the text after the
    first separator is tokenized. Words are stripped of non-alphanumeric
    characters and lower-cased.
    """

    def __init__(self, infile=sys.stdin, separator=SEP):
        self.infile = infile
        self.sep = separator

    def emit(self, key, value):
        """Write one key/value pair to stdout, separated by self.sep."""
        sys.stdout.write(f"{key}{self.sep}{value}\n")

    def map(self):
        """Tokenize each input line and emit (word, line_number) pairs."""
        cont_line = 1
        for line in self:
            # (fix) removed a no-op `line.split('\n')` whose result was discarded
            for word in line.split():
                # keep only alphanumeric characters, normalise case
                word = ''.join(e for e in word if e.isalnum()).lower()
                self.emit(word, cont_line)
            cont_line += 1

    def __iter__(self):
        # Yield only the text portion after the first separator of each line.
        for line in self.infile:
            yield line.split(self.sep, 1)[1]
if __name__ == "__main__":
mapper = Mapper()
mapper.map()
|
"""
PyDetex
https://github.com/ppizarror/PyDetex
PIPELINES
Defines the pipelines which apply parsers.
"""
__all__ = [
'simple',
'strict',
'PipelineType'
]
import pydetex.parsers as par
from pydetex.utils import ProgressBar
from typing import Callable
# Type alias: a pipeline is any callable that maps a LaTeX string to text.
PipelineType = Callable
def simple(
        s: str,
        lang: str = 'en',
        show_progress: bool = False,
        replace_pydetex_tags: bool = True,
        remove_common_tags: bool = True,
        **kwargs
) -> str:
    """
    The most simple pipeline ever.

    :param s: String latex
    :param lang: Language tag of the code
    :param show_progress: Show progress bar
    :param replace_pydetex_tags: Replace pydetex tags like symbols, cites
    :param remove_common_tags: Call ``remove_common_tags`` parser
    :return: String with no latex!
    """
    if len(s) == 0:
        return s
    # A caller (e.g. ``strict``) may hand over its own progress bar via kwargs.
    pb = kwargs.get('progressbar', ProgressBar(steps=17 if replace_pydetex_tags else 16)) if show_progress else None
    s = '\n'.join(s.splitlines())  # Removes \r\n
    s = par.process_inputs(s, pb=pb)
    s = par.remove_comments(s, pb=pb)
    s = par.process_begin_document(s, pb=pb)
    s = par.simple_replace(s, pb=pb)
    s = par.process_def(s, pb=pb, replace=kwargs.get('replace_defs', False))
    if remove_common_tags:
        s = par.remove_common_tags(s, pb=pb)
    s = par.process_cite(s, pb=pb, compress_cite=kwargs.get('compress_cite', True))
    s = par.process_citeauthor(s, lang, pb=pb)
    s = par.process_ref(s, pb=pb)
    s = par.process_labels(s, pb=pb)
    s = par.process_items(s, pb=pb)
    s = par.process_quotes(s, pb=pb)
    s = par.process_chars_equations(s, lang, True, pb=pb)
    s = par.unicode_chars_equations(s, pb=pb)
    s = par.remove_comments(s, pb=pb)  # comments, replace tags, strip
    if replace_pydetex_tags:
        s = par.replace_pydetex_tags(s, pb=pb, **kwargs)
    s = par.strip_punctuation(s, pb=pb)
    # BUG FIX: guard against the parsers having reduced ``s`` to an empty
    # string — the original unconditional ``s[-1]`` raised IndexError then.
    if s and s[-1] == '\\':
        s = s[:-1]
    return s
def strict(
        s: str,
        lang: str = 'en',
        show_progress: bool = False,
        **kwargs
) -> str:
    """
    Apply simple + removes all commands.

    :param s: String latex
    :param lang: Language tag of the code
    :param show_progress: Show progress bar
    :return: String with no latex!
    """
    pb = ProgressBar(steps=22) if show_progress else None
    # Share a single progress bar between this pipeline and the `simple` stage.
    if 'progressbar' not in kwargs.keys():
        # noinspection PyTypeChecker
        kwargs['progressbar'] = pb
    # Run the simple pipeline first; tag replacement/removal is deferred to
    # the strict-only parsers below.
    s = simple(s, lang, replace_pydetex_tags=False, remove_common_tags=False,
               show_progress=show_progress, **kwargs)
    s = par.process_chars_equations(s, lang, False, pb=pb)
    s = par.remove_equations(s, pb=pb)
    s = par.remove_environments(s, pb=pb)
    s = par.remove_commands_param(s, lang, pb=pb)
    s = par.remove_commands_param_noargv(s, pb=pb)
    s = par.remove_comments(s, pb=pb)
    s = par.replace_pydetex_tags(s, pb=pb, **kwargs)
    s = par.strip_punctuation(s, pb=pb)
    return s
|
#!/usr/bin/env python3
import sys
from collections import Counter
if __name__ == '__main__':
    # Shoe-shop exercise: sell one pair per customer while the requested
    # size is still in stock; print the total money earned.
    _num_shoes = int(input())          # stock count; the size list defines the stock
    stock = Counter(input().split())   # available pairs per size (sizes kept as strings)
    num_customers = int(input())
    offers = [input().split() for _ in range(num_customers)]
    earned = 0
    for offer in offers:
        size, price = offer[0], offer[1]
        if stock[size] != 0:
            earned += int(price)
            stock[size] -= 1
    print(earned)
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from poker.hand import Combo,Hand,Range
from calculation import holdem_calc
from flask import Flask, render_template, redirect, url_for, request, json
import asyncio
import numpy as np
import json as pyjson
#import pandas as pd
app = Flask(__name__)
#Narrows villians range by taking the preflop action as input
#Hero will be RFI / vs. Raise / vs. 3-bet / 4-bet / etc. against x position to narrow ranges
#Assumes GTO preflop 100BB deep and hero follows charts
def narrowRange(action, villian_position):
    """Narrow the villain's range from the preflop action (GTO, 100BB deep).

    Only the button RFI spot is implemented; every other spot returns None.
    """
    # Button RFI range -> Villain is on the button and raises first
    if action == "RFI" and villian_position == "BU":
        return Range('22+,A2s+,K2s+,Q2s+,J2s+,T2s+,95s+,85s+,74s+,63s+,53s+,43s,A2o+,K8o+,Q8o+,J8o+,T8o+,97o+,87o,76o,65o,54o')
    # TODO: CO / HJ / LJ RFI, 3-bet (BU/CO/HJ), CO 4-bet and BU 5-bet spots
    # are not implemented yet (see getVillianRange for the fuller table).
    return None
def getVillianRange(action, villain_position, hero_position):
    """Return the villain's assumed preflop range for an action/position spot.

    :param action: preflop action tag ("RFI", "3bet", "3-bet call", "4-bet").
    :param villain_position: villain's seat ("BU", "CO", "HJ", "LJ").
    :param hero_position: hero's seat; only used for the re-raise spots.
    :return: a ``poker`` ``Range``, or None for uncovered combinations.
    """
    #Button RFI range -> Villian is on the button and raises first
    if action == "RFI" and villain_position == "BU":
        return Range('22+,A2s+,K2s+,Q2s+,J2s+,T2s+,95s+,85s+,74s+,63s+,53s+,43s,A2o+,K8o+,Q8o+,J8o+,T8o+,97o+,87o,76o,65o,54o')
    elif action == "RFI" and villain_position == "CO":
        return Range('22+,A2s+,K2s+,Q5s+,J6s+,T6s+,96s+,85s+,75s+,65s,54s,A5o+,K9o+,QTo+')
    # BUG FIX: the original tested `villain_position == "HJ"` twice; the first
    # branch returned a copy of the CO range and made the real HJ range below
    # unreachable. Only the tighter range is kept — it matches the CO -> LJ
    # tightening progression.
    elif action == "RFI" and villain_position == "HJ":
        return Range('22+,A2s+,K3s+,Q6s+,J7s+,T7s+,98s,86s+,76s,65s,A8o+,KJo+,QJo')
    elif action == "RFI" and villain_position == "LJ":
        return Range('33+,A2s+,K7s+,Q9s+,J9s+,T9s,98s,87s,76s,65s,A9o+,KTo+,QTo+,JTo')
    #Button 3bet Range
    elif action == "3bet" and villain_position == "BU":
        if hero_position == "CO":
            return Range('TT+,55,AQs+,A9s-A6s,A4s-A3s,K9s,K7s,QJs,Q9s,J9s,AKo,AJo-ATo,KJo+,QJo')
        elif hero_position == "HJ":
            return Range('JJ+,66,AQs+,A9s-A6s,A4s-A3s,KTs-K8s,QTs-Q9s,T9s,AKo,AJo,KQo')
        elif hero_position == "LJ":
            return Range('JJ+,AQs+,A9s-A8s,A4s-A3s,K9s,QJs,T9s,AKo,AJo,KQo')
    elif action == "3bet" and villain_position == "CO":
        if hero_position == "HJ":
            return Range('88+,A9s+,A5s-A4s,KTs+,QJs,AJo+,KQo')
        elif hero_position == "LJ":
            return Range('88+,ATs+,A5s,KTs+,QJs,AQo+,KQo')
    elif action == "3bet" and villain_position == "HJ":
        if hero_position == "LJ":
            return Range('99+,ATs+,A5s,KTs+,QJs,AQo+,KQo')
    #3-bet call
    elif action == "3-bet call" and villain_position == "CO":
        if hero_position == "BU":
            return Range('99-22,AJs-A8s,A6s-A3s,KTs+,Q9s+,J9s+,T8s+,97s+,86s+,76s,65s,54s,AQo-ATo')
    #4-bet range
    elif action == "4-bet" and villain_position == "CO":
        if hero_position == "BU":
            return Range('TT+,AQs+,A2s,K5s,AKo,ATo-A9o')
    # Uncovered spots explicitly yield None (the original fell through implicitly).
    return None
@app.route('/')
def root():
    """Serve the single-page odds-calculator UI."""
    return render_template('index.html')
@app.route('/range',methods = ['GET'])
def getRange():
    """Return the villain range as a JSON string of comma-joined hand names."""
    global villain_range
    #Converting range into list of hands
    # NOTE(review): the range is hard-coded here instead of reading the
    # module-level state set by postRange() — presumably a placeholder; confirm.
    villain_range = Range('99-22,AJs-A8s,A6s-A3s,KTs+,Q9s+,J9s+,T8s+,97s+,86s+,76s,65s,54s,AQo-ATo')
    hands_in_range = []
    for hand in villain_range.hands:
        hands_in_range.append(str(hand))
    res = ','.join(hands_in_range)
    response = app.response_class(
        response=json.dumps(res),
        status=200,
        mimetype='application/json'
    )
    return response
@app.route('/range',methods = ['POST'])
def postRange():
    """Set the villain range from a JSON payload of the form {"range": [...]}.

    Echoes the parsed payload back as a JSON response.
    """
    # Module-level so other views can observe the updated range; the original
    # assigned a dead local instead.
    global villain_range
    payload = request.get_json()
    res = ','.join(payload['range'])
    villain_range = Range(res)
    # BUG FIX: the original built the response without assigning it and then
    # returned the undefined name `response`, raising NameError on every POST.
    response = app.response_class(
        response=json.dumps(payload),
        status=200,
        mimetype='application/json'
    )
    return response
@app.route('/calculate',methods = ['POST', 'GET'])
def getOdds():
    """Compute hero's tie/win/lose equity versus the villain's inferred range.

    Reads the board cards, hero hand, action and positions from the posted
    form, derives the villain range via getVillianRange(), runs one equity
    calculation per combo in that range, and returns the averaged odds as JSON.
    """
    villain_hand = None  # NOTE: shadowed by the comprehension variable below
    flop = [request.form['board1'], request.form['board2'], request.form['board3']]
    #Error handling
    # Fall back to a fixed default board when no flop was supplied.
    if len(flop[0]) == 0:
        board = ['5d','6d','7d']
    else:
        board = flop
    turn = request.form['board4']
    river = request.form['board5']
    if len(turn) != 0:
        board.append(turn)
    if len(river) != 0:
        board.append(river)
    hero_hand = Combo( request.form['hero_hand'])
    action = request.form['action']
    villain_position = request.form['villain_position']
    hero_position = request.form['hero_position']
    villain_range = getVillianRange(action, villain_position, hero_position)
    #Constant Variables
    do_exact_calculation = True
    verbose = True
    run_one_simulation = 1
    do_not_read_from_file = None
    # One exact equity calculation per villain combo in the range.
    items = [holdem_calc.calculate_odds_villan(board, do_exact_calculation,
                            run_one_simulation, do_not_read_from_file ,
                            hero_hand, villain_hand,
                            verbose, print_elapsed_time = False) for villain_hand in villain_range.combos]
    odds = {}
    # Average each odds component across all combos, skipping failed runs.
    [odds.update({odd_type: np.mean([res[0][odd_type] for res in items if res])}) for odd_type in ["tie", "win", "lose"]]
    #Odds as dictionary with tie, win, loss as keys
    #return str(odds.get("win"))
    response = app.response_class(
        response=json.dumps(odds),
        status=200,
        mimetype='application/json'
    )
    return response
if __name__ == '__main__':
    # Local development entry point only; production serving is handled by
    # the platform webserver as described below.
    # This is used when running locally only. When deploying to Google App
    # Engine, a webserver process such as Gunicorn will serve the app. This
    # can be configured by adding an `entrypoint` to app.yaml.
    # Flask's development server will automatically serve static files in
    # the "static" directory. See:
    # http://flask.pocoo.org/docs/1.0/quickstart/#static-files. Once deployed,
    # App Engine itself will serve those files as configured in app.yaml.
    app.run(host='127.0.0.1', port=8080, debug=True)
|
# -*- coding: utf-8 -*-
#
# This file is part of django-powerdns-manager.
#
# django-powerdns-manager is a web based PowerDNS administration panel.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-powerdns-manager
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-powerdns-manager
#
# Copyright 2012 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from django.contrib.contenttypes.models import ContentType
from django.http import HttpResponseRedirect
from django import template
from django.core.exceptions import PermissionDenied
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.db import router
from django.shortcuts import render_to_response
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy, ugettext as _
from django.contrib import messages
from django.shortcuts import render_to_response
from django.template import RequestContext
try:
from django.apps import apps as cache
except ImportError:
from django.db.models.loading import cache
from django.core.urlresolvers import reverse
from powerdns_manager.forms import ZoneTypeSelectionForm
from powerdns_manager.forms import TtlSelectionForm
from powerdns_manager.forms import ClonedZoneDomainForm
from powerdns_manager.utils import generate_serial
from powerdns_manager.utils import generate_api_key
from powerdns_manager.utils import interchange_domain
# Action for
# - set change date
# - set serial (?)
# - set TTL to 300, 3600, 86400
#
#def test_action(modeladmin, request, queryset):
# messages.add_message(request, messages.INFO, 'The test action was successful.')
#test_action.short_description = "Test Action"
def reset_api_key(modeladmin, request, queryset):
    """Admin action: generate a fresh API key for every selected dynamic zone.

    Non-dynamic zones are reported as errors and excluded from the final count.
    """
    DynamicZone = cache.get_model('powerdns_manager', 'DynamicZone')
    updated = queryset.count()
    for domain_obj in queryset:
        # Each Domain has at most one DynamicZone row.
        dz = DynamicZone.objects.get(domain=domain_obj)
        if not dz.api_key:
            messages.error(request, 'Zone is not dynamic: %s' % domain_obj.name)
            updated -= 1
        else:
            dz.api_key = generate_api_key()
            dz.save()
    if updated:
        messages.info(request, 'Successfully updated %d domains.' % updated)
reset_api_key.short_description = "Reset API Key"
def set_domain_type_bulk(modeladmin, request, queryset):
    """Actions that sets the domain type on the selected Domain instances.

    This action first displays a page which provides a dropdown box for the
    user to select the domain type and then sets the new domain type on the
    selected objects.

    It checks if the user has change permission.

    Based on: https://github.com/django/django/blob/1.4.2/django/contrib/admin/actions.py

    Important
    ---------
    In order to work requires some special form fields (see the template).
    """
    opts = modeladmin.model._meta
    app_label = opts.app_label
    # Check that the user has change permission for the Domain model
    if not modeladmin.has_change_permission(request):
        raise PermissionDenied
    # The user has selected a new domain type through the
    # forms.ZoneTypeSelectionForm form. Make the changes to the selected
    # objects and return a None to display the change list view again.
    #if request.method == 'POST':
    if request.POST.get('post'):
        domain_type = request.POST.get('domaintype')
        n = queryset.count()
        if n and domain_type:
            for obj in queryset:
                obj.type = domain_type
                # Bump the zone serial so slaves pick up the change.
                obj.update_serial()
                obj.save()
                obj_display = force_unicode(obj)
                modeladmin.log_change(request, obj, obj_display)
            messages.info(request, 'Successfully updated %d domains.' % n)
        # Return None to display the change list page again.
        return None
    # First pass (no confirmation POST yet): render the intermediate
    # type-selection page.
    info_dict = {
        'form': ZoneTypeSelectionForm(),
        'queryset': queryset,
        'opts': opts,
        'app_label': app_label,
        'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
    }
    return render_to_response(
        'powerdns_manager/actions/set_domain_type.html', info_dict, context_instance=RequestContext(request), mimetype='text/html')
set_domain_type_bulk.short_description = "Set domain type"
def set_ttl_bulk(modeladmin, request, queryset):
    """Actions that resets TTL information on all resource records of the zone
    to the specified value.

    This action first displays a page which provides an input box to enter
    the new TTL.

    It checks if the user has change permission.

    Based on: https://github.com/django/django/blob/1.4.2/django/contrib/admin/actions.py

    Important
    ---------
    In order to work requires some special form fields (see the template).
    """
    opts = modeladmin.model._meta
    app_label = opts.app_label
    Domain = cache.get_model('powerdns_manager', 'Domain')
    Record = cache.get_model('powerdns_manager', 'Record')
    perm_domain_change = '%s.%s' % (opts.app_label, opts.get_change_permission())
    perm_record_change = '%s.change_record' % opts.app_label
    if not request.user.has_perms([perm_domain_change, perm_record_change]):
        raise PermissionDenied
    # Check that the user has change permission for the Record model
    if not modeladmin.has_change_permission(request):
        raise PermissionDenied
    # The user has set a new TTL value through the forms.TtlSelectionForm form.
    # Make the changes to the selected objects and return a None to display the
    # change list view again.
    #if request.method == 'POST':
    if request.POST.get('post'):
        form = TtlSelectionForm(request.POST)
        if form.is_valid():
            new_ttl = form.cleaned_data['new_ttl']
            reset_zone_minimum = form.cleaned_data['reset_zone_minimum']
            n = queryset.count()
            record_count = 0
            if n and new_ttl:
                for domain_obj in queryset:
                    # Find all resource records of this domain
                    qs = Record.objects.filter(domain=domain_obj)
                    # Now set the new TTL
                    for rr in qs:
                        rr.ttl = int(new_ttl)
                        # If this is the SOA record and ``reset_zone_minimum`` has
                        # been checked, set the minimum TTL of the SOA record equal
                        # to the ``new_ttl`` value
                        #
                        # Important: We do not call ``models.Domain.set_minimum_ttl()``
                        # because we edit the SOA record here.
                        #
                        if reset_zone_minimum and rr.type == 'SOA':
                            bits = rr.content.split()
                            # SOA content: primary hostmaster serial refresh retry expire default_ttl
                            bits[6] = str(new_ttl)
                            rr.content = ' '.join(bits)
                        # Save the resource record
                        rr.save()
                        rr_display = force_unicode(rr)
                        modeladmin.log_change(request, rr, rr_display)
                    # Update the domain serial
                    domain_obj.update_serial()
                    record_count += len(qs)
                messages.info(request, 'Successfully updated %d zones (%d total records).' % (n, record_count))
            # Return None to display the change list page again.
            return None
    else:
        form = TtlSelectionForm()
    # First pass (no confirmation POST yet): render the TTL-selection page.
    info_dict = {
        'form': form,
        'queryset': queryset,
        'opts': opts,
        'app_label': app_label,
        'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
    }
    return render_to_response(
        'powerdns_manager/actions/set_ttl.html', info_dict, context_instance=RequestContext(request), mimetype='text/html')
set_ttl_bulk.short_description = "Set Resource Records TTL"
def force_serial_update(modeladmin, request, queryset):
    """Admin action: bump the SOA serial of every selected zone."""
    for zone in queryset:
        zone.update_serial()
    total = queryset.count()
    messages.info(request, 'Successfully updated %d zones.' % total)
force_serial_update.short_description = "Force serial update"
def clone_zone(modeladmin, request, queryset):
    """Actions that clones the selected zone.

    Accepts only one selected zone.

    Clones:
        - Resource Records
        - Dynamic setting
        - Domain Metadata

    This action first displays a page which provides an input box to enter
    the origin of the new zone.

    It checks if the user has add & change permissions.

    It checks if a zone with the name that has been entered as new exists in
    the database.

    Based on: https://github.com/django/django/blob/1.4.2/django/contrib/admin/actions.py

    Important
    ---------
    In order to work requires some special form fields (see the template).
    """
    opts = modeladmin.model._meta
    app_label = opts.app_label
    Domain = cache.get_model('powerdns_manager', 'Domain')
    Record = cache.get_model('powerdns_manager', 'Record')
    DynamicZone = cache.get_model('powerdns_manager', 'DynamicZone')
    DomainMetadata = cache.get_model('powerdns_manager', 'DomainMetadata')
    # Check the number of selected zones. This action can work on a single zone.
    n = queryset.count()
    if n != 1:
        messages.error(request, 'Only one zone may be selected for cloning.')
        return None
    # Check permissions
    perm_domain_add = '%s.%s' % (opts.app_label, opts.get_add_permission())
    perm_domain_change = '%s.%s' % (opts.app_label, opts.get_change_permission())
    perm_record_add = '%s.add_record' % opts.app_label
    perm_record_change = '%s.change_record' % opts.app_label
    if not request.user.has_perms(
            [perm_domain_add, perm_domain_change, perm_record_add, perm_record_change]):
        raise PermissionDenied
    # Check that the user has change permission for the add and change modeladmin forms
    if not modeladmin.has_add_permission(request):
        raise PermissionDenied
    if not modeladmin.has_change_permission(request):
        raise PermissionDenied
    # The user has set a domain name for the clone through the forms.ClonedZoneDomainForm form.
    #if request.method == 'POST':
    if request.POST.get('post'):
        form = ClonedZoneDomainForm(request.POST)
        if form.is_valid():
            # Store Data from the form
            # Store the new domain name for the clone.
            clone_domain_name = form.cleaned_data['clone_domain_name']
            if not clone_domain_name:
                return None # Should never happen
            option_clone_dynamic = form.cleaned_data['option_clone_dynamic']
            option_clone_metadata = form.cleaned_data['option_clone_metadata']
            # Clone base zone
            # At this point queryset contain exactly one object. Checked above.
            domain_obj = queryset[0]
            # Create the clone (check for uniqueness takes place in forms.ClonedZoneDomainForm)
            clone_obj = Domain.objects.create(
                name = clone_domain_name,
                master = domain_obj.master,
                #last_check = domain_obj.last_check,
                type = domain_obj.type,
                #notified_serial = domain_obj.notified_serial,
                account = domain_obj.account,
                created_by = request.user # We deliberately do not use the domain_obj.created_by
            )
            modeladmin.log_addition(request, clone_obj)
            # Clone Resource Records
            # Find all resource records of this domain
            domain_rr_qs = Record.objects.filter(domain=domain_obj)
            # Create the clone's RRs
            for rr in domain_rr_qs:
                # Construct RR name with interchanged domain
                clone_rr_name = interchange_domain(rr.name, domain_obj.name, clone_domain_name)
                # Special treatment to the content of SOA and SRV RRs
                if rr.type == 'SOA':
                    content_parts = rr.content.split()
                    # primary
                    content_parts[0] = interchange_domain(content_parts[0], domain_obj.name, clone_domain_name)
                    # hostmaster
                    content_parts[1] = interchange_domain(content_parts[1], domain_obj.name, clone_domain_name)
                    # Serial. Set new serial
                    content_parts[2] = generate_serial()
                    clone_rr_content = ' '.join(content_parts)
                elif rr.type == 'SRV':
                    content_parts = rr.content.split()
                    # target
                    content_parts[2] = interchange_domain(content_parts[2], domain_obj.name, clone_domain_name)
                    clone_rr_content = ' '.join(content_parts)
                else:
                    clone_rr_content = interchange_domain(rr.content, domain_obj.name, clone_domain_name)
                # Create and save the cloned record.
                clone_rr = Record(
                    domain = clone_obj,
                    name = clone_rr_name,
                    type = rr.type,
                    content = clone_rr_content,
                    ttl = rr.ttl,
                    prio = rr.prio,
                    auth = rr.auth,
                    ordername = rr.ordername
                )
                clone_rr.save()
                #modeladmin.log_addition(request, clone_rr)
            # Clone Dynamic Zone setting
            if option_clone_dynamic:
                # Get the base domain's dynamic zone.
                # There is only one Dynamic Zone object for each zone.
                domain_dynzone_obj = DynamicZone.objects.get(domain=domain_obj)
                # Create and save the dynamic zone object for the clone.
                clone_dynzone_obj = DynamicZone(
                    domain = clone_obj,
                    is_dynamic = domain_dynzone_obj.is_dynamic
                )
                clone_dynzone_obj.save()
            # Clone the zone's metadata
            if option_clone_metadata:
                # Get the base domain's metadata object.
                # There is only one metadata object for each zone.
                domain_metadata_obj = DomainMetadata.objects.get(domain=domain_obj)
                # Create and save the metadata object for the clone.
                clone_metadata_obj = DomainMetadata(
                    domain = clone_obj,
                    kind = domain_metadata_obj.kind,
                    content = domain_metadata_obj.content
                )
                clone_metadata_obj.save()
            messages.info(request, 'Successfully cloned %s zone to %s' % \
                (domain_obj.name, clone_domain_name))
            # Redirect to the new zone's change form.
            return HttpResponseRedirect(reverse('admin:%s_domain_change' % app_label, args=(clone_obj.id,)))
    else:
        form = ClonedZoneDomainForm()
    # First pass (no confirmation POST yet, or invalid form): render the
    # clone-name entry page.
    info_dict = {
        'form': form,
        'queryset': queryset,
        'opts': opts,
        'app_label': app_label,
        'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
    }
    return render_to_response(
        'powerdns_manager/actions/clone_zone.html', info_dict, context_instance=RequestContext(request), mimetype='text/html')
clone_zone.short_description = "Clone the selected zone"
|
load("//bazel/utils:match.bzl", "matchany", "to_glob")
load("//bazel/utils:labels.bzl", "labelrelative")
def _message(ctx, ofile):
    """Commodity function to format a message with the file paths."""
    # The `progress` attribute may reference {basename} and {path} placeholders.
    return ctx.attr.progress.format(basename = ofile.basename, path = ofile.short_path)
def _run(ctx, ifile, ipath, ofile):
    """Commodity function to run a shell command with less typing."""
    # The user-supplied command reads $input and writes $output; both are
    # passed through the action environment below.
    ctx.actions.run_shell(
        outputs = [ofile],
        inputs = [ifile],
        tools = ctx.files.tools,
        mnemonic = ctx.attr.mnemonic,
        progress_message = _message(ctx, ofile),
        command = ctx.attr.command,
        env = {
            "input": ipath,
            "output": ofile.path,
        },
    )
def debug(on, *args, **kwargs):
    """Prints a message if debugging is enabled."""
    if on:
        print(*args, **kwargs)
def _transform(ctx):
    """Implementation for the transform rule."""
    if ctx.attr.command and not ctx.attr.transform:
        fail(("Target '%s' specifies 'command = ...', but this attribute is ignored when no pattern" +
              " is supplied with the 'transform' attribute") % (ctx.label.name))

    # Build the case-statement patterns consumed by the transformer script.
    lines = []
    for transform in ctx.attr.transform:
        lines.append("%s) transform;;" % to_glob(transform))
    for include in ctx.attr.include:
        lines.append("%s) include;;" % to_glob(include))
    transformer = ctx.actions.declare_file("%s-transformer.sh" % (ctx.label.name))
    ctx.actions.expand_template(template = ctx.file._transformer, output = transformer, substitutions = {
        "{root}": ctx.bin_dir.path,
        "{command}": ctx.attr.command,
        "{patterns}": "\n".join(lines),
        "{debug}": ["", "enabled"][int(ctx.attr.debug)],
    }, is_executable = True)

    outputs = []  # all declared output files/trees
    opaths = []   # (source file, source path, output path) of plain files to process
    for iattr in ctx.attr.inputs:
        for ifile in iattr.files.to_list():
            opath = ifile.short_path
            info = ("FROM", iattr.label, "PATH", opath, "DIR", ifile.is_directory, "ORIGIN", ifile.short_path)
            if not ifile.is_directory:
                debug(ctx.attr.debug, "FILE", *info)
                opaths.append((ifile, ifile.path, opath))
                continue
            if not ifile.short_path in ctx.attr.expand:
                # Tree artifact without expansion: filter/transform it as a whole.
                debug(ctx.attr.debug, "TREE-FILTER", *info)
                add = ctx.actions.declare_directory(opath)
                outputs.append(add)
                ctx.actions.run(inputs = [ifile], outputs = [add], executable = transformer, arguments = [
                    ifile.path,
                    add.path,
                ], tools = ctx.files.tools)
                continue
            debug(ctx.attr.debug, "TREE-EXPAND", *info)
            # BUG FIX: the original reset `outputs = []` at this point, which
            # discarded every output already declared for earlier inputs.
            for output in ctx.attr.expand[ifile.short_path]:
                if output.endswith("/"):
                    add = ctx.actions.declare_directory(output[:-1])
                    outputs.append(add)
                    # BUG FIX: `outputs` must be a list of Files; the original
                    # passed the bare File (`outputs = add`).
                    ctx.actions.run(inputs = [ifile], outputs = [add], executable = transformer, arguments = [
                        ifile.path,
                        add.path,  # ctx.bin_dir.path + "/" + ctx.label.package + "/" + opath
                    ], tools = ctx.files.tools)
                    continue
                opaths.append((ifile, ifile.path + "/" + output, ifile.short_path + "/" + output))

    for ifile, ipath, opath in opaths:
        debug(ctx.attr.debug, "GENERATING FILE", opath, "- FROM TREE?", ifile.is_directory, "- SOURCE PATH", ifile.short_path)
        if matchany(opath, ctx.attr.transform, default = False):
            ofile = ctx.actions.declare_file(opath)
            outputs.append(ofile)
            _run(ctx, ifile, ipath, ofile)
            continue
        if matchany(opath, ctx.attr.include):
            ofile = ctx.actions.declare_file(opath)
            outputs.append(ofile)
            if not ifile.is_directory:
                ctx.actions.symlink(output = ofile, target_file = ifile, progress_message = _message(ctx, ofile))
            else:
                # File extracted from a tree artifact: copy it out.
                ctx.actions.run(outputs = [ofile], inputs = [ifile], executable = "cp", arguments = ["-f", ipath, ofile.path])
            continue

    for o in outputs:
        debug(ctx.attr.debug, "EXPECTING OUTPUT", o.short_path, "- TREE?", o.is_directory)
    return [DefaultInfo(files = depset(outputs))]
# Public rule definition; see _transform above for the implementation.
transform = rule(
    doc = """Transforms or filters the files produced by another rule.
Goal of this rule is to apply a transformation and filter all the files
or directories generated by another rule.
Let's say, for example, that you have a rule named 'compile-idl' that
generates a few .h, .cc, and .c files.
Let's say you need to a) replace a string in the .h files, and b) ensure
that the .cc and .h files are provided as input to a rule that compiles
c++ code.
You can use a 'transform' rule to easily achieve that goal:
    transform(
        name = "filtered",
        inputs = [
            ":compile-idl",
        ],
        transform = [
            "*.h",
        ],
        include = [
            "*.cc", "*.h",
        ],
        command = "sed -e 's@IDL_V_1@IDL_DEV@g' < $input > $output",
    )
Now you can have another rule like:
    cc_library(
        ...
        srcs = [
            ":filtered",
        ],
        ...
    )
To build those files, which will only see the .cc and .h files generated.
If your input rule generates 'tree artifacts', eg, a directory created
with ctx.actions.declarea_directory, which bazel treats as a "single entity",
"transform" will handle the files in that tree correctly.
Further, it will be able to turn a 'tree artifact' into normal
file artifact by means of the 'expand' attribute. See below.
""",
    implementation = _transform,
    attrs = {
        # Note: "inputs" (not "srcs") to make clear these are outputs of other rules.
        "inputs": attr.label_list(
            allow_empty = False,
            doc = "Targets whose output files need to be transformed",
        ),
        "expand": attr.string_list_dict(
            doc = """A set of 'tree artifacts' to transform and turn into 'file artifacts'.
The key for the dictionary is the path of a directory generated by another rule.
The value for the key is a list of files to turn into file artifacts.
For example, let's say that you have a rule:
    //frontend/js/data:css
which calls declare_directory to export a path "sass/minified".
Let's say you want to extract two files from this directory:
"prod/small.css", "prod/commented.css", among the hundreds of files generated.
What you can do is have:
    transform(
        ...
        inputs = ["//frontend/js/data:css"],
        expand = {
            "frontend/js/data/css/sass/minified": [
                "prod/small.css",
                "prod/commented.css",
            ],
        }
        ...
    )
... to only include those two files, as file artifacts, from the
input tree provided by data:css.
Those two files will further be included and transformed as per
include and transform attributes.
If the path to be extracted ends with a '/', eg, 'prod/', it will
be created as an output tree artifact, rather than a file.
""",
        ),
        "include": attr.string_list(
            doc = """Patterns selecting which input files to include in the output.
For each input file, each pattern is checked against the full path of
the file.
If there is at least one match, the input file is included in the
output. If there is no match, the file is NOT included in the output.
If no include pattern is provided (default), all input files are included
in the output.
Patterns are in a simplified glob format, where only '*' is understood,
and can only appear once in the pattern (eg, *.c, mpi_*, mpi*.c are valid
patterns, mpi_*foo*.c is not valid.
Patterns can match on artifact trees, eg, outputs other rules are
producing by means of directory_tree. Unless expand is specified,
artifact trees are simply passed over, with no further expansion.
""",
        ),
        "transform": attr.string_list(
            doc = """Patterns selecting which input files need to be transformed.
For each input file, each pattern is checked against the full path of
the file.
If there is at least one match, the command specified in the command
attribute is run with this file as input.
If there is no match, the file is either simply copied, or excluded
from the output, depending on the 'include' attribute.
See the 'include' documentation to learn about valid patterns.
""",
        ),
        "command": attr.string(
            mandatory = False,
            doc = """Command to run for each file matching 'transform'.
The command is run once per file, with $input representing the input
file to transform, and $output the desired output file.
""",
        ),
        "tools": attr.label_list(
            doc = """Additional tools to build to run this command.
If your transform rule requires building a tool from your bazel
tree, specify the tool here. For example, let's say you need
a protocol buffer compiler or an idl-compiler from your tree, you
would need:
    ...
    tools = [
        "//path/to/idl",
    ],
    ...
In your transform target.
""",
        ),
        "debug": attr.bool(
            doc = "Turns on debugging. Will print inputs, outputs, and actions.",
            default = False,
        ),
        "mnemonic": attr.string(
            default = "CustomTransform",
            doc = "Passed as 'mnemonic' to the actions run - useful to customize bazel output",
        ),
        "progress": attr.string(
            default = "Transforming {basename}",
            doc = "Passed as progress_message to the actions run - useful to customize basel output",
        ),
        # Shell template instantiated once per target; see _transform.
        "_transformer": attr.label(
            default = Label("//bazel/utils/transform:transformer.sh"),
            allow_single_file = True,
        ),
    },
)
|
# Experiment configuration constants for an LwLL image-classification task.
dataset_type ='sample'
problem_type = 'image_classification'
dataset_dir = '/lwll'
api_url = 'https://api-dev.lollllz.com/'
problem_task = 'problem_test_image_classification'
# NOTE(review): hard-coded credentials below. `gov_team_secret` is clearly a
# mock, but confirm `team_secret` is not a live secret before publishing.
team_secret = 'a5aed2a8-db80-4b22-bf72-11f2d0765572'
gov_team_secret = 'mock-secret'
# Bundled SCADS database and embedding files (paths relative to the dataset
# root — presumably dataset_dir; TODO confirm against the consumer).
data_paths = ('predefined/scads.spring2021.sqlite3',
'predefined/embeddings/numberbatch-en19.08.txt.gz',
'predefined/embeddings/spring2021_processed_numberbatch.h5')
|
from __future__ import division
import warnings
import torch
import torch.nn as nn
from mmdet.core import RotBox2Polys, polygonToRotRectangle_batch
from mmdet.core import (bbox_mapping, merge_aug_proposals, merge_aug_bboxes,
merge_aug_masks, multiclass_nms, multiclass_nms_rbbox)
from mmdet.core import (build_assigner, bbox2roi, dbbox2roi, bbox2result, build_sampler,
dbbox2result, merge_aug_masks, roi2droi, mask2poly,
get_best_begin_point, polygonToRotRectangle_batch,
gt_mask_bp_obbs_list, choose_best_match_batch,
choose_best_Rroi_batch, dbbox_rotate_mapping, bbox_rotate_mapping)
from ..detectors.base import BaseDetector
from ..builder import DETECTORS, build_backbone, build_head, build_neck
import logging
@DETECTORS.register_module()
class FCOSR(BaseDetector):
    def __init__(self,
                 backbone,
                 neck=None,
                 rbbox_head=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None,
                 init_cfg=None):
        """Build the FCOSR detector from config dicts.

        Only the whitelisted backbone/neck types below are accepted.
        ``pretrained`` is deprecated in favour of ``init_cfg``.
        """
        assert backbone['type'] in ['ReResNet', 'ResNet', 'ResNeXt', 'Res2Net', 'ResNeSt', 'RegNet',
                                    'CSPDarknet', 'MobileNetV2', 'ShuffleNetV2_Plus', 'MobileNetV2_N']
        assert neck['type'] in ['ReFPN', 'FPN', 'PAFPN', 'RFP']
        super(FCOSR, self).__init__(init_cfg)
        if pretrained:
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            # Route the legacy argument through the backbone config.
            backbone.pretrained = pretrained
        self.backbone = build_backbone(backbone)
        if neck is not None:
            self.neck = build_neck(neck)
        if rbbox_head is not None:
            self.rbbox_head = build_head(rbbox_head)
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
@property
def with_rbbox(self):
"""bool: whether the detector has a rbbox head"""
return hasattr(self, 'rbbox_head') and self.rbbox_head is not None
def extract_feat(self, img):
if isinstance(img, list):
img = torch.stack(img)
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
x = self.extract_feat(img)
outs = self.rbbox_head(x)
return outs
def forward_train(self,
img,
img_metas,
gt_rboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_bboxes=None,
proposals=None,
**kwargs):
x = self.extract_feat(img)
losses = dict()
if self.with_rbbox:
outs = self.rbbox_head(x)
loss_inputs = outs + (gt_rboxes, gt_labels, self.train_cfg, gt_bboxes)
losses = self.rbbox_head.loss(*loss_inputs)
return losses
def simple_test(self, img, img_meta, proposals=None, rescale=False):
# 旋转增广测试部分 start
rotate_test_cfg = self.test_cfg.get('rotate_test', dict(enable=False))
assert isinstance(rotate_test_cfg, dict)
if rotate_test_cfg.get('enable'):
"""rot90, 第二个参数表示旋转次数,正数表示逆时针转,负数表示顺时针转。
旋转检测需要图像的batchsize等于1,且输出给模型的tensor会扩展成0、逆90、180、顺90度的tensor,
此时的batchsize=4.
"""
assert len(img) == 1
rotate_test_num = rotate_test_cfg.get('rot90')
if rotate_test_num is None:
rotate_test_num = [0, 1, 2, 3]
assert isinstance(rotate_test_num, list)
imgs = []
for num in rotate_test_num:
if num == 0:
imgs.append(img)
else:
imgs.append(torch.rot90(img, num, [2, 3]))
img = torch.cat(imgs, dim=0)
img_meta *= 4
# 旋转增广测试部分 end
x = self.extract_feat(img)
if self.with_rbbox:
outs = self.rbbox_head(x)
bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
bbox_list = self.rbbox_head.get_rbboxes(*bbox_inputs)
rbbox_results = [
dbbox2result(det_bboxes, det_labels, self.rbbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
else:
raise ValueError('must have at least one head.')
return rbbox_results
def aug_test(self, imgs, img_metas, rescale=None):
raise NotImplementedError
def onnx_export(self, img, img_metas):
"""Test function without nms.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det, class].
"""
x = self.extract_feat(img)
outs = self.rbbox_head.forward_onnx(x)
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
# get pad input shape to support onnx dynamic shape for exporting
# `CornerNet` and `CentripetalNet`, which 'pad_shape' is used
# for inference
img_metas[0]['pad_shape_for_onnx'] = img_shape
bbox_inputs = outs + (img_metas, self.test_cfg)
det_bboxes, det_labels = self.rbbox_head.get_rbboxes_onnx(*bbox_inputs)
return det_bboxes, det_labels |
from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
import pickle
# Save a fitted model into the artifacts directory.
def save_model(model, path):
    """Pickle *model* to the file named by *path*.

    BUGFIX: ``pickle.dump`` requires an open (binary) file object as its
    second argument; the previous version passed the path string itself,
    which raised ``TypeError: file must have a 'write' attribute``.
    """
    with open(path, 'wb') as model_file:
        pickle.dump(model, model_file)
# Clustering helpers for custom training.
class ClusteringModels:
    """Thin factory wrappers around scikit-learn clustering estimators.

    Each public method builds its estimator from ``**kwargs``.  With
    ``fit_model=True`` it also runs ``fit_predict`` on ``X`` and returns a
    ``(model, labels)`` pair; otherwise the unfitted model alone is returned.
    """
    @staticmethod
    def _build(estimator_cls, X, fit_model, kwargs):
        # Shared construction/fitting logic for all three estimator types.
        model = estimator_cls(**kwargs)
        if not fit_model:
            return model
        return model, model.fit_predict(X)
    @staticmethod
    def kmeans_clustering(X, fit_model=False, **kwargs):
        """Build (and optionally fit-predict) a KMeans model."""
        return ClusteringModels._build(KMeans, X, fit_model, kwargs)
    @staticmethod
    def dbscan_clustering(X, fit_model=False, **kwargs):
        """Build (and optionally fit-predict) a DBSCAN model."""
        return ClusteringModels._build(DBSCAN, X, fit_model, kwargs)
    @staticmethod
    def agglomerative_clustering(X, fit_model=False, **kwargs):
        """Build (and optionally fit-predict) an AgglomerativeClustering model."""
        return ClusteringModels._build(AgglomerativeClustering, X, fit_model, kwargs)
|
from django.db.models import Count
from otcore.relation.models import RelationType, RelatedBasket
from otcore.relation.processing import single_hit_containment, reverse_containment
from otcore.hit.models import Hit, Basket
from otcore.topic.models import Tokengroup
from otcore.settings import otcore_settings
def write_multiple_tokens(filename):
    """Dump every MultipleTokens relation to *filename*, one
    ``source|destination`` display-name pair per line."""
    relation_type = RelationType.objects.get(rtype='MultipleTokens')
    relations = RelatedBasket.objects.filter(
        relationtype=relation_type).select_related('source', 'destination')
    with open(filename, 'w') as outfile:
        for relation in relations:
            outfile.write('{}|{}\n'.format(
                relation.source.display_name, relation.destination.display_name))
def get_rtypes():
    """Fetch (creating on first use) the three RelationTypes used by the
    NYU rules, keyed by lower-case short name."""
    specs = {
        'containment': ('Containment', {
            'role_from': 'contained by',
            'role_to': 'contains',
            'symmetrical': False,
        }),
        'subentry': ('Subentry', {
            'role_from': 'Main Entry of',
            'role_to': 'Subentry of',
            'symmetrical': False,
        }),
        'multipletokens': ('MultipleTokens', {
            'role_from': 'Multiple Tokens with',
            'role_to': 'Multiple Tokens with',
        }),
    }
    rtypes = {}
    # ``defaults`` only applies when the row does not already exist.
    for key, (rtype_name, defaults) in specs.items():
        rtypes[key], _ = RelationType.objects.get_or_create(
            rtype=rtype_name, defaults=defaults)
    return rtypes
def nyu_process_single_basket(basket, run_containment=True, run_multipletokens=True):
    """
    Runs the containment and multiple-token rules on a single basket.

    Compares the basket's own hit slugs against the slugs of ALL hits in the
    database, so the cost grows with total corpus size, not basket size.
    """
    rtypes = get_rtypes()
    if run_containment:
        # All distinct slugs in the corpus, each paired with its token set.
        all_slugs = Hit.objects.order_by('slug').distinct('slug').values_list('slug', flat=True)
        slug_sets = [(frozenset(slug.split('-')), slug) for slug in all_slugs]
        basket_slugs = basket.topic_hits.order_by('slug').distinct('slug').values_list('slug', flat=True)
        for slug in basket_slugs:
            slug_set = (frozenset(slug.split('-')), slug)
            # Strictly-longer token sets may contain this slug ...
            longer_slugs = [s for s in slug_sets if len(slug_set[0]) < len(s[0])]
            single_hit_containment(slug_set, longer_slugs, rtypes)
            # ... and strictly-shorter ones may be contained by it.
            shorter_slugs = [s for s in slug_sets if len(slug_set[0]) > len(s[0])]
            reverse_containment(slug_set, shorter_slugs, rtypes)
    if run_multipletokens:
        hits = Hit.objects.all()
        # Only hits with at least MULTIPLE_RELATIONS_COUNT distinct tokens can
        # ever satisfy the multiple-token rule, so prune the rest up front.
        slug_sets = [(frozenset(hit.slug.split('-')), hit) for hit in hits if len(frozenset(hit.slug.split('-'))) >= otcore_settings.MULTIPLE_RELATIONS_COUNT]
        for hit in basket.topic_hits.all():
            slug_set = (frozenset(hit.slug.split('-')), hit)
            nyu_single_set_multiple_tokens(slug_set, slug_sets, rtypes)
def nyu_global_multiple_tokens():
    """Run the multiple-token rule across every pair of qualifying hits."""
    rtypes = get_rtypes()
    minimum = otcore_settings.MULTIPLE_RELATIONS_COUNT
    # Only hits with at least ``minimum`` distinct tokens can ever qualify.
    slug_sets = [
        (frozenset(hit.slug.split('-')), hit)
        for hit in Hit.objects.all()
        if len(frozenset(hit.slug.split('-'))) >= minimum
    ]
    # Compare each entry against every later entry exactly once.
    for index, slug_set in enumerate(slug_sets[:-1]):
        nyu_single_set_multiple_tokens(slug_set, slug_sets[index + 1:], rtypes)
def nyu_single_set_multiple_tokens(slug_set, slug_sets, rtypes):
    """
    Check for multiple token relations on a single hit/slug_set pair. If the two baskets share
    a common main entry, the cutoff for multiple relations is actually:
    number-of-slugs-in-main-entry + 1

    ``slug_set`` is a ``(frozenset_of_tokens, hit)`` pair and ``slug_sets`` is
    a list of such pairs to compare it against.
    """
    # Candidates: entries sharing at least MULTIPLE_RELATIONS_COUNT tokens.
    intersections = [s for s in slug_sets if len(s[0].intersection(slug_set[0])) >= otcore_settings.MULTIPLE_RELATIONS_COUNT]
    for hit_set in intersections:
        hit1 = slug_set[1]
        hit2 = hit_set[1]
        # Only relate distinct baskets that are not already related in
        # either direction.
        if hit1.basket != hit2.basket and \
            not RelatedBasket.objects.filter(source=hit1.basket, destination=hit2.basket).exists() and \
            not RelatedBasket.objects.filter(source=hit2.basket, destination=hit1.basket).exists():
            shared_main = get_shared_main_entry(hit1.basket, hit2.basket, rtypes=rtypes)
            # print("{} | {} | {}".format(shared_main, hit1, hit2))
            # Skip creating relation if the number of shared slugs is less than main_slugs + 1
            if shared_main is not None:
                main_tokens = [set(hit.slug.split('-')) for hit in shared_main.topic_hits.all()]
                # get the slug of the name in common with the subentries:
                # the largest main-entry token set that is a proper subset of
                # BOTH candidate token sets.
                try:
                    shared_tokens = sorted([tokens for tokens in main_tokens
                                            if tokens < slug_set[0] and tokens < hit_set[0]
                                            ], key=lambda x: len(x), reverse=True)[0]
                except IndexError:
                    # means that this combination of hits aren't those shared by this topic.
                    # Move on to the next set
                    print("SKIPPING: {} | {} | {}".format(shared_main, hit1, hit2))
                    continue
                # Require strictly MORE shared tokens than the shared main
                # entry contributes by itself.
                main_token_count = len(shared_tokens)
                if len(slug_set[0].intersection(hit_set[0])) <= main_token_count:
                    continue
            RelatedBasket.objects.create(
                relationtype=rtypes['multipletokens'],
                source=hit1.basket,
                destination=hit2.basket
            )
def get_shared_main_entry(basket1, basket2, rtypes=None):
    """
    Takes two baskets, and checks to see if they are both subentries of the
    same main entry.  Returns the first shared main entry, or None.
    """
    rtypes = get_rtypes() if rtypes is None else rtypes
    first_mains = get_main_entry(basket1, rtypes=rtypes)
    second_mains = get_main_entry(basket2, rtypes=rtypes)
    if first_mains is None or second_mains is None:
        return None
    for candidate in first_mains:
        # Equality-based membership mirrors the original pairwise comparison.
        if candidate in second_mains:
            return candidate
    return None
def get_main_entry(basket, rtypes=None):
    """
    Given a basket, gets its main entries if it's a subentry; otherwise
    returns None.

    A merged basket COULD be a subentry of two different topics, so a list
    of main baskets is returned rather than a single one.
    """
    if rtypes is None:
        rtypes = get_rtypes()
    relations = RelatedBasket.objects.filter(
        destination=basket, relationtype=rtypes['subentry'])
    if not relations:
        return None
    return [relation.source for relation in relations]
def delete_multiple_tokens_from_shared_subentries():
    """
    Utility that deletes MultipleToken relationships, but only where the two
    related topics are subentries of the same main entry.
    """
    rtypes = get_rtypes()
    multiple_token_relations = RelatedBasket.objects.filter(
        relationtype=rtypes['multipletokens'])
    for relation in multiple_token_relations:
        shared = get_shared_main_entry(relation.source, relation.destination)
        if shared is not None:
            relation.delete()
|
#!/usr/bin/python3
from colored import fg, attr
from distutils.util import strtobool
def run(age):
    """Echo the supplied age value to standard output."""
    print(age)
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Tests Spack's ability to substitute a different version into a URL."""
import os
import pytest
import spack.url
@pytest.mark.parametrize('base_url,version,expected', [
    # Ensures that substituting the same version results in the same URL
    ('http://www.mr511.de/software/libelf-0.8.13.tar.gz', '0.8.13',
     'http://www.mr511.de/software/libelf-0.8.13.tar.gz'),
    # Test a completely different version syntax
    ('http://www.prevanders.net/libdwarf-20130729.tar.gz', '8.12',
     'http://www.prevanders.net/libdwarf-8.12.tar.gz'),
    # Test a URL where the version appears twice
    # It should get substituted both times
    ('https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz', '2.1.3',
     'https://github.com/hpc/mpileaks/releases/download/v2.1.3/mpileaks-2.1.3.tar.gz'),
    # Test now with a partial prefix earlier in the URL
    # This is hard to figure out so Spack only substitutes
    # the last instance of the version
    ('https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.1.0.tar.bz2', '2.2.0',
     'https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.2.0.tar.bz2'),
    ('https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.1.0.tar.bz2', '2.2',
     'https://www.open-mpi.org/software/ompi/v2.1/downloads/openmpi-2.2.tar.bz2'),
    # No separator between the name and version of the package
    ('file://{0}/turbolinux702.tar.gz'.format(os.getcwd()), '703',
     'file://{0}/turbolinux703.tar.gz'.format(os.getcwd())),
    ('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true', '2.0.7',
     'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true'),
    ('https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true', '4.7',
     'https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v4.7.tgz?raw=true'),
    # Package name contains regex characters
    ('http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz', '1.2.3',
     'http://math.lbl.gov/voro++/download/dir/voro++-1.2.3.tar.gz'),
])
def test_url_substitution(base_url, version, expected):
    """Substituting *version* into *base_url* must yield *expected*."""
    assert spack.url.substitute_version(base_url, version) == expected
|
"""Module for Testing the InVEST cli framework."""
import sys
import os
import shutil
import tempfile
import unittest
import unittest.mock
import contextlib
import json
import importlib
import uuid
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
@contextlib.contextmanager
def redirect_stdout():
    """Redirect stdout to a StringIO buffer, which is then yielded.

    The original stream is restored even when the with-body raises, so a
    failing test can no longer leave ``sys.stdout`` pointing at the buffer.
    """
    old_stdout = sys.stdout
    stdout_buffer = StringIO()
    sys.stdout = stdout_buffer
    try:
        yield stdout_buffer
    finally:
        # BUGFIX: the previous version restored stdout only on the success
        # path; an exception inside the with-body leaked the redirection
        # into every subsequent test.
        sys.stdout = old_stdout
class CLIHeadlessTests(unittest.TestCase):
    """Headless Tests for CLI.

    Each test gets a fresh temporary workspace; model execution is mocked
    where the test only cares about argument plumbing.
    """
    def setUp(self):
        """Use a temporary workspace for all tests in this class."""
        self.workspace_dir = tempfile.mkdtemp()
    def tearDown(self):
        """Remove the temporary workspace after a test run."""
        shutil.rmtree(self.workspace_dir)
    def test_run_fisheries_workspace_in_json(self):
        """CLI: Run the fisheries model with JSON-defined workspace."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'invest-test-data',
            'fisheries', 'spiny_lobster_belize.invs.json')
        # BUGFIX: close the source file instead of leaking the handle
        # (was ``json.load(open(parameter_set_path))``).
        with open(parameter_set_path) as parameter_file:
            datastack_dict = json.load(parameter_file)
        datastack_dict['args']['workspace_dir'] = self.workspace_dir
        new_parameter_set_path = os.path.join(
            self.workspace_dir, 'paramset.invs.json')
        with open(new_parameter_set_path, 'w') as parameter_set_file:
            parameter_set_file.write(
                json.dumps(datastack_dict, indent=4, sort_keys=True))
        with unittest.mock.patch(
                'natcap.invest.fisheries.fisheries.execute',
                return_value=None) as patched_model:
            cli.main([
                'run',
                'fisheries',  # uses an exact modelname
                '--datastack', new_parameter_set_path,
                '--headless',
            ])
        patched_model.assert_called_once()
    def test_run_fisheries(self):
        """CLI: Run the fisheries model through the cli."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'invest-test-data',
            'fisheries', 'spiny_lobster_belize.invs.json')
        with unittest.mock.patch(
                'natcap.invest.fisheries.fisheries.execute',
                return_value=None) as patched_model:
            cli.main([
                'run',
                'fisheries',  # uses an exact modelname
                '--datastack', parameter_set_path,
                '--headless',
                '--workspace', self.workspace_dir,
            ])
        patched_model.assert_called_once()
    def test_run_fisheries_no_workspace(self):
        """CLI: Run the fisheries model through the cli without a workspace."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'invest-test-data',
            'fisheries', 'spiny_lobster_belize.invs.json')
        with self.assertRaises(SystemExit) as exit_cm:
            cli.main([
                'run',
                'fisheries',  # uses an exact modelname
                '--datastack', parameter_set_path,
                '--headless',
            ])
        self.assertEqual(exit_cm.exception.code, 1)
    def test_run_fisheries_no_datastack(self):
        """CLI: Run the fisheries model through the cli without a datastack."""
        from natcap.invest import cli
        with self.assertRaises(SystemExit) as exit_cm:
            cli.main([
                'run',
                'fisheries',  # uses an exact modelname
                '--headless',
                '--workspace', self.workspace_dir,
            ])
        self.assertEqual(exit_cm.exception.code, 1)
    def test_run_fisheries_invalid_datastack(self):
        """CLI: Run the fisheries model through the cli invalid datastack."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            self.workspace_dir, 'bad-paramset.invs.json')
        with open(parameter_set_path, 'w') as paramset_file:
            paramset_file.write('not a json object')
        with self.assertRaises(SystemExit) as exit_cm:
            cli.main([
                'run',
                'fisheries',  # uses an exact modelname
                '--datastack', parameter_set_path,
                '--headless',
            ])
        self.assertEqual(exit_cm.exception.code, 1)
    def test_run_ambiguous_modelname(self):
        """CLI: Raise an error when an ambiguous model name used."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'invest-test-data',
            'fisheries', 'spiny_lobster_belize.invs.json')
        with self.assertRaises(SystemExit) as exit_cm:
            cli.main([
                'run',
                'fish',  # ambiguous substring
                '--datastack', parameter_set_path,
                '--headless',
                '--workspace', self.workspace_dir,
            ])
        self.assertEqual(exit_cm.exception.code, 1)
    def test_model_alias(self):
        """CLI: Use a model alias through the CLI."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'invest-test-data',
            'coastal_blue_carbon', 'cbc_galveston_bay.invs.json')
        target = (
            'natcap.invest.coastal_blue_carbon.coastal_blue_carbon.execute')
        with unittest.mock.patch(target, return_value=None) as patched_model:
            cli.main([
                'run',
                'cbc',  # uses an alias
                '--datastack', parameter_set_path,
                '--headless',
                '--workspace', self.workspace_dir,
            ])
        patched_model.assert_called_once()
    def test_no_model_given(self):
        """CLI: Raise an error when no model name given."""
        from natcap.invest import cli
        with self.assertRaises(SystemExit) as exit_cm:
            cli.main(['run'])
        # argparse exits with code 2 on a missing required argument.
        self.assertEqual(exit_cm.exception.code, 2)
    def test_no_model_matches(self):
        """CLI: raise an error when no model name matches what's given."""
        from natcap.invest import cli
        with self.assertRaises(SystemExit) as exit_cm:
            cli.main(['run', 'qwerty'])
        self.assertEqual(exit_cm.exception.code, 1)
    def test_list(self):
        """CLI: Verify no error when listing models."""
        from natcap.invest import cli
        with self.assertRaises(SystemExit) as exit_cm:
            cli.main(['list'])
        self.assertEqual(exit_cm.exception.code, 0)
    def test_list_json(self):
        """CLI: Verify no error when listing models as JSON."""
        from natcap.invest import cli
        with redirect_stdout() as stdout_stream:
            with self.assertRaises(SystemExit) as exit_cm:
                cli.main(['list', '--json'])
            # Verify that we can load the JSON object without error
            stdout_value = stdout_stream.getvalue()
            loaded_list_object = json.loads(stdout_value)
            self.assertEqual(type(loaded_list_object), dict)
        self.assertEqual(exit_cm.exception.code, 0)
    def test_validate_fisheries(self):
        """CLI: Validate the fisheries model inputs through the cli."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'invest-test-data',
            'fisheries', 'spiny_lobster_belize.invs.json')
        # The InVEST sample data JSON arguments don't have a workspace, so I
        # need to add it in.
        # BUGFIX: close the source file instead of leaking the handle.
        with open(parameter_set_path) as parameter_file:
            datastack_dict = json.load(parameter_file)
        datastack_dict['args']['workspace_dir'] = self.workspace_dir
        new_parameter_set_path = os.path.join(
            self.workspace_dir, 'paramset.invs.json')
        with open(new_parameter_set_path, 'w') as parameter_set_file:
            parameter_set_file.write(
                json.dumps(datastack_dict, indent=4, sort_keys=True))
        with redirect_stdout() as stdout_stream:
            with self.assertRaises(SystemExit) as exit_cm:
                cli.main([
                    'validate',
                    new_parameter_set_path,
                ])
            self.assertTrue(len(stdout_stream.getvalue()) > 0)
        self.assertEqual(exit_cm.exception.code, 0)
    def test_validate_fisheries_missing_workspace(self):
        """CLI: Validate the fisheries model inputs through the cli."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'invest-test-data',
            'fisheries', 'spiny_lobster_belize.invs.json')
        # The InVEST sample data JSON arguments don't have a workspace. In
        # this case, I want to leave it out and verify validation catches it.
        with redirect_stdout() as stdout_stream:
            with self.assertRaises(SystemExit) as exit_cm:
                cli.main([
                    'validate',
                    parameter_set_path,
                ])
            self.assertTrue(len(stdout_stream.getvalue()) > 0)
        # Validation failed, not the program.
        self.assertEqual(exit_cm.exception.code, 0)
    def test_validate_fisheries_missing_workspace_json(self):
        """CLI: Validate the fisheries model inputs through the cli."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'invest-test-data',
            'fisheries', 'spiny_lobster_belize.invs.json')
        # The InVEST sample data JSON arguments don't have a workspace. In
        # this case, I want to leave it out and verify validation catches it.
        with redirect_stdout() as stdout_stream:
            with self.assertRaises(SystemExit) as exit_cm:
                cli.main([
                    'validate',
                    parameter_set_path,
                    '--json',
                ])
            stdout = stdout_stream.getvalue()
            self.assertTrue(len(stdout) > 0)
            self.assertEqual(len(json.loads(stdout)), 1)  # workspace_dir invalid
        # Validation failed, not the program.
        self.assertEqual(exit_cm.exception.code, 0)
    def test_validate_invalid_json(self):
        """CLI: Validate invalid json files set an error code."""
        from natcap.invest import cli
        paramset_path = os.path.join(self.workspace_dir, 'invalid.json')
        with open(paramset_path, 'w') as opened_file:
            opened_file.write('not a json object')
        with redirect_stdout() as stdout_stream:
            with self.assertRaises(SystemExit) as exit_cm:
                cli.main([
                    'validate',
                    paramset_path,
                    '--json',
                ])
            self.assertTrue(len(stdout_stream.getvalue()) == 0)
        self.assertEqual(exit_cm.exception.code, 1)
    def test_validate_fisheries_json(self):
        """CLI: Validate the fisheries model inputs as JSON through the cli."""
        from natcap.invest import cli
        parameter_set_path = os.path.join(
            os.path.dirname(__file__), '..', 'data', 'invest-test-data',
            'fisheries', 'spiny_lobster_belize.invs.json')
        # The InVEST sample data JSON arguments don't have a workspace, so I
        # need to add it in.
        # BUGFIX: close the source file instead of leaking the handle.
        with open(parameter_set_path) as parameter_file:
            datastack_dict = json.load(parameter_file)
        datastack_dict['args']['workspace_dir'] = self.workspace_dir
        # In this case, I also want to set one of the inputs to an invalid path
        # to test the presentation of a validation error.
        datastack_dict['args']['aoi_vector_path'] = os.path.join(
            self.workspace_dir, 'not-a-vector.shp')
        new_parameter_set_path = os.path.join(
            self.workspace_dir, 'paramset.invs.json')
        with open(new_parameter_set_path, 'w') as parameter_set_file:
            parameter_set_file.write(
                json.dumps(datastack_dict, indent=4, sort_keys=True))
        with redirect_stdout() as stdout_stream:
            with self.assertRaises(SystemExit) as exit_cm:
                cli.main([
                    'validate',
                    new_parameter_set_path,
                    '--json',
                ])
            stdout = stdout_stream.getvalue()
            stdout_json = json.loads(stdout)
            self.assertEqual(len(stdout_json), 1)
            # migration path, aoi_vector_path, population_csv_path not found
            # population_csv_dir is also incorrect, but shouldn't be marked
            # invalid because do_batch is False
            self.assertEqual(len(stdout_json['validation_results']), 3)
        # Validation returned successfully, so error code 0 even though there
        # are warnings.
        self.assertEqual(exit_cm.exception.code, 0)
    def test_export_python(self):
        """CLI: Export a python script for a given model."""
        from natcap.invest import cli
        target_filepath = os.path.join(self.workspace_dir, 'foo.py')
        with redirect_stdout() as stdout_stream:
            with self.assertRaises(SystemExit) as exit_cm:
                cli.main(['export-py', 'carbon', '-f', target_filepath])
        self.assertTrue(os.path.exists(target_filepath))
        # the contents of the file are asserted in CLIUnitTests
        self.assertEqual(exit_cm.exception.code, 0)
    def test_export_python_default_filepath(self):
        """CLI: Export a python script without passing a filepath."""
        from natcap.invest import cli
        model = 'carbon'
        # cannot write this file to self.workspace because we're
        # specifically testing the file is created in a default location.
        expected_filepath = f'{model}_execute.py'
        with redirect_stdout() as stdout_stream:
            with self.assertRaises(SystemExit) as exit_cm:
                cli.main(['export-py', model])
        self.assertTrue(os.path.exists(expected_filepath))
        os.remove(expected_filepath)
        self.assertEqual(exit_cm.exception.code, 0)
class CLIUnitTests(unittest.TestCase):
    """Unit Tests for CLI utilities."""
    def setUp(self):
        """Use a temporary workspace for all tests in this class."""
        self.workspace_dir = tempfile.mkdtemp()
    def tearDown(self):
        """Remove the temporary workspace after a test run."""
        shutil.rmtree(self.workspace_dir)
    def _load_generated_module(self, filepath):
        """Import the generated script under a unique module name and return it."""
        module_name = str(uuid.uuid4()) + 'testscript'
        spec = importlib.util.spec_from_file_location(module_name, filepath)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module
    def test_export_to_python_default_args(self):
        """Export a python script w/ default args for a model."""
        from natcap.invest import cli
        script_path = os.path.join(self.workspace_dir, 'foo.py')
        target_model = 'carbon'
        expected_data = 'natcap.invest.carbon.execute(args)'
        cli.export_to_python(script_path, target_model)
        self.assertTrue(os.path.exists(script_path))
        # The generated args dict should have one empty-string entry per
        # parameter in the model's ARGS_SPEC.
        model_module = importlib.import_module(
            name=cli._MODEL_UIS[target_model].pyname)
        expected_args = {key: '' for key in model_module.ARGS_SPEC['args']}
        generated = self._load_generated_module(script_path)
        self.assertEqual(generated.args, expected_args)
        with open(script_path, 'r') as script_file:
            self.assertIn(expected_data, script_file.read())
    def test_export_to_python_with_args(self):
        """Export a python script w/ args for a model."""
        from natcap.invest import cli
        script_path = os.path.join(self.workspace_dir, 'foo.py')
        expected_args = {
            'workspace_dir': 'myworkspace',
            'lulc': 'myraster.tif',
            'parameter': 0.5,
        }
        cli.export_to_python(script_path, 'carbon', expected_args)
        self.assertTrue(os.path.exists(script_path))
        generated = self._load_generated_module(script_path)
        self.assertEqual(generated.args, expected_args)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras.backend import _preprocess_padding, _preprocess_conv2d_input, shape, image_data_format
from tensorflow.python.ops import array_ops
from dynastes.core import nn as dnn
def depthwise_conv2d_transpose(x,
                               kernel,
                               output_shape,
                               strides=(1, 1),
                               padding='valid',
                               data_format=None,
                               dilation_rate=(1, 1)):
    """2D depthwise deconvolution (i.e.
    transposed depthwise convolution).
    Arguments:
        x: Tensor or variable.
        kernel: kernel tensor.
        output_shape: 1D int tensor for the output shape.
        strides: strides tuple.
        padding: string, `"same"` or `"valid"`.
        data_format: string, `"channels_last"` or `"channels_first"`.
        dilation_rate: Tuple of 2 integers.
    Returns:
        A tensor, result of transposed 2D convolution.
    Raises:
        ValueError: if `data_format` is neither `channels_last` or
            `channels_first`.
    """
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format: ' + str(data_format))
    # `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
    if data_format == 'channels_first' and dilation_rate != (1, 1):
        force_transpose = True
    else:
        force_transpose = False
    # May physically transpose NCHW input to NHWC (see force_transpose above);
    # tf_data_format reports the layout actually fed to the op.
    x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
    # Reorder output_shape from NCHW to NHWC to match the converted input.
    if data_format == 'channels_first' and tf_data_format == 'NHWC':
        output_shape = (output_shape[0], output_shape[2], output_shape[3],
                        output_shape[1])
    # Unknown batch dimension: take it from the runtime shape of x.
    if output_shape[0] is None:
        output_shape = (shape(x)[0],) + tuple(output_shape[1:])
    if isinstance(output_shape, (tuple, list)):
        output_shape = array_ops.stack(list(output_shape))
    padding = _preprocess_padding(padding)
    # Expand 2D strides to the 4D layout expected by the low-level op.
    # NOTE(review): this concatenation assumes `strides` is a tuple -- a list
    # argument would raise TypeError here; confirm callers always pass tuples.
    if tf_data_format == 'NHWC':
        strides = (1,) + strides + (1,)
    else:
        strides = (1, 1) + strides
    x = dnn.depthwise_conv2d_transpose(x, kernel, output_shape, strides,
                                       padding=padding,
                                       data_format=tf_data_format)
    # Convert the result back to the caller's requested layout.
    if data_format == 'channels_first' and tf_data_format == 'NHWC':
        x = array_ops.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
    return x
|
# Python Script, API Version = V17
import os,time
######## User guide ##########
# run the script inside a project
# it search each component for bodies,
# if any bodies, it group together and save
# to a file with the file name as component name
######## User Input ##########
# please provide the path to save the model
# for example "D:\\Program\\McCad\\"
PathToSave = "D:\\Projects\\IFMIF_DONES\\40_TC_modeling_2020\\decomposed\\PCP"
##############################
def recursive(comp, count):
    "Recursively walk the component tree and export each component's bodies to a STEP file."
    # NOTE: this is IronPython 2 run inside SpaceClaim (print statement,
    # Selection/Copy/Paste/DocumentSave are SpaceClaim API globals).
    #recursively save the bodies for all components
    if len(comp.Components)>0:
        for icomp in comp.Components :
            recursive(icomp,count)
    #if this component has bodies directly, copy them into a fresh document
    #and save that document as <component name>.stp
    if len(comp.GetBodies())>0:
        sel = Selection.Create(comp.GetBodies())
        result = Copy.ToClipboard(sel)
        #create a new document to save the solid to STEP file
        DocumentHelper.CreateNewDocument() #;time.sleep(0.2)
        result = Paste.FromClipboard()#;time.sleep(0.2)
        #time.sleep(1.0)
        filename = PathToSave + "\\" + comp.GetName() + ".stp" #+ String(count) + "_"
        # Append "-1", "-2", ... until the name is unique on disk.
        counter =1
        while os.path.exists(filename):
            filename = filename[:-4] + "-"+str(counter) + ".stp"
            counter += 1
        print filename
        DocumentSave.Execute(filename,ExportOptions.Create() )#;time.sleep(0.2)
        # delete the solid in this document
        Delete.Execute(Selection.SelectAll())#;time.sleep(0.2)
        CloseWindow()#;time.sleep(0.2)
        #time.sleep(1.0)
        # NOTE(review): this increment has no effect outside the call --
        # ints are passed by value and the return below is commented out.
        count += 1
        # return count
root = GetRootPart()  # SpaceClaim API: root component of the active design
count = 1  # starting file index; NOTE(review): effectively unused -- the
# export names are made unique by the os.path.exists suffix loop instead.
recursive(root,count)
# coding: utf-8
# Notebook-exported script: world choropleth of oil production using folium.
# In[1]:
import folium
import os
# NOTE(review): hard-coded Windows drive as working directory -- the CSV and
# geojson below are expected to live at the root of H:.
os.chdir('h:/')
import pandas as pd
# In[2]:
# This table comes from Wikipedia:
# https://en.wikipedia.org/wiki/List_of_countries_by_oil_production
# Some country names were edited to keep them consistent with the
# names used in the geojson file.
df=pd.read_csv('oil production choropleth.csv')
# In[3]:
# Convert barrels/day to thousands of barrels/day to match the legend.
df['Oil Production']=df['Oil Production'].apply(lambda x: x/1000)
# In[4]:
# location takes (latitude, longitude); zoom_start sets the zoom level:
# 1 = world, 2 = continent, 3 = region, 4 = country, 5 = county, etc.
m=folium.Map(location=(30,50), zoom_start=4)
# geo_data is a geojson file describing country shapes on the map.
# Country (and more detailed) boundaries can be downloaded from the first
# link; the second link converts those files to geojson:
#   https://gadm.org/download_country_v3.html
#   https://mapshaper.org/
# The shape file used here was found on GitHub (original link lost) and is
# included in the repo.
# data is the dataframe; columns names the two columns used -- one for the
# region name and one for the value.  The region names must match the names
# in the geojson, and key_on selects the geojson key holding those names.
# fill_color is a matplotlib cmap name; fill_opacity/line_opacity are
# plotting options, and legend_name labels the legend.
# threshold_scale takes at most six values; branca.utilities.split_six can
# produce an equal-quantile six-way split if preferred.
# NOTE(review): Map.choropleth was deprecated in newer folium releases in
# favor of folium.Choropleth(...).add_to(m) -- confirm the pinned version.
m.choropleth(
    geo_data=(open("worldmapshape.json",encoding = "utf_8_sig").read()),
    name='choropleth',
    data=df,
    columns=['Country', 'Oil Production'],
    key_on='properties.name',
    fill_color='YlOrRd',
    fill_opacity=0.7,
    line_opacity=0.2,
    legend_name='Oil Production Thousand Barrels/Day',
    threshold_scale=[0.0,1.0, 150.0, 800.0, 2000.0, 4000.0]
)
# Layer control acts as a map filter: the choropleth layer can be toggled.
folium.LayerControl().add_to(m)
# display() is provided by IPython/Jupyter -- this line only works inside a
# notebook session.
display(m)
# In general, folium is a really good wrapper around leaflet.js: it saves a
# lot of time otherwise spent learning JavaScript and has a very flat
# learning curve.  The one drawback: place names always render in the local
# language (Google Maps at least adds English), which is mildly annoying --
# other than that, it's pretty cool.
|
import numpy as np
import nimfa
# Demo: probabilistic matrix factorization (PMF) on random data.
# V is a 40x100 non-negative matrix drawn uniformly from [0, 1).
V = np.random.rand(40, 100)
# rank=10 latent factors; 'random_vcol' initializes factors from random
# columns of V; stops after max_iter=12 iterations or relative error < 1e-5.
pmf = nimfa.Pmf(V, seed="random_vcol", rank=10, max_iter=12, rel_error=1e-5)
# Calling the model object runs the factorization and returns the fit result.
pmf_fit = pmf()
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: syft_proto/frameworks/torch/tensors/interpreters/v1/precision.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from syft_proto.types.syft.v1 import id_pb2 as syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2
from syft_proto.frameworks.torch.tensors.interpreters.v1 import additive_shared_pb2 as syft__proto_dot_frameworks_dot_torch_dot_tensors_dot_interpreters_dot_v1_dot_additive__shared__pb2
# File-level descriptor for precision.proto.  serialized_pb is the compiled
# FileDescriptorProto blob emitted by protoc; do not edit it by hand —
# regenerate this module from the .proto source instead.
DESCRIPTOR = _descriptor.FileDescriptor(
    name='syft_proto/frameworks/torch/tensors/interpreters/v1/precision.proto',
    package='syft_proto.frameworks.torch.tensors.interpreters.v1',
    syntax='proto3',
    serialized_options=b'\n@org.openmined.syftproto.frameworks.torch.tensors.interpreters.v1',
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\nCsyft_proto/frameworks/torch/tensors/interpreters/v1/precision.proto\x12\x33syft_proto.frameworks.torch.tensors.interpreters.v1\x1a!syft_proto/types/syft/v1/id.proto\x1aIsyft_proto/frameworks/torch/tensors/interpreters/v1/additive_shared.proto\"\xe5\x02\n\x14\x46ixedPrecisionTensor\x12,\n\x02id\x18\x01 \x01(\x0b\x32\x1c.syft_proto.types.syft.v1.IdR\x02id\x12\x14\n\x05\x66ield\x18\x03 \x01(\tR\x05\x66ield\x12\x14\n\x05\x64type\x18\x04 \x01(\tR\x05\x64type\x12\x12\n\x04\x62\x61se\x18\x05 \x01(\x05R\x04\x62\x61se\x12\x14\n\x05kappa\x18\x06 \x01(\x05R\x05kappa\x12\x31\n\x14precision_fractional\x18\x07 \x01(\x05R\x13precisionFractional\x12\x12\n\x04tags\x18\x08 \x03(\tR\x04tags\x12 \n\x0b\x64\x65scription\x18\t \x01(\tR\x0b\x64\x65scription\x12`\n\x05\x63hild\x18\n \x01(\x0b\x32J.syft_proto.frameworks.torch.tensors.interpreters.v1.AdditiveSharingTensorR\x05\x63hildBB\n@org.openmined.syftproto.frameworks.torch.tensors.interpreters.v1b\x06proto3'
    ,
    dependencies=[syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2.DESCRIPTOR,syft__proto_dot_frameworks_dot_torch_dot_tensors_dot_interpreters_dot_v1_dot_additive__shared__pb2.DESCRIPTOR,])
# Message descriptor for FixedPrecisionTensor (one FieldDescriptor per field).
_FIXEDPRECISIONTENSOR = _descriptor.Descriptor(
    name='FixedPrecisionTensor',
    full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor',
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name='id', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor.id', index=0,
            number=1, type=11, cpp_type=10, label=1,
            has_default_value=False, default_value=None,
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, json_name='id', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='field', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor.field', index=1,
            number=3, type=9, cpp_type=9, label=1,
            has_default_value=False, default_value=b"".decode('utf-8'),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, json_name='field', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='dtype', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor.dtype', index=2,
            number=4, type=9, cpp_type=9, label=1,
            has_default_value=False, default_value=b"".decode('utf-8'),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, json_name='dtype', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='base', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor.base', index=3,
            number=5, type=5, cpp_type=1, label=1,
            has_default_value=False, default_value=0,
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, json_name='base', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='kappa', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor.kappa', index=4,
            number=6, type=5, cpp_type=1, label=1,
            has_default_value=False, default_value=0,
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, json_name='kappa', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='precision_fractional', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor.precision_fractional', index=5,
            number=7, type=5, cpp_type=1, label=1,
            has_default_value=False, default_value=0,
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, json_name='precisionFractional', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='tags', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor.tags', index=6,
            number=8, type=9, cpp_type=9, label=3,
            has_default_value=False, default_value=[],
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, json_name='tags', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='description', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor.description', index=7,
            number=9, type=9, cpp_type=9, label=1,
            has_default_value=False, default_value=b"".decode('utf-8'),
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, json_name='description', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
        _descriptor.FieldDescriptor(
            name='child', full_name='syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor.child', index=8,
            number=10, type=11, cpp_type=10, label=1,
            has_default_value=False, default_value=None,
            message_type=None, enum_type=None, containing_type=None,
            is_extension=False, extension_scope=None,
            serialized_options=None, json_name='child', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    ],
    extensions=[
    ],
    nested_types=[],
    enum_types=[
    ],
    serialized_options=None,
    is_extendable=False,
    syntax='proto3',
    extension_ranges=[],
    oneofs=[
    ],
    serialized_start=235,
    serialized_end=592,
)
# Resolve the message-typed fields against their imported descriptors.
_FIXEDPRECISIONTENSOR.fields_by_name['id'].message_type = syft__proto_dot_types_dot_syft_dot_v1_dot_id__pb2._ID
_FIXEDPRECISIONTENSOR.fields_by_name['child'].message_type = syft__proto_dot_frameworks_dot_torch_dot_tensors_dot_interpreters_dot_v1_dot_additive__shared__pb2._ADDITIVESHARINGTENSOR
DESCRIPTOR.message_types_by_name['FixedPrecisionTensor'] = _FIXEDPRECISIONTENSOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message class from the descriptor via reflection.
FixedPrecisionTensor = _reflection.GeneratedProtocolMessageType('FixedPrecisionTensor', (_message.Message,), {
    'DESCRIPTOR' : _FIXEDPRECISIONTENSOR,
    '__module__' : 'syft_proto.frameworks.torch.tensors.interpreters.v1.precision_pb2'
    # @@protoc_insertion_point(class_scope:syft_proto.frameworks.torch.tensors.interpreters.v1.FixedPrecisionTensor)
})
_sym_db.RegisterMessage(FixedPrecisionTensor)
DESCRIPTOR._options = None
|
""" Handles basic functionality shared by a number of other modules
Classes:
:py:class:`Base`
Handles basic __init__ and listener patterns shared among several classes
Decorators:
:py:func:`listener`
A class decorator adding a listener without disrupting :py:meth:`~Base._add_listeners`
:py:func:`once`
A class decorator adding a once-only listener without disrupting
:py:meth:`~Base._add_listeners`
:py:func:`withoptions`
A class decorator adding basic :py:class:`~simulations.utils.optionparser.OptionParser`
functionality
"""
from simulations.utils.eventemitter import EventEmitter
from simulations.utils.optionparser import OptionParser
class Base(EventEmitter):
    """ The base class that handles common functionality from which other
    :py:class:`~simulations.utils.eventemitter.EventEmitter` classes are derived

    Keyword Parameters:
        default_handlers
            If true or not present, adds the default handlers defined in
            :py:meth:`Base._add_default_listeners`
    """

    def __init__(self, *args, **kwdargs):
        """ Initialize emitter state and register listeners.

        Keyword Parameters:
            default_handlers
                If true or not present, adds the default handlers defined in
                :py:meth:`Base._add_default_listeners`
        """
        super(Base, self).__init__()

        # Option-parsing state; populated later by the withoptions wrapper.
        self.options = None
        self.args = None
        self.oparser = None

        # default_handlers defaults to True when the keyword is absent.
        if kwdargs.get('default_handlers', True):
            self._add_default_listeners()
        self._add_listeners()

    def _add_default_listeners(self):
        """ Hook for default event listeners (subclasses should implement) """
        pass

    def _add_listeners(self):
        """ Hook for additional event listeners (subclasses should implement) """
        pass
def withoptions(klass):
    """ A class wrapper that bolts an
    :py:class:`~simulations.utils.optionparser.OptionParser` onto *klass*.

    Adds Keyword Parameters:
        option_error_handler
            An error handler for the OptionParser
        option_exit_handler
            An exit handler for the OptionParser

    Adds Events:
        oparser set up
            emitted once the OptionParser is ready to accept options
    """
    original_init = klass.__init__

    def newinit(self, *args, **kwdargs):
        """ Wraps the original __init__ and adds option-parser setup. """
        original_init(self, *args, **kwdargs)
        self.options = None
        self.args = None
        self.oparser = OptionParser()
        # Install the optional handlers only when the caller supplied them.
        for keyword, setter_name in (('option_error_handler', 'set_error_handler'),
                                     ('option_exit_handler', 'set_exit_handler')):
            if keyword in kwdargs:
                getattr(self.oparser, setter_name)(kwdargs[keyword])
        self._set_base_options()
        self.emit('oparser set up', self)

    klass.__init__ = newinit
    return klass
def listener(event, handler):
    """ Class decorator registering *handler* for *event* in a brief way.

    Effectively the same as calling
    :py:meth:`~simulations.utils.eventemitter.EventEmitter.add_listener` in
    :py:meth:`~Base._add_listeners`.

    Parameters:
        event
            the name of the event to listen for
        handler
            the event handler
    """
    def wrapper(klass):
        """ Chains one more add_listener call onto klass._add_listeners """
        previous = klass._add_listeners

        def _add_listeners(self):
            """ Runs earlier registrations, then adds this listener """
            previous(self)
            self.add_listener(event, handler)

        klass._add_listeners = _add_listeners
        return klass
    return wrapper
def once(event, handler):
    """ Class decorator registering a once-only *handler* for *event*.

    Effectively the same as calling
    :py:meth:`~simulations.utils.eventemitter.EventEmitter.once` in
    :py:meth:`~Base._add_listeners`.

    Parameters:
        event
            the name of the event to listen for
        handler
            the event handler
    """
    def wrapper(klass):
        """ Chains one more once() registration onto klass._add_listeners """
        previous = klass._add_listeners

        def _add_listeners(self):
            """ Runs earlier registrations, then adds this once-listener """
            previous(self)
            self.once(event, handler)

        klass._add_listeners = _add_listeners
        return klass
    return wrapper
|
import sys, time, string, random, os
data = "memory-20120525.log"  # destination log file

# Nine one-second samples: timestamp plus a prstat memory snapshot,
# both appended to the log via the shell.
for _ in range(1, 10):
    for command in ("date", "prstat -s size 1 1"):
        os.system("%s >> %s" % (command, data))
    time.sleep(1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
python3 -m tests.test_adb_device
"""
import os
import threading
import time
import unittest
from pathlib import Path
from pythonadb import ADBClient, ADBDevice, KeyCodes, Intent
from . import get_logger
from .test_const import DEVICE_IP, DESKTOP_FOLDER
log = get_logger("==> test_adb_device")
THIS_FILE_NAME = os.path.basename(__file__)
class ADBDeviceTestCase(unittest.TestCase):
    """Integration tests for ADBDevice against the device at DEVICE_IP."""

    @classmethod
    def setUpClass(cls) -> None:
        # Start from a clean slate: drop any previously connected devices.
        ADBClient.disconnect_all()

    def setUp(self) -> None:
        self.device = ADBDevice(ADBClient(DEVICE_IP))
        self.assertTrue(self.device.client.connect())

    @unittest.skip
    def test_001(self):
        """Build properties can be read and are non-empty."""
        print("test_001")
        properties = self.device.build_prop
        self.assertIsNotNone(properties)
        self.assertTrue(len(properties.keys()) > 0)
        print(properties.keys())
        print(properties.items())

    def test_002(self):
        """Device reports a name."""
        print("test_002")
        device_name = self.device.name
        log.debug(f"device name: {device_name}")
        self.assertIsNotNone(device_name)

    def test_003(self):
        """Screenshot written through a local file object."""
        print("test_003")
        api_level = self.device.api_level
        log.debug(f"api level: {api_level}")
        release = self.device.android_release_version
        self.assertIsNotNone(release)
        log.debug(f"release: {release}")
        local_screenshot = DESKTOP_FOLDER / "screenshot.png"
        local_screenshot.unlink(missing_ok=True)
        if api_level >= 26:
            # FIX: the screencap payload is binary PNG data, so the file must
            # be opened in "wb" mode — text mode rejects/corrupts bytes.
            # The `with` block also guarantees the handle is closed before
            # the existence check.
            with open(local_screenshot, "wb") as fp:
                result = self.device.write_screencap(fp)
                self.assertTrue(result)
            self.assertTrue(local_screenshot.exists())
            local_screenshot.unlink(missing_ok=True)

    def test_007(self):
        """Key events (DPAD center, then DPAD down) are accepted."""
        print("test_007")
        self.assertTrue(
            self.device.async_send_key(KeyCodes.KEYCODE_DPAD_CENTER.value)
            .result()
            .is_ok()
        )
        time.sleep(1)
        self.assertTrue(
            self.device.async_send_key(KeyCodes.KEYCODE_DPAD_DOWN.value)
            .result()
            .is_ok()
        )

    def test_008(self):
        """Text input is accepted."""
        print("test_008")
        future = self.device.async_send_text("a")
        self.assertTrue(future.result().is_ok())

    def test_009(self):
        """Package data can be cleared."""
        print("test_009")
        self.assertTrue(self.device.clear_package("com.android.bluetooth"))

    def test_098(self):
        """Remote screenshot: capture on device, pull, verify locally."""
        print("test_004")
        local_screenshot = DESKTOP_FOLDER / "screenshot.png"
        local_screenshot.unlink(missing_ok=True)
        tmp_dir = Path("/data/local/tmp")
        tmp_file = tmp_dir / local_screenshot.name
        if self.device.client.exists(str(tmp_file)):
            self.device.client.remove(str(tmp_file))
        result = self.device.save_screencap(str(tmp_file))
        log.debug(result)
        self.assertTrue(result.is_ok())
        self.assertTrue(self.device.client.exists(str(tmp_file)))
        self.assertTrue(self.device.client.pull(str(tmp_file), DESKTOP_FOLDER))
        self.assertTrue(local_screenshot.exists())
        local_screenshot.unlink(missing_ok=True)

    def test_099(self):
        """Screen record: capture a short clip on device, pull, verify."""
        print("test_005")
        local_screen_record = DESKTOP_FOLDER / "screen_record.mp4"
        local_screen_record.unlink(missing_ok=True)
        tmp_dir = Path("/sdcard")
        tmp_file = str(tmp_dir / local_screen_record.name)
        if self.device.client.exists(tmp_file):
            self.device.client.remove(tmp_file)
        time.sleep(1)
        log.debug("record screen few seconds...")
        self.device.screenrecord(file=tmp_file, bugreport=False, timelimit=6)
        log.debug("wait for the device to write the file...")
        time.sleep(5)
        self.device.client.pull(tmp_file, DESKTOP_FOLDER)
        self.assertTrue(local_screen_record.exists())
        local_screen_record.unlink(missing_ok=True)
# Allow running this test module directly (python3 -m tests.test_adb_device).
if __name__ == "__main__":
    unittest.main()
|
import json
def _serializer(obj):
"""
Render particular types in an appropriate way for logging. Allow
the json module to handle the rest as usual.
"""
# Datetime-like objects
if isinstance(obj, bytes):
return obj.decode('utf-8')
if hasattr(obj, 'isoformat'):
return obj.isoformat().decode('utf-8')
else:
try:
return str(obj)
except Exception:
message = (
"Object of type {0} with value of {1} is not JSON serializable"
).format(type(obj), repr(obj))
raise TypeError(message)
class KeyValueRenderer(object):
    """
    Render event_dict as a list of Key=json.dumps(str(Value)) pairs.

    Drop-in replacement for the structlog KeyValueRenderer.  json.dumps
    keeps strings double-quoted (with embedded quotes escaped), which avoids
    logging raw Python object representations for things like datetimes and
    unicode strings.
    """

    def __call__(self, logger, name, event_dict):
        pairs = []
        for key, value in list(event_dict.items()):
            encoded = json.dumps(value, default=_serializer)
            pairs.append('{0}={1}'.format(key, encoded))
        return ', '.join(pairs)
|
# Generated by Django 3.2.9 on 2021-12-28 22:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: re-declares Response.team as a
    # cascading foreign key with related_name 'responses'.

    dependencies = [
        ('main', '0014_auto_20211229_0145'),
    ]

    operations = [
        migrations.AlterField(
            model_name='response',
            name='team',
            # CASCADE: deleting a team also deletes its responses.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='responses', to='main.team'),
        ),
    ]
|
from .models import LocatorModel
|
# SECURITY: hard-coded credential committed to source control — rotate this
# password and load it from the environment or a secrets manager instead.
DJANGO_PASSWORD='rBQxfCcdd$DS=R-+4|&X!B4%xSCTGb%d|+R*w+&#E&|-B|dCz4gVfA%/|%%VAB|r'
|
# Without reduce: multiply the elements together with an explicit loop.
product = 1
numbers = [1, 2, 3, 4]
for factor in numbers:
    product *= factor

# With reduce: fold multiplication across the list in one expression.
from functools import reduce
product = reduce(lambda acc, value: acc * value, [1, 2, 3, 4])
|
from __future__ import absolute_import, unicode_literals
import logging
import sys
from virtualenv.util.six import ensure_str
# Map the "-v minus -q" net verbosity count to stdlib logging levels.
LEVELS = {
    0: logging.CRITICAL,
    1: logging.ERROR,
    2: logging.WARNING,
    3: logging.INFO,
    4: logging.DEBUG,
    5: logging.NOTSET,
}
# Highest supported verbosity count.
MAX_LEVEL = max(LEVELS.keys())
# Root logger: handlers installed here apply process-wide.
LOGGER = logging.getLogger()
def setup_report(verbose, quiet):
    """Install a stdout stream handler on the root logger.

    The level is derived from the net verbosity (verbose - quiet, clamped to
    the supported range); returns the effective verbosity count.
    """
    effective = max(verbose - quiet, 0)
    if effective > MAX_LEVEL:
        effective = MAX_LEVEL  # pragma: no cover
    _clean_handlers(LOGGER)
    level = LEVELS[effective]

    msg_format = "%(message)s"
    filelock_logger = logging.getLogger("filelock")
    if level <= logging.DEBUG:
        # Debug runs get relative timing plus the emitting module and line.
        locate = "module"
        msg_format = "%(relativeCreated)d {} [%(levelname)s %({})s:%(lineno)d]".format(msg_format, locate)
        filelock_logger.setLevel(level)
    else:
        # Keep the chatty filelock library quiet at normal verbosity.
        filelock_logger.setLevel(logging.WARN)

    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(level)
    stream_handler.setFormatter(logging.Formatter(ensure_str(msg_format)))
    LOGGER.setLevel(logging.NOTSET)
    LOGGER.addHandler(stream_handler)
    logging.debug("setup logging to %s", logging.getLevelName(level))
    return effective
def _clean_handlers(log):
for log_handler in list(log.handlers): # remove handlers of libraries
log.removeHandler(log_handler)
|
# Sudoku cell-index -> group-index lookup tables, emitted as C arrays:
#   row group    : i // 9                    -> 0..8
#   column group : i % 9 + 9                 -> 9..17
#   box group    : i//27*3 + (i%9)//3 + 18   -> 18..26
vt = [cell // 9 for cell in range(81)]
ht = [cell % 9 + 9 for cell in range(81)]
st = [cell // 27 * 3 + cell % 9 // 3 + 18 for cell in range(81)]

print("uint8_t _div_9_table[81]={" + ",".join(map(str, vt)) + "};// i/9")
print("uint8_t _mod_9_plus_9_table[81]={" + ",".join(map(str, ht)) + "};// (i%9)+9")
print("uint8_t _div_27_times_3_plus_mod_9_div_3_plus_18[81]={" + ",".join(map(str, st)) + "};// i/27*3+(i%9)/3+18")
|
#!/bin/env python
# encoding:utf-8
#
#
#
__Author__ = "CORDEA"
__date__ = "2014-08-28"
import sys
arg = sys.argv[1]  # input file whose records are filtered against clinvar IDs

# Load the reference IDs (third tab-separated column of "clinvar").
# A set gives O(1) membership tests instead of scanning a list per line;
# `with` guarantees the handles are closed even on error.
with open("clinvar", "r") as infile:
    id_set = {record.split("\t")[2] for record in infile.readlines()}

with open(arg, "r") as infile:
    lines = infile.readlines()

with open("dataset/clinvar_osp.vcf", "w") as outFile:
    for count, line in enumerate(lines):
        if count == 0:
            # First (header) line is deliberately skipped.
            pass
            #outFile.write(line)
        else:
            # Keep only records whose entire line matches a known ID.
            if line.rstrip("\r\n") in id_set:
                outFile.write(line)
|
import numpy as np
from mootils import paramtools
# ---- Control center: master switches for this run ----
steps_per_block = 1000
total_blocks = 10
save_to = "generic"              # "new folder", "generic" or path to an existing folder
start_from = "cubic"             # cubic, conf, custom

add_spherical_confinement = True
add_lamina_attraction = False
add_pulling = False
add_crosslinks = False
has_extrusion = False

# ---- Saving data ----
base_folder = "/net/levsha/share/simongh/sims/data"
folder_flag = "example"          # arbitrary string appended to the folder name
blocks_per_file = 100

# ---- Computational parameters (for reference, see polychrom/simulation.py) ----
platform = "CUDA"
GPU = "0"                        # likely to be overridden
integrator = "variableLangevin"
collision_rate = 0.03
error_tol = 0.01                 # for variableLangevin
timestep = 20                    # for Brownian
verbose = False
max_Ek = 10
PBCbox = False                   # [x, y, z] or False or "density" to use sphconf_density

# ---- Major simulation parameters ----
N = 10000                        # overwritten when loading a conformation

# ---- Starting conformation ----
start_cubic_box = None           # None if it should be computed from N

# ---- FORCES ----
# Spherical confinement
sphconf_density = 0.35
sphconf_k = 6

# Polymer chain forcekit
chains = [(0, None, False)]      # likely to be overwritten
chain_bondlength = 1.0
chain_bondwiggledist = 0.05
chain_anglek = 1.5               # from example.py: 1.5 is reasonable, 4 gives l_p = 4 monomers, 8 is very stiff
chain_nonbonded = "polynomial"   # "hetero", "simple", "polynomial"
chain_nb_repEnergy = 3.0
chain_nb_repRadius = 1.0
chain_nb_attrEnergy = 0.0
chain_nb_attrRadius = 2.0

# ---- Energy minimization ----
do_energy_minimization = True    # automatically switched off when starting from conf
emin_tolerance = 0.3
emin_maxIter = 0
emin_randomOffset = chain_bondwiggledist / 2
|
import os
import pathlib
import frida
# Spawn the target binary and attach so the agent can be injected.
process_pid = frida.spawn("dumpme01")
session = frida.attach(process_pid)

# In-process JavaScript agent: exposes memory-range enumeration and raw page
# extraction over frida's RPC bridge (script text must remain exactly as-is).
script = session.create_script("""
'use strict';
rpc.exports = {
memoryRanges: function (permission_mask) {
return Process.enumerateRangesSync(permission_mask);
},
extractMemory: function (source_address, size) {
if(!source_address){
console.log("NO valid source_address.");
return null;
}
var memoryPage = null;
try {
memoryPage = Memory.readByteArray(ptr(source_address), size);
console.log("memoryPage was read! (compare with python context)");
}
catch(error){
console.log("Catched error: " + error);
console.log(" ***> try to protect and set READ permission.");
var protect_result = Memory.protect(ptr(source_address), size, 'r--');
if(protect_result === true){
console.log("Memory.protect->read successful. Do read.");
try{
memoryPage = Memory.readByteArray(ptr(source_address), size);
}
catch(second_error) {
memoryPage = null;
}
if(!memoryPage) {
console.log("memoryPage: is null again?.");
}
else {
console.log("memoryPage: is NOT null.");
}
return memoryPage;
}
else {
console.log("Memory.protect->read ERROR (" + protect_result + "). Skip.");
memoryPage = null;
}
}
return memoryPage;
}
};
""")
script.load()
# RPC proxy: frida exposes the camelCase JS exports (memoryRanges,
# extractMemory) as snake_case attributes on the Python side.
myagent = script.exports
read_memory_ranges = myagent.memory_ranges('r--')
readwrite_memory_ranges = myagent.memory_ranges('rw-')
execute_memory_ranges = myagent.memory_ranges('--x')

# Walk every readable range and try to pull its bytes via the agent.
print("[READ] memory blocks:")
counter = 0
for r in read_memory_ranges:
    block = None;
    print("({counter}) [{base}] size: {size}".format(counter=counter,
                                                     base=r['base'],
                                                     size=r['size']))
    try:
        block = myagent.extract_memory(r['base'], r['size'])
    except Exception as e:
        print("Exception when: [{base}] [{size}]".format(base=r['base'], size=r['size']))
        print("E: [%s]" % str(e))
        # NOTE(review): counter is not incremented on this path — confirm
        # whether it should track list position or only successful reads.
        continue
    if block is None:
        print(" Memory block was NULL/NONE: excepted.")
    counter += 1
|
from models.nvp_network import NonVolumePreservingNetwork
from distributions.generative_distribution import GenerativeDistribution
from tensorflow.keras.layers import Input
class RealNVP(GenerativeDistribution):
    """Real-valued non-volume-preserving flow wrapped as a generative distribution."""

    def __init__(self,
                 output_size,
                 config,
                 name_scope,
                 name):
        # Keras symbolic inputs: one fed with latent vectors, one with data
        # vectors, both of width `output_size`.
        self.latent_input_layer = Input(shape=(output_size,))
        self.data_input_layer = Input(shape=(output_size,))

        # Build the invertible network from its sub-config.
        self.invertible_network = NonVolumePreservingNetwork(
            config=config["invertible_network"],
            output_size=output_size,
            latent_vector_input=self.latent_input_layer,
            data_vector_input=self.data_input_layer,
            name_scope=name_scope,
        )

        # Initialise the GenerativeDistribution base class last.
        super().__init__(output_size, name, name_scope, config)

    def build_copy(self, name_scope='distribution_copy'):
        """Return a fresh RealNVP with the same size and config under *name_scope*."""
        return RealNVP(
            output_size=self.output_size,
            config=self.config,
            name_scope=name_scope,
            name='nvp',
        )
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
import math
# Vehicle-type choices shared by the models below (car, motorcycle,
# truck, other).
TIPO_CHOICES = (
    (0, 'Carro'),
    (1, 'Moto'),
    (2, u'Caminhão'),
    (3,'Outro')
)
def distance(origin, destination):
    """Great-circle (haversine) distance between two points.

    input:
        - origin (tuple): (latitude, longitude) in degrees
        - destination (tuple): (latitude, longitude) in degrees
    output:
        - distance (float): kilometres
    """
    lat1, lon1 = origin
    lat2, lon2 = destination
    earth_radius_km = 6371

    dphi = math.radians(lat2 - lat1)
    dlam = math.radians(lon2 - lon1)
    # Haversine of the central angle between the two points.
    half_chord = (math.sin(dphi / 2) ** 2
                  + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2))
                  * math.sin(dlam / 2) ** 2)
    arc = 2 * math.atan2(math.sqrt(half_chord), math.sqrt(1 - half_chord))
    return earth_radius_km * arc
class BaseRadares(models.Model):
    """Speed-radar installation imported from an external GIS table."""

    gid = models.AutoField(primary_key=True)
    id = models.IntegerField(blank=True, null=True)
    lote = models.IntegerField(blank=True, null=True)
    codigo = models.CharField(max_length=40, blank=True, null=True)
    endereco = models.CharField(max_length=50, blank=True, null=True)
    sentido = models.CharField(max_length=100, blank=True, null=True)
    referencia = models.CharField(max_length=100, blank=True, null=True)
    tipo_equip = models.CharField(max_length=30, blank=True, null=True)
    enquadrame = models.CharField(max_length=20, blank=True, null=True)
    qtde_fxs_f = models.IntegerField(blank=True, null=True)
    data_publi = models.CharField(max_length=24, blank=True, null=True)
    velocidade = models.CharField(max_length=15, blank=True, null=True)
    # latitude_l appears to hold "(lat lon)" text parsed by latitude()/
    # longitude() below — TODO confirm against the source data.
    latitude_l = models.CharField(max_length=50, blank=True, null=True)
    ligado = models.IntegerField(blank=True, null=True)
    data_desli = models.CharField(max_length=24, blank=True, null=True)
    motivo_des = models.CharField(max_length=254, blank=True, null=True)
    mi_style = models.CharField(max_length=254, blank=True, null=True)
    mi_prinx = models.IntegerField(blank=True, null=True)
    geom = models.TextField(blank=True, null=True)  # This field type is a guess.
    emme_gid = models.IntegerField(blank=True, null=True)
    mdc_gid = models.IntegerField(blank=True, null=True)

    def latitude(self):
        # First token of latitude_l, with the leading '(' stripped;
        # returns '' (after printing the error) when parsing fails.
        try:
            return float(self.latitude_l.split(' ')[0][1:])
        except Exception as x:
            print(x)
            return ''

    def longitude(self):
        # Second token of latitude_l, with the trailing ')' stripped;
        # returns '' (after printing the error) when parsing fails.
        try:
            return float(self.latitude_l.split(' ')[1][:-1])
        except Exception as x:
            print(x)
            return ''

    def contagem(self, hora, dia):
        # Not implemented yet.
        pass

    def autuacoes(self, mes):
        # Not implemented yet.
        pass

    def __unicode__(self):
        return u'%s'% self.id

    class Meta:
        # managed = False
        db_table = 'base_radares'
class Trajetos(models.Model):
    """One leg of a trip (Viagem) between two radar readings."""

    gid = models.AutoField(primary_key=True)
    id = models.IntegerField(blank=True, null=True, )
    viagem_id = models.IntegerField(blank=True, null=True, help_text='ID da Viagem', verbose_name='ID Viagem')
    tipo = models.IntegerField(blank=True, null=True, choices=TIPO_CHOICES)
    data_inicio = models.DateTimeField(null=True, blank=True)
    data_final = models.DateTimeField(null=True, blank=True)
    origem = models.IntegerField(blank=True, null=True)
    destino = models.IntegerField(blank=True, null=True)
    v0 = models.IntegerField(blank=True, null=True)
    v1 = models.IntegerField(blank=True, null=True)

    def distancia(self):
        # Not implemented yet.
        pass

    def tempo(self):
        # Not implemented yet.
        pass

    def velocidade_media(self):
        # Not implemented yet.
        pass

    def __unicode__(self):
        return u'%s'% self.id

    class Meta:
        # managed = False
        db_table = 'trajetos'
class Viagens(models.Model):
    """A trip between two radar locations (inicio -> final)."""

    gid = models.AutoField(primary_key=True)
    id = models.IntegerField(blank=True, null=True)
    data_inicio = models.DateTimeField(null=True, blank=True)
    data_final = models.DateTimeField(null=True, blank=True)
    inicio = models.IntegerField(blank=True, null=True)
    final = models.IntegerField(blank=True, null=True)
    tipo = models.IntegerField(blank=True, null=True, choices=TIPO_CHOICES)

    def distancia(self):
        # Great-circle distance (km) between the start and end radars;
        # returns '' (after printing the error) when the radar lookup or
        # coordinate parsing fails.
        try:
            ri=BaseRadares.objects.get(codigo__icontains=self.inicio)
            rf=BaseRadares.objects.get(codigo__icontains=self.final)
            return distance((ri.latitude(),ri.longitude()), (rf.latitude(),rf.longitude() ))
        except Exception as x:
            print(x)
            return ''

    def tempo(self):
        # Not implemented yet.
        pass

    def __unicode__(self):
        return u'%s'% self.id

    class Meta:
        # managed = False
        db_table = 'viagens'
class Contagens(models.Model):
    """Hourly vehicle counts for one location, with plate reads and fines."""

    gid = models.AutoField(primary_key=True)
    id = models.IntegerField(blank=True, null=True)
    data_e_hora = models.DateTimeField(null=True, blank=True)
    localidade = models.IntegerField(blank=True, null=True)
    tipo = models.IntegerField(blank=True, null=True, choices=TIPO_CHOICES)
    contagem = models.IntegerField(blank=True, null=True)
    autuacoes = models.IntegerField(blank=True, null=True)
    placas = models.IntegerField(blank=True, null=True)

    def acuracia(self):
        # Share of counted vehicles whose plate was read (placas/contagem),
        # rounded to 2 decimals; '-' when a value is missing or contagem is 0.
        try:
            return round(float(self.placas)/float(self.contagem), 2)
        except Exception as x:
            print(x)
            return '-'

    def autuacoes_por_placas(self):
        # Fines per read plate (autuacoes/placas), rounded to 2 decimals;
        # '-' when a value is missing or placas is 0.
        try:
            return round(float(self.autuacoes)/float(self.placas), 2)
        except Exception as x:
            print(x)
            return '-'

    def __unicode__(self):
        return u'%s'% self.id

    class Meta:
        # managed = False
        db_table = 'contagens'
version https://git-lfs.github.com/spec/v1
oid sha256:4ade5c42b41b9d13f22c16bc5e586c609790504eb8759bfa78bd268ca13f5928
size 5350
|
# Print the school name for generation 0, otherwise the slogan.
n = int(input())
if n:
    print('Leading the Way to the Future')
else:
    print('YONSEI')
|
"""
Unit tests for the cert_get_data module
"""
import os
import unittest
from pytrustplatform.cert_get_data import cert_get_skid, cert_get_common_name, create_cert_fingerprint
from pytrustplatform.tests.data import dummy_cert
# Absolute path to the dummy certificate shipped in this package's test data.
TEST_CERT_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'dummy_cert.crt')
class TestCertGetSkid(unittest.TestCase):
    """Unit tests for cert_get_skid."""

    def test_cert_get_skid(self):
        """The extracted Subject Key Identifier matches the dummy cert's."""
        expected_skid = dummy_cert.DUMMY_CERT_SKID
        self.assertEqual(cert_get_skid(TEST_CERT_FILE), expected_skid)
class TestCertGetCommonName(unittest.TestCase):
    """Unit tests for cert_get_common_name."""

    def test_get_common_name(self):
        """The extracted Common Name matches the dummy cert's."""
        expected_cn = dummy_cert.DUMMY_CERT_COMMON_NAME
        self.assertEqual(cert_get_common_name(TEST_CERT_FILE), expected_cn)
class TestCreateCertFingerprint(unittest.TestCase):
    """Unit tests for create_cert_fingerprint."""

    def test_create_cert_fingerprint(self):
        """The computed fingerprint matches the dummy cert's."""
        expected_fp = dummy_cert.DUMMY_CERT_FINGERPRINT
        self.assertEqual(create_cert_fingerprint(TEST_CERT_FILE), expected_fp)
|
from django.forms import Field, ValidationError
from django.utils.translation import ugettext_lazy as _
from attributes.widgets import AttributesFormFieldWidget
from attributes.forms import AttributesForm
class AttributesFormField(Field):
    """Form field that embeds a nested AttributesForm and proxies
    validation and persistence to it."""

    widget = AttributesFormFieldWidget
    # Nested form instance; stays None until init_form() is called.
    form = None

    def init_form(self, *args, **kwargs):
        # Build the nested form and hand it to the widget for rendering.
        self.form = AttributesForm(*args, **kwargs)
        self.widget.form = self.form

    def clean(self, *args, **kwargs):
        # NOTE(review): assumes init_form() ran first — self.form is still
        # None otherwise and this raises AttributeError; confirm call order.
        if not self.form.is_valid():
            raise ValidationError(_('Form is invalid.'))
        return self.form.cleaned_data

    def commit(self, *args, **kwargs):
        # Delegate saving to the nested form.
        return self.form.commit(*args, **kwargs)
|
from collections import defaultdict
import ida_hexrays
import ida_lines
import ida_pro
import json
import re
# Sentinel effective address used for synthetic ctree items with no
# corresponding binary address.
UNDEF_ADDR = 0xFFFFFFFFFFFFFFFF
# Matches Hex-Rays auto-generated variable/argument names (v0, a1, ...).
hexrays_vars = re.compile("^(v|a)[0-9]+$")
def get_expr_name(expr):
    """Printable name of a ctree expression: printed, untagged, made user-safe."""
    raw = expr.print1(None)
    untagged = ida_lines.tag_remove(raw)
    return ida_pro.str2user(untagged)
class CFuncGraph:
def __init__(self, highlight):
self.items = [] # list of citem_t
self.reverse = [] # citem_t -> node #
self.succs = [] # list of lists of next nodes
self.preds = [] # list of lists of previous nodes
self.highlight = highlight
def nsucc(self, n):
return len(self.succs[n]) if self.size() else 0
def npred(self, n):
return len(self.preds[n]) if self.size() else 0
def succ(self, n, i):
return self.succs[n][i]
def pred(self, n, i):
return self.preds[n][i]
def size(self):
return len(self.preds)
def add_node(self):
n = self.size()
def resize(array, new_size):
if new_size > len(array):
while len(array) < new_size:
array.append([])
else:
array = array[:new_size]
return array
self.preds = resize(self.preds, n+1)
self.succs = resize(self.succs, n+1)
return n
def add_edge(self, x, y):
self.preds[y].append(x)
self.succs[x].append(y)
def get_pred_ea(self, n):
if self.npred(n) == 1:
pred = self.pred(n, 0)
pred_item = self.items[pred]
if pred_item.ea == UNDEF_ADDR:
return self.get_pred_ea(pred)
return pred_item.ea
return UNDEF_ADDR
    def get_node_label(self, n):
        """Human-readable label for node *n*: ctype name, op-specific detail,
        effective address, and (for expressions) the printed C type."""
        item = self.items[n]
        op = item.op
        insn = item.cinsn
        expr = item.cexpr
        parts = [ida_hexrays.get_ctype_name(op)]
        # Op-specific detail appended after the ctype name.
        if op == ida_hexrays.cot_ptr:
            parts.append(".%d" % expr.ptrsize)
        elif op == ida_hexrays.cot_memptr:
            parts.append(".%d (m=%d)" % (expr.ptrsize, expr.m))
        elif op == ida_hexrays.cot_memref:
            parts.append(" (m=%d)" % (expr.m,))
        elif op in [
                ida_hexrays.cot_obj,
                ida_hexrays.cot_var]:
            name = get_expr_name(expr)
            parts.append(".%d %s" % (expr.refwidth, name))
        elif op in [
                ida_hexrays.cot_num,
                ida_hexrays.cot_helper,
                ida_hexrays.cot_str]:
            name = get_expr_name(expr)
            parts.append(" %s" % (name,))
        elif op == ida_hexrays.cit_goto:
            parts.append(" LABEL_%d" % insn.cgoto.label_num)
        elif op == ida_hexrays.cit_asm:
            parts.append("<asm statements; unsupported ATM>")
            # parts.append(" %a.%d" % ())
        parts.append(", ")
        parts.append("ea: %08X" % item.ea)
        # Expressions additionally carry their C type when it is known.
        if item.is_expr() and not expr is None and not expr.type.empty():
            parts.append(", ")
            tstr = expr.type._print()
            parts.append(tstr if tstr else "?")
        return "".join(parts)
def json_tree(self, n):
    """Serialize the subtree rooted at node *n* into a JSON-ready dict.

    Operand successors (x/y/z of an expression) are pulled out of the
    generic successor pool and stored under their own keys; whatever
    remains is serialized under "children".
    """
    item = self.items[n]
    # Every node carries its unique id and its ctree node type.
    node_info = {"node_id": n}
    node_info["node_type"] = ida_hexrays.get_ctype_name(item.op)
    # C-land type of the expression, when there is one.
    if item.is_expr() and not item.cexpr.type.empty():
        node_info["type"] = item.cexpr.type._print()
    node_info["address"] = "%08X" % item.ea
    if item.ea == UNDEF_ADDR:
        node_info["parent_address"] = "%08X" % self.get_pred_ea(n)
    # Op-specific attributes.
    op = item.op
    if op == ida_hexrays.cot_ptr:
        node_info["pointer_size"] = item.cexpr.ptrsize
    elif op == ida_hexrays.cot_memptr:
        node_info["pointer_size"] = item.cexpr.ptrsize
        node_info["m"] = item.cexpr.m
    elif op == ida_hexrays.cot_memref:
        node_info["m"] = item.cexpr.m
    elif op == ida_hexrays.cot_obj:
        node_info["name"] = get_expr_name(item.cexpr)
        node_info["ref_width"] = item.cexpr.refwidth
    elif op == ida_hexrays.cot_var:
        # Variable names are encoded as "@@"-separated fields upstream.
        _, var_id, old_name, new_name = get_expr_name(item.cexpr).split("@@")
        node_info["var_id"] = var_id
        node_info["old_name"] = old_name
        node_info["new_name"] = new_name
        node_info["ref_width"] = item.cexpr.refwidth
    elif op in (ida_hexrays.cot_num, ida_hexrays.cot_str, ida_hexrays.cot_helper):
        node_info["name"] = get_expr_name(item.cexpr)
    # Graph successors of this node, not yet assigned to an operand slot.
    pool = [self.succ(n, i) for i in range(self.nsucc(n))]

    def claim(operand):
        # Remove and serialize the first pooled successor matching *operand*.
        if not operand:
            return None
        for s in pool:
            if operand == self.items[s]:
                pool.remove(s)
                return self.json_tree(s)
        return None

    x_tree = y_tree = z_tree = None
    if item.is_expr():
        x_tree = claim(item.x)
        y_tree = claim(item.y)
        z_tree = claim(item.z)
    children = [self.json_tree(s) for s in pool]
    if children:
        node_info["children"] = children
    if x_tree:
        node_info["x"] = x_tree
    if y_tree:
        node_info["y"] = y_tree
    if z_tree:
        node_info["z"] = z_tree
    return node_info
def print_tree(self):
    """Dump the whole tree (rooted at node 0) to stdout as a JSON string."""
    print(json.dumps(self.json_tree(0)))
def dump(self):
    """Print a debug summary: every item, then the succ and pred adjacency lists."""
    print("%d items:" % len(self.items))
    for idx, item in enumerate(self.items):
        print("\t%d: %s" % (idx, ida_hexrays.get_ctype_name(item.op)))
    print("succs:")
    for src, outgoing in enumerate(self.succs):
        print("\t%d: %s" % (src, outgoing))
    print("preds:")
    for dst, incoming in enumerate(self.preds):
        print("\t%d: %s" % (dst, incoming))
class GraphBuilder(ida_hexrays.ctree_parentee_t):
    """ctree visitor that mirrors every visited item and parent->child edge
    into the companion graph object *cg*.

    ``cg`` is expected to provide ``add_node()``, ``add_edge(p, n)``, an
    ``items`` list and a ``reverse`` list of (item, node-index) pairs.
    """

    def __init__(self, cg):
        ida_hexrays.ctree_parentee_t.__init__(self)
        self.cg = cg

    def add_node(self, i):
        """Register ctree item *i* as a new graph node and return its index.

        Fix: the original guard ``n <= len(items)`` appended a stray
        duplicate entry whenever n < len(items) before overwriting
        items[n]; grow the list only when the slot does not exist yet.
        """
        n = self.cg.add_node()
        if n == len(self.cg.items):
            self.cg.items.append(i)
        else:
            self.cg.items[n] = i
        self.cg.reverse.append((i, n))
        return n

    def process(self, i):
        """Add item *i* to the graph and connect it to its recorded parent.

        Returns 0 to continue the traversal (Hex-Rays visitor convention).
        """
        n = self.add_node(i)
        if n < 0:
            return n
        if len(self.parents) > 1:
            parent_id = self.parents.back().obj_id
            # reverse maps items to node indices; linear scan by obj_id.
            # Fix: previously a missing parent left the edge source unbound
            # and raised NameError; now the edge is simply skipped.
            for item, node in self.cg.reverse:
                if item.obj_id == parent_id:
                    self.cg.add_edge(node, n)
                    break
        return 0

    def visit_insn(self, i):
        # Statements and expressions are recorded identically.
        return self.process(i)

    def visit_expr(self, e):
        return self.process(e)
# auth: christian bitter
# name: sm.py
# desc: simple deterministic Finite State Automaton/ Machine definition
import uuid
class State(object):
    """A named automaton state with a unique id.

    Identity is the uuid generated at construction time, so two states
    created with the same name are still distinct.
    """

    def __init__(self, name: str, description: str = None):
        super(State, self).__init__()
        self._id = uuid.uuid1()
        self._name = name
        self._description = description

    def __repr__(self):
        return "{} ({})".format(self._name, self._id)

    @property
    def id(self):
        return self._id

    @property
    def name(self):
        return self._name

    @property
    def description(self):
        return self._description

    def __eq__(self, other):
        # Fix: guard against non-State operands (e.g. `state == None`),
        # which previously raised AttributeError on `other.id`.
        if not isinstance(other, State):
            return NotImplemented
        return other.id == self._id

    def __hash__(self):
        # Fix: defining __eq__ alone sets __hash__ to None in Python 3,
        # making states unusable in sets / as dict keys. Hash the
        # immutable id, consistent with __eq__.
        return hash(self._id)
class Transition(object):
    """A directed edge between two states, guarded by a trigger callable."""

    def __init__(
        self, f: State, t: State, trigger_fn, name: str = None, description: str = None
    ):
        super(Transition, self).__init__()
        self._id = uuid.uuid1()
        self._source = f
        self._target = t
        self._guard = trigger_fn
        self._name = name
        self._description = description

    def __repr__(self):
        return "{} ({}) : {} => {}".format(self._id, self._name, self._source, self._target)

    def fires(self) -> bool:
        """Evaluate the guard; True means this transition should be taken."""
        return self._guard()

    @property
    def from_state(self):
        return self._source

    @property
    def to_state(self):
        return self._target
class StateMachine(object):
    """A deterministic finite state machine over State/Transition objects."""

    def __init__(self, states: list, transitions: list, initial_state: State):
        self._id = uuid.uuid1()
        self._init = initial_state
        self._current_state = self._init
        # Defensive copies so later mutation of the caller's lists
        # cannot alter the machine.
        self._states = states.copy()
        self._transitions = transitions.copy()
        # self._validate()

    def _validate(self):
        raise ValueError("TODO:")

    @property
    def current(self):
        return self._current_state

    def update(self):
        """Advance one step: follow the first firing transition out of the
        current state.

        Determinism convention: even if several outgoing transitions could
        fire, only the first one encountered is taken (enforcing a single
        firing transition per state is _validate's job).
        """
        fired = next(
            (t for t in self._transitions
             if t.from_state == self._current_state and t.fires()),
            None,
        )
        if fired:
            self._current_state = fired.to_state

    @property
    def transitions(self):
        return self._transitions

    @property
    def states(self):
        return self._states
from .function_metadata import FunctionMetadata
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.