#!/usr/bin/env python
""" generated source for module SentenceForm """
# package: org.ggp.base.util.gdl.model
from abc import ABCMeta, abstractmethod
# Original Java imports kept for reference only; they are not importable Python modules:
# import java.util.List
# import org.ggp.base.util.gdl.GdlUtils
# import org.ggp.base.util.gdl.grammar.GdlConstant
# import org.ggp.base.util.gdl.grammar.GdlSentence
# import org.ggp.base.util.gdl.grammar.GdlTerm
#
# * A sentence form captures the structure of a group of possible
# * GdlSentences. Two sentences have the same form if they have the
# * same name and include the same functions in the same place
# *
# * Implementations of SentenceForm should be immutable. They
# * should extend {@link AbstractSentenceForm} for implementations
# * of hashCode and equals that will be compatible with other
# * SentenceForms, as well as a recommended implementation of
# * toString.
#
class SentenceForm(object):
""" generated source for interface SentenceForm """
__metaclass__ = ABCMeta
#
# * Returns the name of all sentences with this form.
#
@abstractmethod
def getName(self):
""" generated source for method getName """
#
# * Returns a sentence form exactly like this one, except
# * with a new name.
#
@abstractmethod
def withName(self, name):
""" generated source for method withName """
#
# * Returns true iff the given sentence is of this sentence form.
#
@abstractmethod
def matches(self, relation):
""" generated source for method matches """
#
# * Returns the number of constants and/or variables that a sentence
# * of this form contains.
#
@abstractmethod
def getTupleSize(self):
""" generated source for method getTupleSize """
#
# * Given a list of GdlConstants and/or GdlVariables in the
# * order they would appear in a sentence of this sentence form,
# * returns that sentence.
# *
# * For the opposite operation (getting a tuple from a sentence),
# * see {@link GdlUtils#getTupleFromSentence(GdlSentence)} and
# * {@link GdlUtils#getTupleFromGroundSentence(GdlSentence)}.
#
@abstractmethod
def getSentenceFromTuple(self, tuple_):
""" generated source for method getSentenceFromTuple """
|
# forms.py
from django.db import models
from django import forms
from django.forms import ModelForm
from core.models import Question
class AskQuestionForm(ModelForm):
class Meta:
model = Question
fields = [ 'question', 'details', 'tags', 'author', ]
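# --- Hypothetical usage sketch ---
# A minimal view showing how AskQuestionForm might typically be wired up;
# the template name and redirect target are placeholder assumptions.
from django.shortcuts import redirect, render

def ask_question(request):
    if request.method == "POST":
        form = AskQuestionForm(request.POST)
        if form.is_valid():
            form.save()  # creates a Question from the validated data
            return redirect("/")
    else:
        form = AskQuestionForm()
    return render(request, "ask_question.html", {"form": form})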
|
import sqlalchemy
from databases import Database
from src.app.settings import settings
db = Database(settings.db_dsn, force_rollback=(settings.environment == "TESTING"))
metadata = sqlalchemy.MetaData()
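# --- Hypothetical usage sketch ---
# How the Database instance above is typically driven from async code; the
# "notes" table and the query are placeholder assumptions, not part of this app.
notes = sqlalchemy.Table(
    "notes",
    metadata,
    sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
    sqlalchemy.Column("text", sqlalchemy.String(length=200)),
)

async def list_notes():
    # `databases` exposes an async API: connect once, then issue queries
    if not db.is_connected:
        await db.connect()
    return await db.fetch_all(notes.select())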
|
"""Výpočet a vykreslení Wan-Sunova podivného atraktoru."""
# coding: utf-8
# # The Wang - Sun attractor
# Please also see https://hipwallpaper.com/view/9W3CM8
# In[1]:
# import všech potřebných knihoven - Numpy a Matplotlibu
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
# In[2]:
# function computing the time derivatives of the Wang-Sun system
def wang_sun(x, y, z, alfa, beta, gamma, delta, epsilon, zeta):
    """Return the time derivatives (x_dot, y_dot, z_dot) of the Wang-Sun attractor."""
    x_dot = x * alfa + gamma * y * z
    y_dot = x * beta + y * delta - x * z
    z_dot = z * epsilon + zeta * x * y
    return x_dot, y_dot, z_dot
# In[14]:
# time step (delta t)
dt = 0.001
# total number of points computed on the Wang-Sun attractor
n = 1000000
# pre-allocated (initially empty) arrays for the computed trajectory
x = np.zeros((n,))
y = np.zeros((n,))
z = np.zeros((n,))
# In[15]:
# initial values
x[0], y[0], z[0] = (1.05, 1.1, 1.5)
# the attractor computation itself (explicit Euler integration)
for i in range(n-1):
x_dot, y_dot, z_dot = wang_sun(x[i], y[i], z[i], 0.2, -0.01, 1.0, -0.4, -1.0, -1.0)
x[i+1] = x[i] + x_dot * dt
y[i+1] = y[i] + y_dot * dt
z[i+1] = z[i] + z_dot * dt
fig = plt.figure()
# Figure.gca(projection='3d') is deprecated/removed in recent Matplotlib; use add_subplot
ax = fig.add_subplot(projection='3d')
# draw the trajectory
ax.plot(x, y, z)
# show the plot
plt.tight_layout()
plt.show()
# In[16]:
ch_3d = np.stack((x, y, z))
lim_xyz = [(np.min(ch_3d[ii]), np.max(ch_3d[ii])) for ii in range(3)]
fig2 = plt.figure('3D Coordinates')
plt.subplot(2, 2, 1)
plt.plot(y, x, linewidth=0.75)
plt.grid()
plt.xlabel('Y')
plt.ylabel('X')
plt.xlim(lim_xyz[1])
plt.ylim(lim_xyz[0])
plt.subplot(2, 2, 2)
plt.plot(y, z, linewidth=0.75)
plt.grid()
plt.xlabel('Y')
plt.ylabel('Z')
plt.xlim(lim_xyz[1])
plt.ylim(lim_xyz[2])
plt.subplot(2, 2, 3)
plt.plot(z, x, linewidth=0.75)
plt.grid()
plt.xlabel('Z')
plt.ylabel('X')
plt.xlim(lim_xyz[2])
plt.ylim(lim_xyz[0])
ax = fig2.add_subplot(2, 2, 4, projection='3d')
ax.plot(x, y, z, linewidth=0.7)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.tight_layout()
plt.show()
|
# Formatting configuration for locale cs
languages={'gu': u'Gujarat\u0161tina', 'gd': u'Skotsk\xe1 gal\u0161tina', 'ga': u'Ir\u0161tina', 'gn': u'Guaran\u0161tina', 'gl': u'Hali\u010d\u0161tina', 'la': 'Latina', 'ln': u'Lingal\u0161tina', 'lo': u'Lao\u0161tina', 'tt': u'Tatar\u0161tina', 'tr': u'Ture\u010dtina', 'ts': 'Tsonga', 'lv': u'Loty\u0161tina', 'to': 'Tonga', 'lt': u'Litev\u0161tina', 'tk': u'Turkmen\u0161tina', 'th': u'Thaj\u0161tina', 'ti': u'Tigrinij\u0161tina', 'tg': u'T\xe1d\u017ei\u010dtina', 'te': u'Telug\u0161tina', 'ta': u'Tamil\u0161tina', 'yi': u'Jidi\u0161', 'yo': 'Yoruba', 'de': u'N\u011bm\u010dina', 'da': u'D\xe1n\u0161tina', 'dz': u'Bh\xfat\xe1n\u0161tina', 'st': 'Sesotho', 'qu': u'Ke\u010du\xe1n\u0161tina', 'el': u'\u0158e\u010dtina', 'eo': 'Esperanto', 'en': u'Angli\u010dtina', 'zh': u'\u010c\xedn\u0161tina', 'za': 'Zhuang', 'uk': u'Ukrajin\u0161tina', 'eu': u'Baski\u010dtina', 'et': u'Eston\u0161tina', 'es': u'\u0160pan\u011bl\u0161tina', 'ru': u'Ru\u0161tina', 'rw': u'Kinyarwand\u0161tina', 'rm': u'R\xe9torom\xe1n\u0161tina', 'rn': 'Kirundi', 'ro': u'Rumun\u0161tina', 'bn': u'Beng\xe1l\u0161tina', 'be': u'B\u011bloru\u0161tina', 'bg': u'Bulhar\u0161tina', 'ba': u'Baskir\u0161tina', 'wo': 'Wolof', 'jv': u'Jav\xe1n\u0161tina', 'bo': u'Tibet\u0161tina', 'bh': u'Bihar\u0161tina', 'bi': u'Bisl\xe1m\u0161tina', 'br': u'Breta\u0148\u0161tina', 'ja': u'Japon\u0161tina', 'om': 'Oromo (Afan)', 'oc': 'Occitan', 'tw': 'Twi', 'or': 'Oriya', 'xh': 'Xhosa', 'co': u'Korsi\u010dtina', 'ca': u'Katal\xe1n\u0161tina', 'cy': u'Vel\u0161tina', 'cs': u'\u010ce\u0161tina', 'ps': 'Pashto (Pushto)', 'pt': u'Portugal\u0161tina', 'tl': 'Tagalog', 'pa': u'Pa\u0148d\u017e\xe1b\u0161tina', 'vi': u'Vietnam\u0161tina', 'pl': u'Pol\u0161tina', 'hy': u'Arm\xe9n\u0161tina', 'hr': u'Chorvat\u0161tina', 'iu': u'Inuktitut\u0161tina', 'hu': u'Ma\u010far\u0161tina', 'hi': u'Hind\u0161tina', 'ha': 'Hausa', 'he': u'Hebrej\u0161tina', 'mg': u'Malga\u0161tina', 'uz': u'Uzbe\u010dtina', 'ml': u'Malabar\u0161tina', 'mo': u'Moldav\u0161tina', 'mn': u'Mongol\u0161tina', 'mi': u'Maor\u0161tina', 'ik': u'Inupiak\u0161tina', 'mk': u'Makedon\u0161tina', 'ur': u'Urd\u0161tina', 'mt': u'Malt\u0161tina', 'ms': u'Malaj\u0161tina', 'mr': 'Marathi', 'ug': u'Uighur\u0161tina', 'my': u'Barm\u0161tina', 'aa': u'Afar\u0161tina', 'ab': u'Abch\xe1z\u0161tina', 'ss': u'Siswat\u0161tina', 'af': u'Afrik\xe1n\u0161tina', 'tn': u'Setswan\u0161tina', 'sw': u'Svahil\u0161tina', 'is': u'Island\u0161tina', 'am': u'Amhar\u0161tina', 'it': u'Ital\u0161tina', 'sv': u'\u0160v\xe9d\u0161tina', 'ia': 'Interlingua', 'as': u'Assam\xe9\u0161tina', 'ar': u'Arab\u0161tina', 'su': u'Sundan\u0161tina', 'zu': 'Zulu', 'ay': u'Aym\xe1r\u0161tina', 'az': u'Azerbajd\u017e\xe1n\u0161tina', 'ie': 'Interlingue', 'id': u'Indon\xe9\u0161tina', 'sk': u'Sloven\u0161tina', 'nl': u'Holand\u0161tina', 'no': u'Nor\u0161tina', 'na': 'Nauru', 'ne': u'Nep\xe1l\u0161tina', 'vo': 'Volapuk', 'fr': u'Francouz\u0161tina', 'sm': u'Samoy\u0161tina', 'fy': u'Fr\xed\u0161tina', 'fa': u'Per\u0161tina', 'fi': u'Fin\u0161tina', 'fj': u'Fid\u017ei', 'sa': 'Sanskrt', 'fo': u'Faer\u0161tina', 'ka': u'Gruz\xedn\u0161tina', 'kk': u'Kazach\u0161tina', 'sr': u'Srb\u0161tina', 'sq': u'Alb\xe1n\u0161tina', 'ko': u'Korej\u0161tina', 'kn': u'Kannad\u0161tina', 'km': u'Kambod\u017e\u0161tina', 'kl': u'Gr\xf3n\u0161tina', 'ks': u'Ka\u0161m\xedr\u0161tina', 'si': u'Sinh\xe1l\u0161tina', 'sh': u'Srbochorvat\u0161tina', 'so': u'Som\xe1l\u0161tina', 'sn': 'Shona', 'ku': u'Kurd\u0161tina', 'sl': u'Slovin\u0161tina', 'ky': 
u'Kirgiz\u0161tina', 'sg': 'Sangho', 'sd': 'Sindhi'}
countries={'BD': u'Banglad\xe9\u0161', 'BE': 'Belgie', 'BF': 'Burkina Faso', 'BG': 'Bulharsko', 'BA': 'Bosna a Hercegovina', 'BB': 'Barbados', 'WF': 'Wallis a Futuna', 'BM': 'Bermudy', 'BN': 'Brunej Darussalam', 'BO': u'Bol\xedvie', 'BH': 'Bahrajn', 'BI': 'Burundi', 'BJ': 'Benin', 'BT': u'Bh\xfat\xe1n', 'JM': 'Jamajka', 'BV': 'Ostrov Bouvet', 'BW': 'Botswana', 'WS': 'Samoa', 'BR': u'Braz\xedlie', 'BS': 'Bahamy', 'BY': u'B\u011blorusko', 'BZ': 'Belize', 'RU': 'Rusko', 'RW': 'Rwanda', 'TL': u'V\xfdchodn\xed Timor', 'RE': u'R\xe9union', 'TM': u'Turkmenist\xe1n', 'TJ': u'T\xe1d\u017eikist\xe1n', 'RO': 'Rumunsko', 'TK': 'Tokelau', 'GW': 'Guinea-Bissau', 'GU': 'Guam', 'GT': 'Guatemala', 'GS': u'Ji\u017en\xed Georgie a Ji\u017en\xed Sandwichovy ostrovy', 'GR': u'\u0158ecko', 'GQ': u'Rovn\xedkov\xe1 Guinea', 'GP': 'Guadeloupe', 'JP': 'Japonsko', 'GY': 'Guyana', 'GF': u'Francouzsk\xe1 Guyana', 'GE': 'Gruzie', 'GD': 'Grenada', 'GB': u'Velk\xe1 Brit\xe1nie', 'GA': 'Gabon', 'SV': 'El Salvador', 'GN': 'Guinea', 'GM': 'Gambie', 'GL': u'Gr\xf3nsko', 'GI': 'Gibraltar', 'GH': 'Ghana', 'OM': u'Om\xe1n', 'TN': 'Tunisko', 'JO': u'Jord\xe1nsko', 'SP': 'Serbia', 'HR': 'Chorvatsko', 'HT': 'Haiti', 'HU': u'Ma\u010farsko', 'HK': u'Hongkong, zvl\xe1\u0161tn\xed administrativn\xed oblast \u010c\xedny', 'HN': 'Honduras', 'HM': 'Ostrovy Heard a McDonald', 'VE': 'Venezuela', 'PR': 'Portoriko', 'PS': 'Palestinian Territory', 'PW': 'Palau', 'PT': 'Portugalsko', 'SJ': 'Svalbard a Jan Mayen', 'PY': 'Paraguay', 'IQ': u'Ir\xe1k', 'PA': 'Panama', 'PF': u'Francouzsk\xe1 Polyn\xe9sie', 'PG': u'Papua-Nov\xe1 Guinea', 'PE': 'Peru', 'PK': u'P\xe1kist\xe1n', 'PH': u'Filip\xedny', 'PN': 'Pitcairn', 'PL': 'Polsko', 'PM': u'Svat\xfd Pierre a Miquelon', 'ZM': 'Zambie', 'EH': u'Z\xe1padn\xed Sahara', 'EE': 'Estonsko', 'EG': 'Egypt', 'ZA': u'Ji\u017en\xed Afrika', 'EC': u'Ekv\xe1dor', 'IT': u'It\xe1lie', 'VN': 'Vietnam', 'SB': u'\u0160alamounovy ostrovy', 'ET': 'Etiopie', 'SO': u'Som\xe1lsko', 'ZW': 'Zimbabwe', 'SA': u'Sa\xfadsk\xe1 Ar\xe1bie', 'ES': u'\u0160pan\u011blsko', 'ER': 'Eritrea', 'MD': 'Moldavsko, republika', 'MG': 'Madagaskar', 'MA': 'Maroko', 'MC': 'Monako', 'UZ': u'Uzbekist\xe1n', 'MM': 'Myanmar (Burma)', 'ML': 'Mali', 'MO': 'Macao S.A.R. 
China', 'MN': 'Mongolsko', 'MH': 'Marshallovy ostrovy', 'MK': 'Macedonia', 'MU': 'Mauricius', 'MT': 'Malta', 'MW': 'Malawi', 'MV': 'Maladivy', 'MQ': 'Martinik', 'MP': u'Severn\xed Mariany', 'MS': 'Montserrat', 'MR': u'Maurit\xe1nie', 'UG': 'Uganda', 'MY': 'Malajsie', 'MX': 'Mexiko', 'IL': 'Izrael', 'FR': 'Francie', 'IO': u'Britsk\xe9 \xfazem\xed v Indick\xe9m oce\xe1nu', 'SH': u'Svat\xe1 Helena', 'FI': 'Finsko', 'FJ': u'Fid\u017ei', 'FK': u'Falklandsk\xe9 ostrovy', 'FM': u'Mikron\xe9sie, federativn\xed st\xe1t', 'FO': u'Faersk\xe9 ostrovy', 'NI': 'Nikaragua', 'NL': 'Nizozemsko', 'NO': 'Norsko', 'NA': 'Namibie', 'VU': 'Vanuatu', 'NC': u'Nov\xe1 Kaledonie', 'NE': 'Niger', 'NF': 'Norfolk', 'NG': u'Nig\xe9rie', 'NZ': u'Nov\xfd Z\xe9land', 'NP': u'Nep\xe1l', 'NR': 'Nauru', 'NU': 'Niue', 'CK': 'Cookovy ostrovy', 'CI': u'Pob\u0159e\u017e\xed slonoviny', 'CH': u'\u0160v\xfdcarsko', 'CO': 'Kolumbie', 'CN': u'\u010c\xedna', 'CM': 'Kamerun', 'CL': 'Chile', 'CC': u'Kokosov\xe9 ostrovy', 'CA': 'Kanada', 'CG': 'Kongo', 'CF': u'St\u0159edoafrick\xe1 republika', 'CD': u'Kongo, demokratick\xe1 republika', 'CZ': u'\u010cesk\xe1 republika', 'CY': 'Kypr', 'CX': u'V\xe1no\u010dn\xed ostrovy', 'CR': 'Kostarika', 'Fallback': 'en', 'CV': 'Kapverdy', 'CU': 'Kuba', 'SZ': 'Svazijsko', 'SY': u'S\xfdrie', 'KG': u'Kyrgyzst\xe1n', 'KE': u'Ke\u0148a', 'SR': 'Surinam', 'KI': 'Kiribati', 'KH': u'Kambod\u017ea', 'KN': u'Svat\xfd Kitts a Nevis', 'KM': 'Komory', 'ST': u'Svat\xfd Tom\xe1\u0161', 'SK': 'Slovensko', 'KR': u'Ji\u017en\xed Korea', 'SI': 'Slovinsko', 'KP': u'Severn\xed Korea', 'KW': 'Kuvajt', 'SN': 'Senegal', 'SM': 'San Marino', 'SL': 'Sierra Leone', 'SC': 'Seychely', 'KZ': u'Kazachst\xe1n', 'KY': u'Kajmansk\xe9 ostrovy', 'SG': 'Singapur', 'SE': u'\u0160v\xe9dsko', 'SD': u'S\xfad\xe1n', 'DO': u'Dominik\xe1nsk\xe1 republika', 'DM': 'Dominika', 'DJ': u'D\u017eibuti', 'DK': u'D\xe1nsko', 'VG': u'Britsk\xe9 Panensk\xe9 ostrovy', 'DE': u'N\u011bmecko', 'YE': 'Jemen', 'DZ': u'Al\u017e\xedrsko', 'US': u'Spojen\xe9 st\xe1ty', 'UY': 'Uruguay', 'YU': u'Jugosl\xe1vie', 'YT': 'Mayotte', 'UM': u'Men\u0161\xed odlehl\xe9 ostrovy USA', 'LB': 'Libanon', 'LC': u'Svat\xe1 Lucie', 'LA': u'Lidov\u011b demokratick\xe1 republika Laos', 'TV': 'Tuvalu', 'TW': 'Tchaj-wan', 'TT': 'Trinidad a Tobago', 'TR': 'Turecko', 'LK': u'Sr\xed Lanka', 'LI': u'Lichten\u0161tejnsko', 'LV': u'Loty\u0161sko', 'TO': 'Tonga', 'LT': 'Litva', 'LU': 'Lucembursko', 'LR': u'Lib\xe9rie', 'LS': 'Lesotho', 'TH': 'Thajsko', 'TF': u'Francouzsk\xe1 ji\u017en\xed teritoria', 'TG': 'Togo', 'TD': u'\u010cad', 'TC': 'Ostrovy Caicos a Turks', 'LY': 'Libye', 'VA': u'Svat\xfd stolec', 'VC': u'Svat\xfd Vincent a Grenadiny', 'AE': u'Spojen\xe9 arabsk\xe9 emir\xe1ty', 'AD': 'Andorra', 'AG': 'Antigua a Barbuda', 'AF': u'Afgh\xe1nist\xe1n', 'AI': 'Anguila', 'VI': u'Americk\xe9 Panensk\xe9 ostrovy', 'IS': 'Island', 'IR': u'\xcdr\xe1n', 'AM': u'Arm\xe9nie', 'AL': u'Alb\xe1nie', 'AO': 'Angola', 'AN': u'Nizozemsk\xe9 Antily', 'AQ': 'Antarktida', 'AS': u'Americk\xe1 Samoa', 'AR': 'Argentina', 'AU': u'Austr\xe1lie', 'AT': 'Rakousko', 'AW': 'Aruba', 'IN': 'Indie', 'TZ': 'Tanzanie', 'AZ': u'\xc1zerb\xe1jd\u017e\xe1n', 'IE': 'Irsko', 'ID': u'Indon\xe9sie', 'UA': 'Ukrajina', 'QA': 'Katar', 'MZ': 'Mosambik'}
months=['leden', u'\xfanor', u'b\u0159ezen', 'duben', u'kv\u011bten', u'\u010derven', u'\u010dervenec', 'srpen', u'z\xe1\u0159\xed', u'\u0159\xedjen', 'listopad', 'prosinec']
abbrMonths=['I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X', 'XI', 'XII']
days=[u'pond\u011bl\xed', u'\xfater\xfd', u'st\u0159eda', u'\u010dtvrtek', u'p\xe1tek', 'sobota', u'ned\u011ble']
abbrDays=['po', u'\xfat', 'st', u'\u010dt', u'p\xe1', 'so', 'ne']
dateFormats={'medium': '%d.%m.%Y', 'full': '%%(dayname)s, %d. %%(monthname)s %Y', 'long': '%d. %%(monthname)s %Y', 'short': '%d.%m.%y'}
numericSymbols={'group': u'\xa0', 'nativeZeroDigit': '0', 'exponential': 'E', 'perMille': u'\u2030', 'nan': u'\ufffd', 'decimal': ',', 'percentSign': '%', 'list': ';', 'patternDigit': '#', 'plusSign': '+', 'infinity': u'\u221e', 'minusSign': '-'}
|
from __future__ import print_function
import argparse
import os
import sys
from .fileio import get_files
from .graphics import match_diagnostic
from .utils import check_boundaries
from .sfh import SFH
def main(argv):
parser = argparse.ArgumentParser(description="Plot match diagnostics")
parser.add_argument('-f', '--filters', type=str, default=None,
help='comma separated filter names')
parser.add_argument('-d', '--directory', type=str, default=os.getcwd(),
help='specify directory')
parser.add_argument('-l', '--logcounts', action="store_true",
help='pgcmd with logcounts')
    parser.add_argument('-n', '--name', nargs='*', type=str,
                        help='match cmd, sfh, zc file(s)')
args = parser.parse_args(argv)
if args.name is None:
cmd_names = get_files(args.directory, '*cmd')
sfh_files = get_files(args.directory, '*sfh')
sfh_files.extend(get_files(args.directory, '*zc'))
params = get_files(args.directory, '*.param')
phots = get_files(args.directory, '*match')
scrns = get_files(args.directory, '*scrn')
scrns = [s for s in scrns if 'mcmc' not in s]
else:
cmd_names = [n for n in args.name if n.endswith('cmd')]
sfh_files = [n for n in args.name if n.endswith('sfh')]
sfh_files.extend([n for n in args.name if 'zc' in n])
params = [n for n in args.name if n.endswith('param')]
phots = [n for n in args.name if n.endswith('match')]
scrns = [n for n in args.name if n.endswith('scrn')]
scrns = [s for s in scrns if 'mcmc' not in s]
    for p, s in zip(params, scrns):
        check_boundaries(p, s)
labels = ['${\\rm %s}$' % i for i in ('data', 'model', 'diff', 'sig')]
#call_pgcmd(cmd_names, filter1, filter2, labels=labels,
# logcounts=args.logcounts)
if len(sfh_files) > 0:
for sfh_file in sfh_files:
msfh = SFH(sfh_file)
if len(msfh.data) != 0:
msfh.sfh_plot()
msfh.plot_csfr()
    for i in range(len(phots)):
        match_diagnostic(params[i], phots[i])
if __name__ == "__main__":
main(sys.argv[1:])
|
import sys
import os.path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from Code.MathLib import ecartType
import test_common
import pytest
lower_ecart_type = 5.393
upper_ecart_type = 3694932.426
def test_ecart_type_lower_bound():
assert test_common.isclose(ecartType(test_common.lowerBound), lower_ecart_type), "Lower ecart type bound test"
def test_ecart_type_upper_bound():
assert test_common.isclose(ecartType(test_common.upperBound), upper_ecart_type), "Upper ecart type bound test"
def test_ecart_type_invalid():
    # pytest.raises() no longer accepts a 'message' keyword (removed in pytest 5);
    # the original failure description "Invalid ecart type test" is kept here as a comment.
    with pytest.raises(TypeError):
        ecartType(test_common.invalid)
|
# Copyright 2018 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import uuid
import time
from django.test import Client
from django.test import TestCase
from rest_framework import status
from lcm.pub.database.models import FPInstModel, CPInstModel, PortInstModel, NfInstModel
from lcm.pub.database.models import VNFFGInstModel
from lcm.pub.msapi import extsys
from lcm.pub.msapi import sdncdriver
from lcm.pub.utils import restcall
from lcm.ns_sfcs.biz.create_sfc_worker import CreateSfcWorker
from lcm.pub.utils.jobutil import JobUtil
from lcm.ns_sfcs.tests.test_data import nsd_model
class TestSfc(TestCase):
def setUp(self):
self.client = Client()
FPInstModel.objects.filter().delete()
VNFFGInstModel.objects.filter().delete()
CPInstModel.objects.filter().delete()
PortInstModel.objects.filter().delete()
NfInstModel.objects.filter().delete()
self.save_vnffg_inst_data()
self.save_vnf_inst_data()
self.save_cp_inst_data()
self.save_port_inst_data()
self.save_fp_inst_data()
def tearDown(self):
FPInstModel.objects.filter().delete()
VNFFGInstModel.objects.filter().delete()
CPInstModel.objects.filter().delete()
PortInstModel.objects.filter().delete()
NfInstModel.objects.filter().delete()
@mock.patch.object(extsys, "get_sdn_controller_by_id")
@mock.patch.object(sdncdriver, "create_flow_classfier")
@mock.patch.object(restcall, 'call_req')
def test_create_flow_classfier(self, mock_call_req, mock_create_flow_classfier, mock_get_sdn_controller_by_id):
data = {
"fpinstid": "fp_inst_1",
"context": json.dumps(nsd_model)
}
mock_create_flow_classfier.return_value = [0, json.dumps({'id': '1'})]
mock_get_sdn_controller_by_id.return_value = json.loads('{"test":"test_name","url":"url_add"}')
resp = self.client.post("/api/nslcm/v1/ns/create_flow_classifier", data)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
@mock.patch.object(extsys, "get_sdn_controller_by_id")
@mock.patch.object(sdncdriver, 'create_port_pair_group')
@mock.patch.object(sdncdriver, 'create_port_pair')
@mock.patch.object(restcall, 'call_req')
def test_create_port_pair_group(self, mock_call_req, mock_create_port_pair,
mock_create_port_pair_group, mock_get_sdn_controller_by_id):
data = {
"nsinstanceid": "ns_inst_1",
"fpinstid": "fp_inst_1",
"context": json.dumps(nsd_model)
}
mock_create_port_pair.return_value = [0, json.dumps({'id': '1'})]
mock_create_port_pair_group.return_value = [0, json.dumps({'id': '1'})]
mock_get_sdn_controller_by_id.return_value = json.loads('{"test":"test_name","url":"url_add"}')
resp = self.client.post("/api/nslcm/v1/ns/create_port_pair_group", data)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
@mock.patch.object(extsys, "get_sdn_controller_by_id")
@mock.patch.object(sdncdriver, 'create_port_chain')
@mock.patch.object(restcall, 'call_req')
def test_create_port_chain(self, mock_call_req, mock_create_port_chain, mock_get_sdn_controller_by_id):
data = {
"nsinstanceid": "ns_inst_1",
"fpinstid": "fp_inst_1",
"context": json.dumps(nsd_model)
}
self.update_fp_inst_data()
mock_create_port_chain.return_value = [0, json.dumps({'id': '1'})]
mock_get_sdn_controller_by_id.return_value = json.loads('{"test":"test_name","url":"url_add"}')
resp = self.client.post("/api/nslcm/v1/ns/create_port_chain", data)
self.assertEqual(resp.status_code, status.HTTP_200_OK)
@mock.patch.object(CreateSfcWorker, 'run')
@mock.patch.object(JobUtil, 'create_job')
@mock.patch.object(time, 'sleep')
def test_create_sfc(self, mock_sleep, mock_create_job, mock_run):
mock_create_job.return_value = 'job_id_1'
mock_sleep.return_value = None
mock_run.return_value = None
data = {
'nsInstanceid': "ns_inst_1",
"context": json.dumps(nsd_model),
"fpindex": "1",
'fpinstid': str(uuid.uuid4()),
"sdnControllerId": "sdnControllerId_1"
}
resp = self.client.post("/api/nslcm/v1/ns/sfcs", data, format='json')
self.assertEqual(resp.status_code, status.HTTP_200_OK)
self.assertEqual(resp.data['jobId'], 'job_id_1')
def update_fp_inst_data(self):
FPInstModel.objects.filter(fpinstid="fp_inst_1").update(flowclassifiers="1",
portpairgroups=json.JSONEncoder().encode([{
"groupid": "1",
"portpair": ["2"]
}]))
def save_vnffg_inst_data(self):
VNFFGInstModel(
vnffgdid="vnffg_id1",
vnffginstid="vnffg_inst_1",
nsinstid="ns_inst_1",
endpointnumber=2,
vllist="vlinst1",
cplist="cp1",
vnflist="vnf1,vnf2"
).save()
def save_cp_inst_data(self):
CPInstModel(
cpinstanceid="cp_inst_1",
cpdid="cpd_1",
ownertype=0,
ownerid="vnf_inst_1",
relatedtype=1,
relatedport="port_inst_1"
).save()
CPInstModel(
cpinstanceid="cp_inst_2",
cpdid="cpd_2",
ownertype=0,
ownerid="vnf_inst_2",
relatedtype=1,
relatedport="port_inst_2"
).save()
def save_fp_inst_data(self):
FPInstModel(
fpid="fpd_1",
fpinstid="fp_inst_1",
nsinstid="ns_inst_1",
vnffginstid="vnffg_inst_1",
policyinfo=[{
"type": "ACL",
"criteria": {
"dest_port_range": [80, 1024],
"source_port_range": [80, 1024],
"ip_protocol": "tcp",
"dest_ip_range": ["192.168.1.2", "192.168.1.100"],
"source_ip_range": ["192.168.1.2", "192.168.1.100"],
"dscp": 100,
}
}],
status="enabled",
sdncontrollerid="sdn_controller_1"
).save()
FPInstModel(
fpid="fpd_2",
fpinstid="fp_inst_2",
nsinstid="ns_inst_1",
vnffginstid="vnffg_inst_1",
policyinfo=[{
"type": "ACL",
"criteria": {
"dest_port_range": [80, 1024],
"source_port_range": [80, 1024],
"ip_protocol": "tcp",
"dest_ip_range": ["192.168.1.2", "192.168.1.100"],
"source_ip_range": ["192.168.1.2", "192.168.1.100"],
"dscp": 100,
}
}],
status="enabled",
sdncontrollerid="sdn_controller_1"
).save()
def save_port_inst_data(self):
PortInstModel(
portid="port_inst_1",
networkid="network_inst_1",
subnetworkid="subnetwork_inst_1",
vimid="vim_1",
resourceid="res_1",
ipaddress="10.43.25.2",
macaddress="EC-F4-BB-20-43-F1"
).save()
PortInstModel(
portid="port_inst_2",
networkid="network_inst_1",
subnetworkid="subnetwork_inst_1",
vimid="vim_1",
resourceid="res_1",
ipaddress="10.43.25.3",
macaddress="EC-F4-BB-20-43-F2"
).save()
def save_vnf_inst_data(self):
NfInstModel(
nfinstid="vnf_inst_1",
ns_inst_id="ns_inst_1",
vnf_id="vnf_1",
vnfd_model=json.dumps(vnfd_model_dict1)
).save()
NfInstModel(
nfinstid="vnf_inst_2",
vnf_id="vnf_2",
ns_inst_id="ns_inst_1",
vnfd_model=json.dumps(vnfd_model_dict2)
).save()
vnfd_model_dict1 = {
'vdus': [
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '2'
},
'local_storages': [
],
'vdu_id': 'vdu_omm.001',
'image_file': 'opencos_sss_omm_img_release_20150723-1-disk1',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': 'omm.001',
'manual_scale_select_vim': False
},
'description': 'singleommvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '4'
},
'local_storages': [
],
'vdu_id': 'vdu_1',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '1',
'manual_scale_select_vim': False
},
'description': 'ompvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '14'
},
'local_storages': [
],
'vdu_id': 'vdu_2',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '2',
'manual_scale_select_vim': False
},
'description': 'ompvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '14'
},
'local_storages': [
],
'vdu_id': 'vdu_3',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '3',
'manual_scale_select_vim': False
},
'description': 'ompvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '4'
},
'local_storages': [
],
'vdu_id': 'vdu_10',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '10',
'manual_scale_select_vim': False
},
'description': 'ppvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '14'
},
'local_storages': [
],
'vdu_id': 'vdu_11',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '11',
'manual_scale_select_vim': False
},
'description': 'ppvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '14'
},
'local_storages': [
],
'vdu_id': 'vdu_12',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '12',
'manual_scale_select_vim': False
},
'description': 'ppvm'
}
],
'volumn_storages': [
],
'policies': {
'scaling': {
'targets': {
},
'policy_id': 'policy_scale_sss-vnf-template',
'properties': {
'policy_file': '*-vnfd.zip/*-vnf-policy.xml'
},
'description': ''
}
},
'image_files': [
{
'description': '',
'properties': {
'name': 'opencos_sss_omm_img_release_20150723-1-disk1.vmdk',
'checksum': '',
'disk_format': 'VMDK',
'file_url': './zte-cn-sss-main-image/OMM/opencos_sss_omm_img_release_20150723-1-disk1.vmdk',
'container_type': 'vm',
'version': '',
'hypervisor_type': 'kvm'
},
'image_file_id': 'opencos_sss_omm_img_release_20150723-1-disk1'
},
{
'description': '',
'properties': {
'name': 'sss.vmdk',
'checksum': '',
'disk_format': 'VMDK',
'file_url': './zte-cn-sss-main-image/NE/sss.vmdk',
'container_type': 'vm',
'version': '',
'hypervisor_type': 'kvm'
},
'image_file_id': 'sss'
}
],
'vls': [
],
'cps': [
{'cp_id': 'cpd_1',
"description": "",
"properties": {
"mac_address": "00:d9:00:82:11:e1",
"ip_address": "10.43.25.2",
"ip_range_start": "192.168.1.20",
"ip_range_end": "192.168.1.29",
"sfc_encapsulation": ""
}
},
],
'metadata': {
'vendor': 'zte',
'is_shared': False,
'description': '',
'domain_type': 'CN',
'version': 'v4.14.10',
'vmnumber_overquota_alarm': False,
'cross_dc': False,
'vnf_type': 'SSS',
'vnfd_version': 'V00000001',
'id': 'vnfd_2',
'name': 'sss-vnf-template'
},
'vnf_exposed': {
"external_cps": [
{
"key_name": "virtualLink1",
"cp_id": "cp1",
},
],
"forward_cps": [
{
"key_name": "forwarder1",
"cp_id": "cpd_1",
},
{
"key_name": "forwarder2",
"cp_id": "cpd_2",
},
],
}
}
vnfd_model_dict2 = {
'local_storages': [
],
'vdus': [
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '2'
},
'local_storages': [
],
'vdu_id': 'vdu_omm.001',
'image_file': 'opencos_sss_omm_img_release_20150723-1-disk1',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': 'omm.001',
'manual_scale_select_vim': False
},
'description': 'singleommvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '4'
},
'local_storages': [
],
'vdu_id': 'vdu_1',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '1',
'manual_scale_select_vim': False
},
'description': 'ompvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '14'
},
'local_storages': [
],
'vdu_id': 'vdu_2',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '2',
'manual_scale_select_vim': False
},
'description': 'ompvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '14'
},
'local_storages': [
],
'vdu_id': 'vdu_3',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '3',
'manual_scale_select_vim': False
},
'description': 'ompvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '4'
},
'local_storages': [
],
'vdu_id': 'vdu_10',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '10',
'manual_scale_select_vim': False
},
'description': 'ppvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '14'
},
'local_storages': [
],
'vdu_id': 'vdu_11',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '11',
'manual_scale_select_vim': False
},
'description': 'ppvm'
},
{
'volumn_storages': [
],
'nfv_compute': {
'mem_size': '',
'num_cpus': '14'
},
'local_storages': [
],
'vdu_id': 'vdu_12',
'image_file': 'sss',
'dependencies': [
],
'vls': [
],
'cps': [
],
'properties': {
'key_vdu': '',
'support_scaling': False,
'vdu_type': '',
'name': '',
'storage_policy': '',
'location_info': {
'vimId': '',
'availability_zone': '',
'region': '',
'dc': '',
'host': '',
'tenant': ''
},
'inject_data_list': [
],
'watchdog': {
'action': '',
'enabledelay': ''
},
'local_affinity_antiaffinity_rule': {
},
'template_id': '12',
'manual_scale_select_vim': False
},
'description': 'ppvm'
}
],
'volumn_storages': [
],
'policies': {
'scaling': {
'targets': {
},
'policy_id': 'policy_scale_sss-vnf-template',
'properties': {
'policy_file': '*-vnfd.zip/*-vnf-policy.xml'
},
'description': ''
}
},
'image_files': [
{
'description': '',
'properties': {
'name': 'opencos_sss_omm_img_release_20150723-1-disk1.vmdk',
'checksum': '',
'disk_format': 'VMDK',
'file_url': './zte-cn-sss-main-image/OMM/opencos_sss_omm_img_release_20150723-1-disk1.vmdk',
'container_type': 'vm',
'version': '',
'hypervisor_type': 'kvm'
},
'image_file_id': 'opencos_sss_omm_img_release_20150723-1-disk1'
},
{
'description': '',
'properties': {
'name': 'sss.vmdk',
'checksum': '',
'disk_format': 'VMDK',
'file_url': './zte-cn-sss-main-image/NE/sss.vmdk',
'container_type': 'vm',
'version': '',
'hypervisor_type': 'kvm'
},
'image_file_id': 'sss'
}
],
'vls': [
],
'cps': [
{'cp_id': 'cpd_2',
"description": "",
"properties": {
"mac_address": "00:d9:00:82:11:e2",
"ip_address": "10.43.25.3",
"ip_range_start": "192.168.1.20",
"ip_range_end": "192.168.1.29",
"sfc_encapsulation": ""
}
},
],
'metadata': {
'vendor': 'zte',
'is_shared': False,
'description': '',
'domain_type': 'CN',
'version': 'v4.14.10',
'vmnumber_overquota_alarm': False,
'cross_dc': False,
'vnf_type': 'SSS',
'vnfd_version': 'V00000001',
'id': 'sss-vnf-template',
'name': 'vnfd_2'
},
'vnf_exposed': {
"external_cps": [
{
"key_name": "virtualLink1",
"cp_id": "cp1",
},
],
"forward_cps": [
{
"key_name": "forwarder2",
"cp_id": "cpd_2",
},
{
"key_name": "forwarder3",
"cp_id": "cpd_2",
},
],
}
}
|
#===========================|======================|==================|===========|=======================|========================================================|===============|=============================|=============
# REQ FILE | REQ | TC | PC FILE | MAP FILE | DESCRIPTION | SUB REQ | CONFIG | SPEC COV
#===========================|======================|==================|===========|=======================|========================================================|===============|=============================|=============
# req_file.csv | REQ_1 | TC_1 | pc_1.csv | | Testing initialize_req_cov() with no requirement file. | | | sc_1.csv
#---------------------------|----------------------|------------------|-----------------------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# req_file.csv | REQ_2 | TC_2 | pc_2.csv | | Testing initialize_req_cov() with a requirement file. | | | sc_2.csv
#---------------------------|----------------------|------------------|-----------------------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# req_file.csv | REQ_3 [REQ_10] | TC_3 TC_50 TC_1 | pc_3.csv | | Testing log_req_cov() with default testcase, unknown | | | sc_3.csv
# | | | | | testcase and unknown requirement label. | | |
#---------------------------|----------------------|------------------|-----------------------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# req_file.csv | REQ_4 | TC_4 TC_4_FAIL | pc_4.csv | | Testing log_req_cov() with no test_status (i.e. PASS) | | | sc_4.csv
# | | | | | and test_status=FAIL. | | |
#---------------------------|----------------------|------------------|-----------------------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# req_file.csv | REQ_5 | TC_5 | pc_5.csv | | Testing log_req_cov() with UVVM status error triggered | | | sc_5.csv
# | | | | | prior to initialize_req_cov(). | | |
#---------------------------|----------------------|------------------|-----------------------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# req_file.csv | REQ_6 | TC_6 | pc_6.csv | | Testing log_req_cov() with UVVM status error triggered | | | sc_6.csv
# | | | | | after log_req_cov() and prior to finalize_req_cov(). | | |
#---------------------------|----------------------|------------------|-----------------------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# req_file.csv | | TC_7 | pc_7.csv | | Testing initialize_req_cov() with non-existing | | | sc_7.csv
# | | | | | requirement file. | | |
#---------------------------|----------------------|------------------|-----------------------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# sub_req_file.csv | UART_REQ_GENERAL | TC_SUB_REQ | pc_8.csv | sub_req_map_file.csv | Testing passing sub-requirement with test_status=NA, | UART_REQ_BR_A | cfg_1_strict_0.txt | sc_8_0.csv
# | | | | | msg and SCOPE. | UART_REQ_BR_B | cfg_1_strict_1.txt | sc_8_1.csv
# | | | | | | UART_REQ_ODD | cfg_1_strict_2.txt | sc_8_2.csv
# | | | | | | UART_REQ_EVEN | |
#---------------------------|----------------------|------------------|-----------------------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# sub_req_file.csv | UART_REQ_GENERAL | TC_SUB_REQ | pc_9.csv | sub_req_map_file.csv | Testing failing sub-requirement with test_status=NA, | UART_REQ_BR_A | cfg_2_strict_0.txt | sc_9_0.csv
# | | | | | msg and SCOPE. | UART_REQ_BR_B | cfg_2_strict_1.txt | sc_9_1.csv
# | | | | | | UART_REQ_ODD | cfg_2_strict_2.txt | sc_9_2.csv
# | | | | | | UART_REQ_EVEN | |
#---------------------------|----------------------|------------------|-----------------------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# sub_req_file.csv |UART_REQ_GENERAL_OMIT | TC_SUB_REQ_OMIT | pc_16.csv | sub_req_map_file.csv | Testing omitted sub-requirement. | UART_REQ_BR_A | | sc_16.csv
# | | | pc_17.csv | | | UART_REQ_BR_B | | sc_17.csv
# | | | pc_18.cav | | | UART_REQ_ODD | | sc_18.csv
# | | | | | | UART_REQ_EVEN | |
# | | | | | | UART_REQ_OMIT | |
#---------------------------|----------------------|------------------|-----------|-----------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# req_file.csv | REQ_1 | TC_1 | pc_10.csv | | Testing failing simulations with incomplete testcase. | | | sc_10.csv
#---------------------------|----------------------|------------------|-----------|-----------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# req_file.csv | REQ_1/2/3/4 | TC_1 | pc_11.csv | | Testing multiple REQs with one testcase. | | | sc_11.csv
#---------------------------|----------------------|------------------|-----------|-----------------------|--------------------------------------------------------|---------------|-----------------------------|-------------
# req_file.csv | REQ_88 | TC_8 | pc_12.csv | | Testing non-matching requirement name. | | | sc_12.csv
#===========================|======================|==================|===========|=======================|========================================================|===============|=============================|=============
import subprocess
import os
import sys
test_list = [
["python", "../script/run_spec_cov.py", "--strictness", "0", "-p", "../sim/pc_1.csv", "-s", "../sim/sc_1.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_2.csv", "-s", "../sim/sc_2.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_3.csv", "-s", "../sim/sc_3.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_4.csv", "-s", "../sim/sc_4.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_5.csv", "-s", "../sim/sc_5.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_6.csv", "-s", "../sim/sc_6.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_7.csv", "-s", "../sim/sc_7.csv"],
["python", "../script/run_spec_cov.py", "--config", "../tb/maintenance_tb/cfg_1_strict_0.txt"],
["python", "../script/run_spec_cov.py", "--config", "../tb/maintenance_tb/cfg_1_strict_1.txt"],
["python", "../script/run_spec_cov.py", "--config", "../tb/maintenance_tb/cfg_1_strict_2.txt"],
["python", "../script/run_spec_cov.py", "--config", "../tb/maintenance_tb/cfg_2_strict_0.txt"],
["python", "../script/run_spec_cov.py", "--config", "../tb/maintenance_tb/cfg_2_strict_1.txt"],
["python", "../script/run_spec_cov.py", "--config", "../tb/maintenance_tb/cfg_2_strict_2.txt"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_10.csv", "-s", "../sim/sc_10.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_11.csv", "-s", "../sim/sc_11.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_12.csv", "-s", "../sim/sc_12.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_13.csv", "-s", "../sim/sc_13.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_14.csv", "-s", "../sim/sc_14.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/req_file.csv", "-p", "../sim/pc_15.csv", "-s", "../sim/sc_15.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/sub_req_file.csv", "-m", "../tb/maintenance_tb/sub_req_omit_map_file.csv", "-p", "../sim/pc_16.csv", "-s", "../sim/sc_16.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/sub_req_file.csv", "-m", "../tb/maintenance_tb/sub_req_omit_map_file.csv", "-p", "../sim/pc_17.csv", "-s", "../sim/sc_17.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/sub_req_file.csv", "-m", "../tb/maintenance_tb/sub_req_omit_map_file.csv", "-p", "../sim/pc_18.csv", "-s", "../sim/sc_18.csv"],
["python", "../script/run_spec_cov.py", "--strictness", "0", "-r", "../tb/maintenance_tb/sub_req_file.csv", "-m", "../tb/maintenance_tb/sub_req_omit_map_file.csv", "-p", "../sim/pc_19.csv", "-s", "../sim/sc_19.csv"]
]
def remove_specification_coverage_files():
print("Removing old run_spec_cov.py run files...")
    for filename in os.listdir("."):
        if filename.startswith("sc_") and filename.endswith(".csv"):
            print("Removing : %s" % filename)
            os.remove(filename)
        elif filename.startswith("output_") and filename.endswith(".txt"):
            print("Removing : %s" % filename)
            os.remove(filename)
def run_tests():
print("Running tests...")
output = None
for idx, test in enumerate(test_list):
print("Test %d : %s" %(idx, test))
try:
output = subprocess.check_output(test, stderr=subprocess.PIPE)
# Save output for golden check
with open("output_" + str(idx + 1) + ".txt", 'w') as file:
file.write(str(output, 'utf-8'))
except subprocess.CalledProcessError as e:
print("ERROR: %s" %(e))
def verify_test_results():
print("Verify test results...")
num_errors = 0
try:
subprocess.check_call(["py", "../script/maintenance_script/verify_with_golden.py"], stderr=subprocess.PIPE)
except subprocess.CalledProcessError as e:
num_errors = int(e.returncode)
if num_errors != 0:
print("Golden failed with %d errors!" %(num_errors))
sys.exit(num_errors)
remove_specification_coverage_files()
run_tests()
verify_test_results()
|
import os
import random
import json
import imgaug
import torch
import numpy as np
import argparse
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
seed = 1234
random.seed(seed)
imgaug.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
from tqdm import tqdm
import models
import torch.nn.functional as F
from utils.datasets.fer2013dataset import fer2013
from utils.generals import make_batch
model_dict_new = [
("efficientnet_b2b", "../user_data/model_data/efficientnet_b2b_2021Jul25_17.08"),
("efficientnet_b3b", "../user_data/model_data/efficientnet_b3b_2021Jul25_20.08"),
("cbam_resnet50", "../user_data/model_data/cbam_resnet50_test_2021Jul24_19.18"),
("resmasking_dropout1", "../user_data/model_data/resmasking_dropout1_test_2021Jul25_10.03"),
("resmasking", "../user_data/model_data/resmasking_test_2021Jul26_14.33"),
("resnest269e","../user_data/model_data/tbw_resnest269e_test_2021Aug02_11.39"),
("hrnet","../user_data/model_data/tbw_hrnet_test_2021Aug01_17.13"),
("swin_large_patch4_window7_224","../user_data/model_data/tbw_swin_large_patch4_window7_224_test_2021Aug02_21.36")
]
def main():
    parser = argparse.ArgumentParser(description='emotion')
    parser.add_argument('--config', default="fer2013_config.json", type=str, help='config path')
    parser.add_argument('--model_name', default="resmasking_dropout1", type=str, help='model name')
    parser.add_argument('--checkpoint_path', default="resmasking_dropout1_test_2021Aug01_17.13", type=str,
                        help='checkpoint file name under ../user_data/model_data/')
args = parser.parse_args()
with open(args.config) as f:
configs = json.load(f)
test_set = fer2013("test", configs, tta=True, tta_size=10)
# for model_name, checkpoint_path in model_dict_new:
prediction_list = [] # each item is 7-ele array
print("Processing", args.checkpoint_path)
if os.path.exists("../user_data/temp_data/{}.npy".format(args.checkpoint_path)):
return
if configs['type'] == 0:
model = getattr(models, args.model_name)
model = model(in_channels=3, num_classes=7)
elif configs['type'] == 1:
model = models.get_face_model(name=args.model_name)
else:
model = getattr(models, args.model_name)
model = model()
state = torch.load(os.path.join("../user_data/model_data/", args.checkpoint_path),map_location=torch.device('cpu'))
ckpt = {k.replace("module.",''):v for k,v in state['net'].items()}
model.load_state_dict(ckpt)
# model = torch.nn.DataParallel(model)
model.cuda()
model.eval()
with torch.no_grad():
for idx in tqdm(range(len(test_set)), total=len(test_set), leave=False):
images, targets = test_set[idx]
images = make_batch(images)
images = images.cuda(non_blocking=True)
outputs = model(images).cpu()
outputs = F.softmax(outputs, 1)
outputs = torch.sum(outputs, 0) # outputs.shape [tta_size, 7]
outputs = [round(o, 4) for o in outputs.numpy()]
prediction_list.append(outputs)
prediction_list = np.asarray(prediction_list)
if args.checkpoint_path.split('_')[0] != 'efficientnet':
data_4 = prediction_list[:,4].copy()
prediction_list[:,4] = prediction_list[:,6]
prediction_list[:,6] = prediction_list[:,5]
prediction_list[:,5] = data_4
np.save("../temp_data/{}.npy".format(args.checkpoint_path), prediction_list)
if __name__ == "__main__":
main()
|
"""POD-NN modeling for 1D Shekel Equation."""
#%% Imports
import sys
import os
import pickle
import meshio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as plti
from scipy.interpolate import griddata
from scipy.ndimage import map_coordinates
sys.path.append(os.path.join("..", ".."))
from poduqnn.metrics import re_s
from poduqnn.plotting import figsize, savefig
from poduqnn.podnnmodel import PodnnModel
from poduqnn.mesh import read_multi_space_sol_input_mesh_txt
from hyperparams import HP as hp
#%% Load models
model = PodnnModel.load("cache")
X_v_train, v_train, U_train, X_v_val, v_val, U_val = model.load_train_data()
X_v_train_0, v_train_0, U_train_0, X_v_val_0, v_val_0, U_val_0 = model.load_init_data()
#%% Predict and restruct
# U_pred, U_pred_sig = model.predict(X_v_val)
# #%% Validation metrics
# U_pred, _ = model.predict(X_v_val)
# err_val = re_s(U_val, U_pred, div_max=True)
# print(f"RE_v: {err_val:4f}")
#%% Sample the new model to generate a test prediction
with open(os.path.join("cache", "train_tst_idx.pkl"), "rb") as f:
train_tst_idx = pickle.load(f)
# datadir = os.path.join("..", "..", "..", "scratch", "multi2swt")
datadir = "data"
mu_path = os.path.join(datadir, "INPUT")
x_u_mesh_path = datadir
sel = np.loadtxt(os.path.join(datadir, "sel.csv"), skiprows=1, delimiter=",")[:, 0].astype("int64")
x_mesh, connectivity, X_v_tst, U_tst, points_idx = \
read_multi_space_sol_input_mesh_txt(hp["n_s_tst"], hp["n_t"], hp["d_t"], train_tst_idx[1],
hp["mesh_idx"],
x_u_mesh_path, mu_path,
hp["mu_idx"], sel)
bathymetry = meshio.read(os.path.join(datadir, "multi_1", "0_FV-Paraview_0.vtk")).point_data["b"][points_idx]
print(bathymetry)
# X_v_tst = np.loadtxt(os.path.join("cache", "X_v_tst.txt"))
# U_tst = np.load(os.path.join("cache", "U_tst.npy"))
# print(U_tst.shape)
# U_tst_des = np.loadtxt(os.path.join("cache", "U_tst.txt"))
# connectivity = np.loadtxt(os.path.join("cache", "connectivity.txt"))
# x_mesh = np.loadtxt(os.path.join("cache", "x_mesh.txt"))
# U_tst = model.restruct(U_tst_des)
print("Elements count: ", connectivity.shape[0])
print("Nodes count: ", x_mesh.shape[0])
U_pred, U_pred_sig = model.predict(X_v_tst)
U_tst_des = model.destruct(U_tst)
err_val = re_s(U_tst_des, U_pred, div_max=True)
print(f"RE_tst: {err_val:4f}")
#%% VTU export
U_pred = model.restruct(U_pred)
U_pred_sig = model.restruct(U_pred_sig)
U_pred_up = U_pred + 2*U_pred_sig
U_pred_lo = U_pred - 2*U_pred_sig
U_pred_0 = model.project_to_U(model.project_to_v(U_tst_des))
U_pred_0 = model.restruct(U_pred_0)
print("Saving to .vtu")
for s in [0]:
print(f"Sample is {X_v_tst[s*hp['n_t']][1]}")
meshio.write_points_cells(os.path.join("cache", f"x_u_tst_pred_{s}.{0}.vtu"),
x_mesh,
[("triangle", connectivity)],
point_data={
"eta": U_tst[0, :, 0, s],
"eta_pred": U_pred_0[0, :, 0, s],
"eta_pred_up": U_pred_0[0, :, 0, s],
"eta_pred_lo": U_pred_0[0, :, 0, s],
})
for i in range(1, hp["n_t"] - 1):
meshio.write_points_cells(os.path.join("cache", f"x_u_tst_pred_{s}.{i}.vtu"),
x_mesh,
[("triangle", connectivity)],
point_data={
"eta": U_tst[0, :, i, s],
"eta_pred": U_pred[0, :, i, s],
"eta_pred_up": U_pred_up[0, :, i, s],
"eta_pred_lo": U_pred_lo[0, :, i, s],
})
#%% Cross section plotting
x = x_mesh[:, 0]
y = x_mesh[:, 1]
dxy = 1.
X, Y = np.mgrid[int(x.min()):int(x.max()):dxy, int(y.min()):int(y.max()):dxy]
method = "linear"
line = (
[274805.820007385, 5043752.94918024],
[274962.057873288, 5043861.33919971],
)
# Load bathymetry
b = np.loadtxt(os.path.join("cache", "b.csv"), delimiter=',', skiprows=1)[:, 5]
# Create coordinates from bathymethry line
num = 1000
line_x = np.linspace(line[0][0], line[1][0], num)
line_y = np.linspace(line[0][1], line[1][1], num)
line_X, line_Y = np.meshgrid(line_x, line_y)
def project(U):
return np.diagonal(griddata((x, y), U, (line_X, line_Y), method=method))
x_prime_max = np.sqrt((line_x.max() - line_x.min())**2 + (line_y.max() - line_y.min())**2)
x_prime = np.linspace(0., x_prime_max, num)
b_ = project(bathymetry)
# Time samples
idx = [0, 5, 20, 100]
s = 0
# Custom loading
# for filename in os.listdir(os.path.join("cache", "azz")):
# U_azzedine = np.loadtxt(os.path.join("cache", "azz", filename))
# U_pred = U_azzedine[:, 0:1]
# U_pred_lo = U_azzedine[:, 2:3]
# U_pred_up = U_azzedine[:, 3:4]
# s = 0
# idx = [int(filename[-17]) * 10]
# print(idx)
for _ in [1]:
n_plot_x = 2*len(idx)
n_plot_y = 5
fig = plt.figure(figsize=figsize(n_plot_x, n_plot_y, scale=1.0))
gs = fig.add_gridspec(n_plot_x, n_plot_y)
ylim = (25.5, 31.5)
for i, t_i in enumerate(idx):
# Projections
U_tst_ = project(U_tst[0, :, t_i, s])
if i == 0:
U_pred_ = project(U_pred_0[0, :, t_i, s])
U_pred_lo_ = project(U_pred_0[0, :, t_i, s])
U_pred_up_ = project(U_pred_0[0, :, t_i, s])
else:
U_pred_ = project(U_pred[0, :, t_i, s])
U_pred_lo_ = project(U_pred_lo[0, :, t_i, s])
U_pred_up_ = project(U_pred_up[0, :, t_i, s])
# U_pred_ = project(U_pred[:, 0])
# U_pred_lo_ = project(U_pred[:, 0])
# U_pred_up_ = project(U_pred[:, 0])
# Plot
ax = fig.add_subplot(gs[2*i:2*i+2, 0:2])
img = plti.imread(f"cache/x_u_tst_pred.{t_i}.png")
ax.imshow(img)
ax.set_xlabel(r"Surface elevation $\eta$")
# ax.set_xlabel(f"$x$")
# ax.set_ylabel(f"$y$")
ax.set_xticks([])
ax.set_yticks([])
ax = fig.add_subplot(gs[2*i:2*i+2, 2:])
lbl = r"{\scriptscriptstyle\textrm{tst},1}"
# ax.plot(x_prime, b, "k:", label="$b$")
ax.fill_between(x_prime, np.zeros_like(b_), b_,
edgecolor="k", alpha=0.3, facecolor="w", hatch="/",
label="$b$")
ax.plot(x_prime, U_pred_, "b-", label=r"$\hat{u}_D(s_{" + lbl + r"})$")
ax.plot(x_prime, U_tst_, "r--", label=r"$u_D(s_{" + lbl + r"})$")
ax.fill_between(x_prime, U_pred_lo_, U_pred_up_,
alpha=0.2, label=r"$2\sigma_D(s_{" + lbl + r"})$")
ax.set_xlabel(f"$x'$")
ax.set_ylabel("$\eta$")
ax.set_ylim(ylim)
ax.set_title(f"$\eta_0={X_v_tst[s*hp['n_t']][1]:.3f}\ m$, $t={t_i * hp['d_t']}\ s$")
if i == 0:
ax.legend()
plt.tight_layout()
# plt.show()
savefig("results/podensnn-swt-samples", True)
# savefig(f"results/{filename}", True)
|
# OpenWeatherMap API Key
weather_api_key = "your key here"
|
# Copyright 2020 The SODA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from oslo_utils import importutils
from delfin import exception
class AlertHandlerTestCase(unittest.TestCase):
ALERT_HANDLER_CLASS = 'delfin.drivers.huawei.oceanstor.alert_handler' \
'.AlertHandler'
def _get_alert_handler(self):
alert_handler_class = importutils.import_class(
self.ALERT_HANDLER_CLASS)
alert_handler = alert_handler_class()
return alert_handler
def _get_fake_alert_info(self):
alert_info = {'storage_id': 'abcd-1234-56789',
'storage_name': 'storage1', 'vendor': 'fake vendor',
'model': 'fake model',
'hwIsmReportingAlarmLocationInfo': 'location1',
'hwIsmReportingAlarmFaultTitle': 'Trap Test Alarm',
'hwIsmReportingAlarmFaultType': 'equipmentFault',
'hwIsmReportingAlarmFaultLevel': 'criticalAlarm',
'hwIsmReportingAlarmAlarmID': '4294967294',
'hwIsmReportingAlarmSerialNo': '4294967295',
'hwIsmReportingAlarmAdditionInfo': 'This is just for '
'testing.Please '
'ignore it',
'hwIsmReportingAlarmFaultCategory': 'faultAlarm',
'hwIsmReportingAlarmLocationAlarmID': '230584300921369',
'hwIsmReportingAlarmFaultTime': '2020-6-25,1:42:26.0'}
return alert_info
def _get_fake_incomplete_alert_info(self):
# hwIsmReportingAlarmFaultCategory is missing here
alert_info = {'storage_id': 'abcd-1234-56789',
'storage_name': 'storage1', 'vendor': 'fake vendor',
'model': 'fake model',
'hwIsmReportingAlarmLocationInfo': 'location1',
'hwIsmReportingAlarmFaultTitle': 'Trap Test Alarm',
'hwIsmReportingAlarmFaultType': 'equipmentFault',
'hwIsmReportingAlarmFaultLevel': 'criticalAlarm',
'hwIsmReportingAlarmAlarmID': '4294967294',
'hwIsmReportingAlarmSerialNo': '4294967295',
'hwIsmReportingAlarmAdditionInfo': 'This is just for '
'testing.Please '
'ignore it',
'hwIsmReportingAlarmLocationAlarmID': '230584300921369',
'hwIsmReportingAlarmFaultTime': '2020-6-25,1:42:26.0'}
return alert_info
def test_parse_alert_with_all_necessary_info(self):
""" Success flow with all necessary parameters"""
alert_handler_inst = self._get_alert_handler()
alert = self._get_fake_alert_info()
expected_alert_model = {'me_dn': alert['storage_id'],
'me_name': alert['storage_name'],
'manufacturer': alert['vendor'],
'product_name': alert['model'],
'category':
alert['hwIsmReportingAlarmFaultCategory'],
'location':
alert['hwIsmReportingAlarmLocationInfo'],
'event_type':
alert['hwIsmReportingAlarmFaultType'],
'severity':
alert['hwIsmReportingAlarmFaultLevel'],
'probable_cause':
alert['hwIsmReportingAlarmAdditionInfo'],
'me_category': 'storage-subsystem',
'alarm_id':
alert['hwIsmReportingAlarmAlarmID'],
'alarm_name':
alert['hwIsmReportingAlarmFaultTitle'],
'device_alert_sn':
alert['hwIsmReportingAlarmSerialNo'],
'occur_time':
alert['hwIsmReportingAlarmFaultTime'],
'clear_type': '',
'match_key': '',
'native_me_dn': ''
}
context = {}
alert_model = alert_handler_inst.parse_alert(context, alert)
# Verify that all other fields are matching
self.assertDictEqual(expected_alert_model, alert_model)
def test_parse_alert_without_mandatory_info(self):
""" Error flow with some mandatory parameters missing"""
alert_handler_inst = self._get_alert_handler()
context = {}
alert = self._get_fake_incomplete_alert_info()
self.assertRaisesRegex(exception.InvalidResults,
"Failed to build alert "
"model as some "
"attributes missing in "
"alert message",
alert_handler_inst.parse_alert, context, alert)
|
from django.core.management.base import BaseCommand, CommandError
import django.core.management as core_management
from django.db import connection
from device_status import models
from djangoautoconf.model_utils.model_attr_utils import enum_models
class Command(BaseCommand):
args = ''
    help = 'Drop all database tables belonging to the specified app'
    def handle(self, *args, **options):
        print("Please enter the name of the app whose tables you want to delete:")
r = raw_input()
# r = "device_status"
if r != "":
app_module = __import__("%s.models" % r, fromlist="dummy")
cursor = connection.cursor()
for model in enum_models(app_module):
try:
cursor.execute("DROP TABLE IF EXISTS %s;" % model.objects.model._meta.db_table)
            except Exception:
                pass
|
class Something:
def __init__(self, stuff):
self.hello = stuff
allthings = [Something(i) for i in range(5)]
many = lambda x: x.hello >= 3
manythings = filter(many, allthings)
for thing in manythings:
print(thing.hello)
|
# -*- coding: utf-8 -*-
import os
import re
import codecs
import _pickle as cPickle
from nltk import word_tokenize
from nltk.corpus import stopwords
from sklearn.metrics.pairwise import linear_kernel
from warnings import simplefilter
# ignore all warnings
simplefilter(action='ignore')
domain = ""
locale = ""
intent = None
utterance = None
tfidfVec = None
svd = None
trainLSA = None
stops = None
scriptDir = os.path.dirname(__file__)
def initalise(domain_, locale_):
global domain
global locale
global intent
global utterance
global tfidfVec
global svd
global trainLSA
global stops
domain = domain_
locale = locale_
picklePath = os.path.join(scriptDir, '..', '..', 'models', 'tfidf', domain + '_' + locale + '_')
intent = cPickle.load(open(picklePath + 'intent.m', 'rb'))
utterance = cPickle.load(open(picklePath + 'utterance.m', 'rb'))
tfidfVec = cPickle.load(open(picklePath + 'tfidfVec.m', 'rb'))
svd = cPickle.load(open(picklePath + 'svd.m', 'rb'))
trainLSA = cPickle.load(open(picklePath + 'trainLSA.m', 'rb'))
stopwordFile = os.path.join(scriptDir, '..', '..', '..', 'dictionary', 'stopwords_' + locale + '.txt')
arrayWords = []
stopWords = []
sList = [line.rstrip('\n') for line in codecs.open((stopwordFile), 'r+', 'utf-8')]
for line in sList:
if line != "":
arrayWords.append(line.split(','))
for a_word in arrayWords:
for s_word in a_word:
if (re.sub(' ', '', s_word)) != "":
stopWords.append(s_word)
extraStopWords = set(stopWords)
if locale == 'ar':
stops = set(stopwords.words('arabic')) | extraStopWords
elif locale == 'da':
stops = set(stopwords.words('danish')) | extraStopWords
elif locale == 'en':
stops = set(stopwords.words('english')) | extraStopWords
elif locale == 'es':
stops = set(stopwords.words('spanish')) | extraStopWords
elif locale == 'hi':
stops = extraStopWords
elif locale == 'mr':
stops = extraStopWords
elif locale == 'nl':
stops = set(stopwords.words('dutch')) | extraStopWords
elif locale == 'sv':
stops = set(stopwords.words('swedish')) | extraStopWords
else:
stops = set(stopwords.words('english')) | extraStopWords
def stopwordRemover(utterance):
word_tokens = word_tokenize(utterance)
    return ' '.join([w for w in word_tokens if w not in stops])
def replace_nth(string, sub, repl, nth):
find = string.find(sub)
i = find != -1
while find != -1 and i != nth:
find = string.find(sub, find + 1)
i += 1
if i == nth:
return string[:find] + repl + string[find + len(sub):]
return string
def wordReplacer(utter, matchedDict, combinations):
matchedDict = matchedDict.copy()
while (len(matchedDict) > 0):
replacement = matchedDict.popitem()
for wordReplacement in replacement[1]['synonym']:
new_utter = utter.replace(replacement[0], wordReplacement)
combinations.append(new_utter)
wordReplacer(new_utter, matchedDict, combinations)
def genSentences(utter, matchedDict, combinations):
matchedDict = matchedDict.copy()
while (len(matchedDict) > 0):
replacement = matchedDict.popitem()
for count in range(replacement[1]['count']):
for wordReplacement in replacement[1]['synonym']:
new_utter = replace_nth(utter, replacement[0], wordReplacement, count + 1)
combinations.append(new_utter)
wordReplacer(new_utter, matchedDict, combinations)
def processUtterance(utter):
scoreList = {}
idList = {}
for query in utter:
query = stopwordRemover(query.lower())
query = [query]
test = tfidfVec.transform(query).toarray()
LSATest = svd.transform(test)
cosineSimilarities = linear_kernel(LSATest, trainLSA).flatten()
related_docs_indices = cosineSimilarities.argsort()[::-1]
for i in range(len(related_docs_indices)):
fID = related_docs_indices[i]
fScore = cosineSimilarities[fID]
fIntent = intent[related_docs_indices[i]]
if (fIntent in scoreList):
scoreList[fIntent] = max(fScore, scoreList[fIntent])
if (fScore > cosineSimilarities[idList.get(fIntent)]):
idList[fIntent] = fID
else:
scoreList[fIntent] = fScore
idList[fIntent] = fID
orderedIntents = sorted(scoreList, key=scoreList.get, reverse=True)
intent_, score_, utterance_ = [], [], []
intent_.append(orderedIntents[0])
intent_.append(orderedIntents[1])
intent_.append(orderedIntents[2])
score_.append("{:.2f}".format(scoreList[orderedIntents[0]]))
score_.append("{:.2f}".format(scoreList[orderedIntents[1]]))
score_.append("{:.2f}".format(scoreList[orderedIntents[2]]))
utterance_.append(utterance[idList.get(orderedIntents[0])])
utterance_.append(utterance[idList.get(orderedIntents[1])])
utterance_.append(utterance[idList.get(orderedIntents[2])])
entities_ = []
intent_ranking_ = [{"name": p, "confidence": q, "utterance": r} for p, q, r in zip(intent_, score_, utterance_)]
intent_top_ = {"name": intent_[0], "confidence": score_[0]}
# build JSON response
response = {}
response['intent'] = intent_top_
response['entities'] = entities_
response['intent_ranking'] = intent_ranking_
response['text'] = utter[0].strip('"')
return response
def genUtterances(utter):
matched = {}
utteranceSet = set(utter.split())
synonymFile = os.path.join(scriptDir, '..', '..', '..', 'dictionary', 'synonyms_' + locale + '.txt')
with codecs.open(synonymFile, 'r', 'utf-8')as rawSynonymsFileobj:
rawSynonyms = rawSynonymsFileobj.read()
rawSynonyms = rawSynonyms.split('\n')
synonymsList = []
for i in rawSynonyms:
synonymsList.append(i.split(','))
for synonym in synonymsList:
for word in set(synonym) & utteranceSet:
count = utter.split().count(word)
matched[word] = {'synonym': list(set(synonym) - set([word])), 'count': count}
combinations = [utter]
genSentences(utter, matched, combinations)
combinations.sort()
return combinations
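# ---------------------------------------------------------------------------
# Minimal, self-contained sketch (not part of the original module): it only
# exercises the pure string helpers above, so no pickled models or dictionary
# files are required. The synonym map below is made up for the demo.
if __name__ == "__main__":
    print(replace_nth("turn the light on and the fan on", "on", "off", 2))
    # -> turn the light on and the fan off
    demo_matched = {"light": {"synonym": ["lamp"], "count": 1}}
    demo_combinations = ["turn the light on"]
    genSentences("turn the light on", demo_matched, demo_combinations)
    print(sorted(set(demo_combinations)))
    # -> ['turn the lamp on', 'turn the light on']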
|
import logging
from typing import Tuple, Dict
import gnmi_pb2
VERSION = "0.2.0"
HOST = "localhost"
PORT = 50061
logging.basicConfig(
format='%(asctime)s:%(relativeCreated)s %(levelname)s:%(filename)s:%(lineno)s:%(funcName)s %(message)s [%(threadName)s]',
level=logging.WARNING)
log = logging.getLogger('confd_gnmi_common')
def common_optparse_options(parser):
parser.add_argument("--logging", action="store", dest="logging",
choices=["error", "warning", "info", "debug"],
help="Logging level",
default="warning")
def common_optparse_process(opt, log):
level = None
if opt.logging == "error":
level = logging.ERROR
elif opt.logging == "warning":
level = logging.WARNING
elif opt.logging == "info":
level = logging.INFO
elif opt.logging == "debug":
level = logging.DEBUG
else:
log.warning("Unknown logging level %s", opt.logging)
set_logging_level(level)
def set_logging_level(level):
if level is not None:
# Thanks https://stackoverflow.com/a/53250066
[logging.getLogger(name).setLevel(level) for name in
logging.root.manager.loggerDict]
# TODO tests
def make_name_keys(elem_string) -> Tuple[str, Dict[str, str]]:
"""
Split element string to element name and keys.
e.g. elem[key1=7][key2=aaa] => (elem, {key1:7, key2:aaa})
:param elem_string:
:return: tuple with element name and key map
"""
log.debug("==> elem_string=%s", elem_string)
keys = {}
name = elem_string
if '[' in elem_string:
ks = elem_string.split("[")
name = ks[0]
for k in ks[1:]:
if k != '':
key = k.replace("]", '').split('=')
keys[key[0]] = key[1]
log.debug("<== name=%s keys=%s", name, keys)
return name, keys
# Create gNMI Path object from string representation of path
# see: https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md#222-paths
# TODO tests
def make_gnmi_path(xpath_string, origin=None, target=None) -> gnmi_pb2.Path:
"""
Create gnmi path from string path
:param xpath_string:
:param origin:
:param target:
:return:
"""
log.debug("==> path_string=%s origin=%s target=%s",
xpath_string, origin, target)
elems = []
elem_strings = xpath_string.split('/')
log.debug("elem_strings=%s", elem_strings)
for e in elem_strings:
if e != '':
(name, keys) = make_name_keys(e)
elem = gnmi_pb2.PathElem(name=name, key=keys)
elems.append(elem)
path = gnmi_pb2.Path(elem=elems, target=target, origin=origin)
log.debug("<== path=%s", path)
return path
def _make_string_path(gnmi_path=None, gnmi_prefix=None, quote_val=False,
xpath=False) -> str:
"""
Create string path from gnmi_path and gnmi_prefix
:param gnmi_path:
:param gnmi_prefix:
:param quote_val:
:param xpath:
:return:
"""
log.debug("==> gnmi_path=%s gnmi_prefix=%s quote_val=%s xpath=%s",
gnmi_path, gnmi_prefix, quote_val, xpath)
def make_path(gnmi_path):
path = ""
for e in gnmi_path.elem:
path += "/" + e.name
for k, v in e.key.items():
val = v if not quote_val else "\"{}\"".format(v)
path += "[{}={}]".format(k, val) if xpath else "{{{}}}".format(
val)
if path == "":
path = "/"
return path
path_str = ""
if gnmi_prefix is not None and len(gnmi_prefix.elem) > 0:
path_str = make_path(gnmi_prefix)
if gnmi_path is not None:
path_str = path_str + make_path(gnmi_path)
log.debug("<== path_str=%s", path_str)
return path_str
# TODO tests
def make_xpath_path(gnmi_path=None, gnmi_prefix=None, quote_val=False) -> str:
"""
Create string path from gnmi_path and gnmi_prefix
:param gnmi_path:
:param gnmi_prefix:
:param quote_val:
:return:
"""
log.debug("==> gnmi_path=%s gnmi_prefix=%s quote_val=%s",
gnmi_path, gnmi_prefix, quote_val)
path_str = _make_string_path(gnmi_path=gnmi_path, gnmi_prefix=gnmi_prefix,
quote_val=quote_val, xpath=True)
log.debug("<== path_str=%s", path_str)
return path_str
def make_formatted_path(gnmi_path, gnmi_prefix=None, quote_val=False) -> str:
"""
Create string path from gnmi_path and gnmi_prefix
:param gnmi_path:
:param gnmi_prefix:
:param quote_val:
:return:
"""
log.debug("==> gnmi_path=%s gnmi_prefix=%s quote_val=%s",
gnmi_path, gnmi_prefix, quote_val)
path_str = _make_string_path(gnmi_path=gnmi_path, gnmi_prefix=gnmi_prefix,
quote_val=quote_val, xpath=False)
log.debug("<== path_str=%s", path_str)
return path_str
def get_data_type(datatype_str):
datatype_map = {
"ALL": gnmi_pb2.GetRequest.DataType.ALL,
"CONFIG": gnmi_pb2.GetRequest.DataType.CONFIG,
"STATE": gnmi_pb2.GetRequest.DataType.STATE,
"OPERATIONAL": gnmi_pb2.GetRequest.DataType.OPERATIONAL,
}
return datatype_map[datatype_str]
def get_sub_mode(mode_str):
mode_map = {
"ONCE": gnmi_pb2.SubscriptionList.ONCE,
"POLL": gnmi_pb2.SubscriptionList.POLL,
"STREAM": gnmi_pb2.SubscriptionList.STREAM,
}
return mode_map[mode_str]
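# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# protobuf-generated gnmi_pb2 bindings imported above are available, and simply
# round-trips a string path through make_gnmi_path()/make_xpath_path().
if __name__ == "__main__":
    print(make_name_keys("interface[name=eth0][unit=0]"))
    # -> ('interface', {'name': 'eth0', 'unit': '0'})
    demo_path = make_gnmi_path("/interfaces/interface[name=eth0]/state")
    print(make_xpath_path(demo_path))
    # -> /interfaces/interface[name=eth0]/state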
|
from dearpygui.simple import *
from dearpygui.core import *
from assets import properties
from windows.discord import functions
import json
import base
import platform
from mainwindow import webfunc
with open('user/settings.hqs', 'r') as usersettings:
user = json.load(usersettings)
with open(f'languages/{user["language"]}.hqs', 'r') as language:
lang = json.load(language)
def position(sender, data):
base.center_pos('DiscordWindow')
def close(sender, data):
base.show_main()
delete_item('DiscordWindow', children_only=True)
delete_item('DiscordWindow')
def window():
hide_item('MainWindow', children_only=True)
for widget in properties.MainWindowWidgets:
hide_item(widget)
add_window('DiscordWindow',
label=lang["buttons"]["nav"]["discord"],
autosize=True, on_close=close)
add_image('DiscordTitleImage',
value=properties.DiscordLogo,
source=properties.DiscordLogo,
width=100, height=100)
add_same_line()
with group('DiscordDescriptionGroup'):
add_text('Discord')
add_separator()
add_text(lang["windows"]["discord"]["description"])
add_button(name='DiscordJoinhqsartworksButton', label=lang["buttons"]["windows"]["discord"]["join_hqsartworks"],
callback=webfunc.hqsartworksServerInvite)
add_same_line()
add_button(name='DiscordJoinAlphaclanButton', label=lang["buttons"]["windows"]["discord"]["join_alphaclan"],
callback=webfunc.AlphaclanServerInvite)
set_render_callback(position)
end()
def StartWindow(sender, data):
window()
|
from collections import defaultdict
from commands import SourceSubparser
from models import Variant
from variant_sources import variants_source
from pandas import read_csv
import numpy as np
"""
./snp_parser.py -n -d clinvar_variants clinvar --trait 'Polycystic Kidney Disease'
chrom pos ref alt measureset_type measureset_id rcv allele_id
symbol hgvs_c hgvs_p molecular_consequence clinical_significance
pathogenic benign conflicted review_status gold_stars
all_submitters all_traits all_pmids inheritance_modes
age_of_onset prevalence disease_mechanism origin xrefs
"""
clinvar_args = SourceSubparser(
'clinvar',
help='Arguments for clinvar variants source'
)
clinvar_args.add_command(
'--path',
    help='Path to clinvar tab-delimited file',
default='clinvar/output/b37/single/clinvar_alleles.single.b37.tsv.gz'
)
clinvar_args.add_command(
'--trait',
help='Restrict to variants with given trait'
)
@variants_source
def clinvar_variants(args):
# assert args.clinvar
types = {
'chrom': str,
'pos': np.int32,
'ref': str,
'alt': str,
'symbol': str,
'hgvs_c': str,
'hgvs_p': str,
'molecular_consequence': str,
'all_traits': str
}
df = read_csv(args.path, sep='\t', usecols=types.keys(), dtype=types)
if args.trait:
df = df[df.all_traits.str.contains(args.trait)]
variants_by_gene = defaultdict(list)
for row in df.iterrows():
row = row[1]
gene = row.symbol
v = Variant(
chr_name=row.chrom,
chr_start=row.pos,
chr_end=row.pos+len(row.alt)-len(row.ref),
            ref=row.ref,
snp_id=row.hgvs_p,
alts=(row.alt,),
gene=gene
)
variants_by_gene[gene].append(v)
return variants_by_gene
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
head = cur = ListNode(0)
carry = 0
while l1 is not None or l2 is not None:
l1_val = 0 if l1 is None else l1.val
l2_val = 0 if l2 is None else l2.val
value = l1_val + l2_val + carry
if value < 10:
carry = 0
else:
carry = 1
value -= 10
cur.next = ListNode(value)
cur = cur.next
if l1 is not None:
l1 = l1.next
if l2 is not None:
l2 = l2.next
if carry == 1:
cur.next = ListNode(1)
return head.next
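# ---------------------------------------------------------------------------
# Minimal local sketch (not part of the original snippet). LeetCode normally
# supplies ListNode at runtime, so a matching definition is included here only
# to make the example self-contained: 342 + 465 = 807, with digits stored in
# reverse order.
if __name__ == "__main__":
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None
    def build_list(digits):
        head = cur = ListNode(0)
        for d in digits:
            cur.next = ListNode(d)
            cur = cur.next
        return head.next
    l1 = build_list([2, 4, 3])  # represents 342
    l2 = build_list([5, 6, 4])  # represents 465
    node = Solution().addTwoNumbers(l1, l2)
    result = []
    while node:
        result.append(node.val)
        node = node.next
    print(result)  # -> [7, 0, 8], i.e. 807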
|
from enum import Enum
class ColumnTypes(Enum):
Int = 0
String = 1
def to_sql_type(self) -> str:
if self == ColumnTypes.Int:
return "Int"
elif self == ColumnTypes.String:
return "Varchar"
def to_python_type(self) -> type:
if self == ColumnTypes.Int:
return int
elif self == ColumnTypes.String:
return str
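# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module), printing both
# mappings for every enum member.
if __name__ == "__main__":
    for column_type in ColumnTypes:
        print(column_type.name, "->", column_type.to_sql_type(),
              "/", column_type.to_python_type().__name__)
    # Int -> Int / int
    # String -> Varchar / str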
|
# Copyright (c) 2019, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OF THE UNIVERSITY OF CALIFORNIA
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
class DockerClientInterface(object):
# Interface of a docker-py client. Returns a DockerContainerInterface
# instance.
    def GetContainer(self, *args, **kwargs):
        raise NotImplementedError(
            "GetContainer in DockerClientInterface not implemented in "
            "{}".format(type(self)))
    def CreateContainer(self, *args, **kwargs):
        raise NotImplementedError(
            "CreateContainer in DockerClientInterface not implemented in "
            "{}".format(type(self)))
class DockerContainerInterface(object):
    # Can be a real one (backed by docker-py) or a mocked instance.
    def Start(self, *args, **kwargs):
        raise NotImplementedError(
            "Start in DockerInterface not implemented in {}".format(type(self)))
    def Stop(self, *args, **kwargs):
        raise NotImplementedError(
            "Stop in DockerInterface not implemented in {}".format(type(self)))
    def Restart(self, *args, **kwargs):
        raise NotImplementedError(
            "Restart in DockerInterface not implemented in {}".format(type(self)))
    def Remove(self, *args, **kwargs):
        raise NotImplementedError(
            "Remove in DockerInterface not implemented in {}".format(type(self)))
# TODO(shengye): Maybe add "try-except" wrappers to the derived classes and
# capture the exceptions from docker-py and replace them with our own exceptions
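# ---------------------------------------------------------------------------
# Minimal in-memory fake (not part of the original module), sketching how the
# two interfaces above can be paired in unit tests that must not touch a real
# Docker daemon. The class and attribute names below are illustrative only.
class FakeDockerContainer(DockerContainerInterface):
    def __init__(self, name):
        self.name = name
        self.running = False
    def Start(self, *args, **kwargs):
        self.running = True
    def Stop(self, *args, **kwargs):
        self.running = False
    def Restart(self, *args, **kwargs):
        self.running = True
    def Remove(self, *args, **kwargs):
        self.running = False
class FakeDockerClient(DockerClientInterface):
    def __init__(self):
        self._containers = {}
    def GetContainer(self, name, *args, **kwargs):
        return self._containers[name]
    def CreateContainer(self, name, *args, **kwargs):
        container = FakeDockerContainer(name)
        self._containers[name] = container
        return container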
|
from unittest import SkipTest
import numpy as np
import pandas as pd
try:
import dask.dataframe as dd
except:
dd = None
from holoviews import Dataset, Curve, Dimension, Scatter, Distribution
from holoviews.core import Apply, Redim
from holoviews.element.comparison import ComparisonTestCase
from holoviews.operation import histogram
try:
from holoviews.operation.datashader import dynspread, datashade, rasterize
except:
dynspread = datashade = rasterize = None
class DatasetPropertyTestCase(ComparisonTestCase):
def setUp(self):
self.df = pd.DataFrame({
'a': [1, 1, 3, 3, 2, 2, 0, 0],
'b': [10, 20, 30, 40, 10, 20, 30, 40],
'c': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
'd': [-1, -2, -3, -4, -5, -6, -7, -8]
})
self.ds = Dataset(
self.df,
kdims=[
Dimension('a', label="The a Column"),
Dimension('b', label="The b Column"),
Dimension('c', label="The c Column"),
Dimension('d', label="The d Column"),
]
)
self.ds2 = Dataset(
self.df.iloc[2:],
kdims=[
Dimension('a', label="The a Column"),
Dimension('b', label="The b Column"),
Dimension('c', label="The c Column"),
Dimension('d', label="The d Column"),
]
)
class ConstructorTestCase(DatasetPropertyTestCase):
def test_constructors_dataset(self):
ds = Dataset(self.df)
self.assertIs(ds, ds.dataset)
# Check pipeline
ops = ds.pipeline.operations
self.assertEqual(len(ops), 1)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ds, ds.pipeline(ds.dataset))
def test_constructor_curve(self):
element = Curve(self.df)
expected = Dataset(
self.df,
kdims=self.df.columns[0],
vdims=self.df.columns[1:].tolist(),
)
self.assertEqual(element.dataset, expected)
# Check pipeline
pipeline = element.pipeline
self.assertEqual(len(pipeline.operations), 1)
self.assertIs(pipeline.operations[0].output_type, Curve)
self.assertEqual(element, element.pipeline(element.dataset))
class ToTestCase(DatasetPropertyTestCase):
def test_to_element(self):
curve = self.ds.to(Curve, 'a', 'b', groupby=[])
curve2 = self.ds2.to(Curve, 'a', 'b', groupby=[])
self.assertNotEqual(curve, curve2)
self.assertEqual(curve.dataset, self.ds)
scatter = curve.to(Scatter)
self.assertEqual(scatter.dataset, self.ds)
# Check pipeline
ops = curve.pipeline.operations
self.assertEqual(len(ops), 2)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
# Execute pipeline
self.assertEqual(curve.pipeline(curve.dataset), curve)
self.assertEqual(
curve.pipeline(self.ds2), curve2
)
def test_to_holomap(self):
curve_hmap = self.ds.to(Curve, 'a', 'b', groupby=['c'])
# Check HoloMap element datasets
for v in self.df.c.drop_duplicates():
curve = curve_hmap.data[(v,)]
# check dataset
self.assertEqual(
curve.dataset, self.ds
)
# execute pipeline
self.assertEqual(curve.pipeline(curve.dataset), curve)
def test_to_holomap_dask(self):
if dd is None:
raise SkipTest("Dask required to test .to with dask dataframe.")
ddf = dd.from_pandas(self.df, npartitions=2)
dds = Dataset(
ddf,
kdims=[
Dimension('a', label="The a Column"),
Dimension('b', label="The b Column"),
Dimension('c', label="The c Column"),
Dimension('d', label="The d Column"),
]
)
curve_hmap = dds.to(Curve, 'a', 'b', groupby=['c'])
# Check HoloMap element datasets
for v in self.df.c.drop_duplicates():
curve = curve_hmap.data[(v,)]
self.assertEqual(
curve.dataset, self.ds
)
# Execute pipeline
self.assertEqual(curve.pipeline(curve.dataset), curve)
class CloneTestCase(DatasetPropertyTestCase):
def test_clone(self):
# Dataset
self.assertEqual(self.ds.clone().dataset, self.ds)
# Curve
curve = self.ds.to.curve('a', 'b', groupby=[])
curve_clone = curve.clone()
self.assertEqual(
curve_clone.dataset,
self.ds
)
# Check pipeline carried over
self.assertEqual(
curve.pipeline.operations, curve_clone.pipeline.operations[:2]
)
# Execute pipeline
self.assertEqual(curve.pipeline(curve.dataset), curve)
def test_clone_new_data(self):
# Replacing data during clone resets .dataset
ds_clone = self.ds.clone(data=self.ds2.data)
self.assertEqual(ds_clone.dataset, self.ds2)
self.assertEqual(len(ds_clone.pipeline.operations), 1)
class ReindexTestCase(DatasetPropertyTestCase):
def test_reindex_dataset(self):
ds_ab = self.ds.reindex(kdims=['a'], vdims=['b'])
ds2_ab = self.ds2.reindex(kdims=['a'], vdims=['b'])
self.assertNotEqual(ds_ab, ds2_ab)
self.assertEqual(ds_ab.dataset, self.ds)
# Check pipeline
ops = ds_ab.pipeline.operations
self.assertEqual(len(ops), 2)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ops[1].method_name, 'reindex')
self.assertEqual(ops[1].args, [])
self.assertEqual(ops[1].kwargs, dict(kdims=['a'], vdims=['b']))
# Execute pipeline
self.assertEqual(ds_ab.pipeline(ds_ab.dataset), ds_ab)
self.assertEqual(
ds_ab.pipeline(self.ds2), ds2_ab
)
def test_double_reindex_dataset(self):
ds_ab = (self.ds
.reindex(kdims=['a'], vdims=['b', 'c'])
.reindex(kdims=['a'], vdims=['b']))
ds2_ab = (self.ds2
.reindex(kdims=['a'], vdims=['b', 'c'])
.reindex(kdims=['a'], vdims=['b']))
self.assertNotEqual(ds_ab, ds2_ab)
self.assertEqual(ds_ab.dataset, self.ds)
# Check pipeline
ops = ds_ab.pipeline.operations
self.assertEqual(len(ops), 3)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ops[1].method_name, 'reindex')
self.assertEqual(ops[1].args, [])
self.assertEqual(ops[1].kwargs, dict(kdims=['a'], vdims=['b', 'c']))
self.assertEqual(ops[2].method_name, 'reindex')
self.assertEqual(ops[2].args, [])
self.assertEqual(ops[2].kwargs, dict(kdims=['a'], vdims=['b']))
# Execute pipeline
self.assertEqual(ds_ab.pipeline(ds_ab.dataset), ds_ab)
self.assertEqual(
ds_ab.pipeline(self.ds2), ds2_ab
)
def test_reindex_curve(self):
curve_ba = self.ds.to(
Curve, 'a', 'b', groupby=[]
).reindex(kdims='b', vdims='a')
curve2_ba = self.ds2.to(
Curve, 'a', 'b', groupby=[]
).reindex(kdims='b', vdims='a')
self.assertNotEqual(curve_ba, curve2_ba)
self.assertEqual(curve_ba.dataset, self.ds)
# Check pipeline
ops = curve_ba.pipeline.operations
self.assertEqual(len(ops), 3)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertEqual(ops[2].method_name, 'reindex')
self.assertEqual(ops[2].args, [])
self.assertEqual(ops[2].kwargs, dict(kdims='b', vdims='a'))
# Execute pipeline
self.assertEqual(curve_ba.pipeline(curve_ba.dataset), curve_ba)
self.assertEqual(
curve_ba.pipeline(self.ds2), curve2_ba
)
def test_double_reindex_curve(self):
curve_ba = self.ds.to(
Curve, 'a', ['b', 'c'], groupby=[]
).reindex(kdims='a', vdims='b').reindex(kdims='b', vdims='a')
curve2_ba = self.ds2.to(
Curve, 'a', ['b', 'c'], groupby=[]
).reindex(kdims='a', vdims='b').reindex(kdims='b', vdims='a')
self.assertNotEqual(curve_ba, curve2_ba)
self.assertEqual(curve_ba.dataset, self.ds)
# Check pipeline
ops = curve_ba.pipeline.operations
self.assertEqual(len(ops), 4)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertEqual(ops[2].method_name, 'reindex')
self.assertEqual(ops[2].args, [])
self.assertEqual(ops[2].kwargs, dict(kdims='a', vdims='b'))
self.assertEqual(ops[3].method_name, 'reindex')
self.assertEqual(ops[3].args, [])
self.assertEqual(ops[3].kwargs, dict(kdims='b', vdims='a'))
# Execute pipeline
self.assertEqual(curve_ba.pipeline(curve_ba.dataset), curve_ba)
self.assertEqual(
curve_ba.pipeline(self.ds2), curve2_ba
)
class IlocTestCase(DatasetPropertyTestCase):
def test_iloc_dataset(self):
ds_iloc = self.ds.iloc[[0, 2]]
ds2_iloc = self.ds2.iloc[[0, 2]]
self.assertNotEqual(ds_iloc, ds2_iloc)
# Dataset
self.assertEqual(
ds_iloc.dataset,
self.ds
)
# Check pipeline
ops = ds_iloc.pipeline.operations
self.assertEqual(len(ops), 2)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ops[1].method_name, '_perform_getitem')
self.assertEqual(ops[1].args, [[0, 2]])
self.assertEqual(ops[1].kwargs, {})
# Execute pipeline
self.assertEqual(ds_iloc.pipeline(ds_iloc.dataset), ds_iloc)
self.assertEqual(
ds_iloc.pipeline(self.ds2), ds2_iloc
)
def test_iloc_curve(self):
# Curve
curve_iloc = self.ds.to.curve('a', 'b', groupby=[]).iloc[[0, 2]]
curve2_iloc = self.ds2.to.curve('a', 'b', groupby=[]).iloc[[0, 2]]
self.assertNotEqual(curve_iloc, curve2_iloc)
self.assertEqual(
curve_iloc.dataset,
self.ds
)
# Check pipeline
ops = curve_iloc.pipeline.operations
self.assertEqual(len(ops), 3)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertEqual(ops[2].method_name, '_perform_getitem')
self.assertEqual(ops[2].args, [[0, 2]])
self.assertEqual(ops[2].kwargs, {})
# Execute pipeline
self.assertEqual(curve_iloc.pipeline(curve_iloc.dataset), curve_iloc)
self.assertEqual(
curve_iloc.pipeline(self.ds2), curve2_iloc
)
class NdlocTestCase(DatasetPropertyTestCase):
def setUp(self):
super(NdlocTestCase, self).setUp()
self.ds_grid = Dataset(
(np.arange(4),
np.arange(3),
np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]])),
kdims=['x', 'y'],
vdims='z'
)
self.ds2_grid = Dataset(
(np.arange(3),
np.arange(3),
np.array([[1, 2, 4],
[5, 6, 8],
[9, 10, 12]])),
kdims=['x', 'y'],
vdims='z'
)
def test_ndloc_dataset(self):
ds_grid_ndloc = self.ds_grid.ndloc[0:2, 1:3]
ds2_grid_ndloc = self.ds2_grid.ndloc[0:2, 1:3]
self.assertNotEqual(ds_grid_ndloc, ds2_grid_ndloc)
# Dataset
self.assertEqual(
ds_grid_ndloc.dataset,
self.ds_grid
)
# Check pipeline
ops = ds_grid_ndloc.pipeline.operations
self.assertEqual(len(ops), 2)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ops[1].method_name, '_perform_getitem')
self.assertEqual(
ops[1].args, [(slice(0, 2, None), slice(1, 3, None))]
)
self.assertEqual(ops[1].kwargs, {})
# Execute pipeline
self.assertEqual(
ds_grid_ndloc.pipeline(ds_grid_ndloc.dataset), ds_grid_ndloc
)
self.assertEqual(
ds_grid_ndloc.pipeline(self.ds2_grid), ds2_grid_ndloc
)
class SelectTestCase(DatasetPropertyTestCase):
def test_select_dataset(self):
ds_select = self.ds.select(b=10)
ds2_select = self.ds2.select(b=10)
self.assertNotEqual(ds_select, ds2_select)
# Dataset
self.assertEqual(
ds_select.dataset,
self.ds
)
# Check pipeline
ops = ds_select.pipeline.operations
self.assertEqual(len(ops), 2)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ops[1].method_name, 'select')
self.assertEqual(ops[1].args, [])
self.assertEqual(ops[1].kwargs, {'b': 10})
# Execute pipeline
self.assertEqual(ds_select.pipeline(ds_select.dataset), ds_select)
self.assertEqual(
ds_select.pipeline(self.ds2), ds2_select
)
def test_select_curve(self):
curve_select = self.ds.to.curve('a', 'b', groupby=[]).select(b=10)
curve2_select = self.ds2.to.curve('a', 'b', groupby=[]).select(b=10)
self.assertNotEqual(curve_select, curve2_select)
# Curve
self.assertEqual(
curve_select.dataset,
self.ds
)
# Check pipeline
ops = curve_select.pipeline.operations
self.assertEqual(len(ops), 3)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertEqual(ops[2].method_name, 'select')
self.assertEqual(ops[2].args, [])
self.assertEqual(ops[2].kwargs, {'b': 10})
# Execute pipeline
self.assertEqual(
curve_select.pipeline(curve_select.dataset), curve_select
)
self.assertEqual(
curve_select.pipeline(self.ds2), curve2_select
)
class SortTestCase(DatasetPropertyTestCase):
def test_sort_curve(self):
curve_sorted = self.ds.to.curve('a', 'b', groupby=[]).sort('a')
curve_sorted2 = self.ds2.to.curve('a', 'b', groupby=[]).sort('a')
self.assertNotEqual(curve_sorted, curve_sorted2)
# Curve
self.assertEqual(
curve_sorted.dataset,
self.ds
)
# Check pipeline
ops = curve_sorted.pipeline.operations
self.assertEqual(len(ops), 3)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertEqual(ops[2].method_name, 'sort')
self.assertEqual(ops[2].args, ['a'])
self.assertEqual(ops[2].kwargs, {})
# Execute pipeline
self.assertEqual(
curve_sorted.pipeline(curve_sorted.dataset), curve_sorted
)
self.assertEqual(
curve_sorted.pipeline(self.ds2), curve_sorted2
)
class SampleTestCase(DatasetPropertyTestCase):
def test_sample_curve(self):
curve_sampled = self.ds.to.curve('a', 'b', groupby=[]).sample([1, 2])
curve_sampled2 = self.ds2.to.curve('a', 'b', groupby=[]).sample([1, 2])
self.assertNotEqual(curve_sampled, curve_sampled2)
# Curve
self.assertEqual(
curve_sampled.dataset,
self.ds
)
# Check pipeline
ops = curve_sampled.pipeline.operations
self.assertEqual(len(ops), 3)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertEqual(ops[2].method_name, 'sample')
self.assertEqual(ops[2].args, [[1, 2]])
self.assertEqual(ops[2].kwargs, {})
# Execute pipeline
self.assertEqual(
curve_sampled.pipeline(curve_sampled.dataset), curve_sampled
)
self.assertEqual(
curve_sampled.pipeline(self.ds2), curve_sampled2
)
class ReduceTestCase(DatasetPropertyTestCase):
def test_reduce_dataset(self):
ds_reduced = self.ds.reindex(
kdims=['b', 'c'], vdims=['a', 'd']
).reduce('c', function=np.sum)
ds2_reduced = self.ds2.reindex(
kdims=['b', 'c'], vdims=['a', 'd']
).reduce('c', function=np.sum)
self.assertNotEqual(ds_reduced, ds2_reduced)
self.assertEqual(ds_reduced.dataset, self.ds)
self.assertEqual(ds2_reduced.dataset, self.ds2)
# Check pipeline
ops = ds_reduced.pipeline.operations
self.assertEqual(len(ops), 3)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ops[1].method_name, 'reindex')
self.assertEqual(ops[2].method_name, 'reduce')
self.assertEqual(ops[2].args, ['c'])
self.assertEqual(ops[2].kwargs, {'function': np.sum})
# Execute pipeline
self.assertEqual(ds_reduced.pipeline(ds_reduced.dataset), ds_reduced)
self.assertEqual(
ds_reduced.pipeline(self.ds2), ds2_reduced
)
class AggregateTestCase(DatasetPropertyTestCase):
def test_aggregate_dataset(self):
ds_aggregated = self.ds.reindex(
kdims=['b', 'c'], vdims=['a', 'd']
).aggregate('b', function=np.sum)
ds2_aggregated = self.ds2.reindex(
kdims=['b', 'c'], vdims=['a', 'd']
).aggregate('b', function=np.sum)
self.assertNotEqual(ds_aggregated, ds2_aggregated)
self.assertEqual(ds_aggregated.dataset, self.ds)
self.assertEqual(ds2_aggregated.dataset, self.ds2)
# Check pipeline
ops = ds_aggregated.pipeline.operations
self.assertEqual(len(ops), 3)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ops[1].method_name, 'reindex')
self.assertEqual(ops[2].method_name, 'aggregate')
self.assertEqual(ops[2].args, ['b'])
self.assertEqual(ops[2].kwargs, {'function': np.sum})
# Execute pipeline
self.assertEqual(
ds_aggregated.pipeline(ds_aggregated.dataset), ds_aggregated
)
self.assertEqual(
ds_aggregated.pipeline(self.ds2), ds2_aggregated
)
class GroupbyTestCase(DatasetPropertyTestCase):
def test_groupby_dataset(self):
ds_groups = self.ds.reindex(
kdims=['b', 'c'], vdims=['a', 'd']
).groupby('b')
ds2_groups = self.ds2.reindex(
kdims=['b', 'c'], vdims=['a', 'd']
).groupby('b')
self.assertNotEqual(ds_groups, ds2_groups)
for k in ds_groups.keys():
ds_group = ds_groups[k]
ds2_group = ds2_groups[k]
# Check pipeline
ops = ds_group.pipeline.operations
            self.assertEqual(len(ops), 4)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ops[1].method_name, 'reindex')
self.assertEqual(ops[2].method_name, 'groupby')
self.assertEqual(ops[2].args, ['b'])
self.assertEqual(ops[3].method_name, '__getitem__')
self.assertEqual(ops[3].args, [k])
# Execute pipeline
self.assertEqual(ds_group.pipeline(ds_group.dataset), ds_group)
self.assertEqual(
ds_group.pipeline(self.ds2), ds2_group
)
class AddDimensionTestCase(DatasetPropertyTestCase):
def test_add_dimension_dataset(self):
ds_dim_added = self.ds.add_dimension('new', 1, 17)
ds2_dim_added = self.ds2.add_dimension('new', 1, 17)
self.assertNotEqual(ds_dim_added, ds2_dim_added)
# Check dataset
self.assertEqual(ds_dim_added.dataset, self.ds)
self.assertEqual(ds2_dim_added.dataset, self.ds2)
# Check pipeline
ops = ds_dim_added.pipeline.operations
self.assertEqual(len(ops), 2)
self.assertIs(ops[0].output_type, Dataset)
self.assertEqual(ops[1].method_name, 'add_dimension')
self.assertEqual(ops[1].args, ['new', 1, 17])
self.assertEqual(ops[1].kwargs, {})
# Execute pipeline
self.assertEqual(
ds_dim_added.pipeline(ds_dim_added.dataset), ds_dim_added
)
self.assertEqual(
ds_dim_added.pipeline(self.ds2), ds2_dim_added,
)
# TODO: add execute pipeline test for each method, using a different dataset (ds2)
class HistogramTestCase(DatasetPropertyTestCase):
def setUp(self):
super(HistogramTestCase, self).setUp()
self.hist = self.ds.hist('a', adjoin=False, normed=False)
def test_construction(self):
self.assertEqual(self.hist.dataset, self.ds)
def test_clone(self):
self.assertEqual(self.hist.clone().dataset, self.ds)
def test_select_single(self):
sub_hist = self.hist.select(a=(1, None))
self.assertEqual(sub_hist.dataset, self.ds)
# Check pipeline
ops = sub_hist.pipeline.operations
self.assertEqual(len(ops), 4)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Apply)
self.assertEqual(ops[2].method_name, '__call__')
self.assertIsInstance(ops[2].args[0], histogram)
self.assertEqual(ops[3].method_name, 'select')
self.assertEqual(ops[3].args, [])
self.assertEqual(ops[3].kwargs, {'a': (1, None)})
# Execute pipeline
self.assertEqual(sub_hist.pipeline(sub_hist.dataset), sub_hist)
def test_select_multi(self):
# Add second selection on b. b is a dimension in hist.dataset but
# not in hist. Make sure that we only apply the a selection (and not
# the b selection) to the .dataset property
sub_hist = self.hist.select(a=(1, None), b=100)
self.assertNotEqual(
sub_hist.dataset,
self.ds.select(a=(1, None), b=100)
)
# Check dataset unchanged
self.assertEqual(
sub_hist.dataset,
self.ds
)
# Check pipeline
ops = sub_hist.pipeline.operations
self.assertEqual(len(ops), 4)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Apply)
self.assertEqual(ops[2].method_name, '__call__')
self.assertIsInstance(ops[2].args[0], histogram)
self.assertEqual(ops[3].method_name, 'select')
self.assertEqual(ops[3].args, [])
self.assertEqual(ops[3].kwargs, {'a': (1, None), 'b': 100})
# Execute pipeline
self.assertEqual(sub_hist.pipeline(sub_hist.dataset), sub_hist)
def test_hist_to_curve(self):
# No exception thrown
curve = self.hist.to.curve()
# Check pipeline
ops = curve.pipeline.operations
self.assertEqual(len(ops), 4)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Apply)
self.assertEqual(ops[2].method_name, '__call__')
self.assertIsInstance(ops[2].args[0], histogram)
self.assertIs(ops[3].output_type, Curve)
# Execute pipeline
self.assertEqual(curve.pipeline(curve.dataset), curve)
class DistributionTestCase(DatasetPropertyTestCase):
def setUp(self):
super(DistributionTestCase, self).setUp()
self.distribution = self.ds.to(Distribution, kdims='a', groupby=[])
def test_distribution_dataset(self):
self.assertEqual(self.distribution.dataset, self.ds)
# Execute pipeline
self.assertEqual(
self.distribution.pipeline(self.distribution.dataset),
self.distribution,
)
class DatashaderTestCase(DatasetPropertyTestCase):
def setUp(self):
if None in (rasterize, datashade, dynspread):
raise SkipTest('Datashader could not be imported and cannot be tested.')
super(DatashaderTestCase, self).setUp()
def test_rasterize_curve(self):
img = rasterize(
self.ds.to(Curve, 'a', 'b', groupby=[]), dynamic=False
)
img2 = rasterize(
self.ds2.to(Curve, 'a', 'b', groupby=[]), dynamic=False
)
self.assertNotEqual(img, img2)
# Check dataset
self.assertEqual(img.dataset, self.ds)
# Check pipeline
ops = img.pipeline.operations
self.assertEqual(len(ops), 3)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertIsInstance(ops[2], rasterize)
# Execute pipeline
self.assertEqual(img.pipeline(img.dataset), img)
self.assertEqual(img.pipeline(self.ds2), img2)
def test_datashade_curve(self):
rgb = dynspread(datashade(
self.ds.to(Curve, 'a', 'b', groupby=[]), dynamic=False
), dynamic=False)
rgb2 = dynspread(datashade(
self.ds2.to(Curve, 'a', 'b', groupby=[]), dynamic=False
), dynamic=False)
self.assertNotEqual(rgb, rgb2)
# Check dataset
self.assertEqual(rgb.dataset, self.ds)
# Check pipeline
ops = rgb.pipeline.operations
self.assertEqual(len(ops), 4)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertIsInstance(ops[2], datashade)
self.assertIsInstance(ops[3], dynspread)
# Execute pipeline
self.assertEqual(rgb.pipeline(rgb.dataset), rgb)
self.assertEqual(rgb.pipeline(self.ds2), rgb2)
class AccessorTestCase(DatasetPropertyTestCase):
def test_apply_curve(self):
curve = self.ds.to.curve('a', 'b', groupby=[]).apply(
lambda c: Scatter(c.select(b=(20, None)).data)
)
curve2 = self.ds2.to.curve('a', 'b', groupby=[]).apply(
lambda c: Scatter(c.select(b=(20, None)).data)
)
self.assertNotEqual(curve, curve2)
# Check pipeline
ops = curve.pipeline.operations
self.assertEqual(len(ops), 4)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertIs(ops[2].output_type, Apply)
self.assertEqual(ops[2].kwargs, {'mode': None})
self.assertEqual(ops[3].method_name, '__call__')
# Execute pipeline
self.assertEqual(curve.pipeline(curve.dataset), curve)
self.assertEqual(
curve.pipeline(self.ds2), curve2
)
def test_redim_curve(self):
curve = self.ds.to.curve('a', 'b', groupby=[]).redim.unit(
a='kg', b='m'
)
curve2 = self.ds2.to.curve('a', 'b', groupby=[]).redim.unit(
a='kg', b='m'
)
self.assertNotEqual(curve, curve2)
# Check pipeline
ops = curve.pipeline.operations
self.assertEqual(len(ops), 4)
self.assertIs(ops[0].output_type, Dataset)
self.assertIs(ops[1].output_type, Curve)
self.assertIs(ops[2].output_type, Redim)
self.assertEqual(ops[2].kwargs, {'mode': 'dataset'})
self.assertEqual(ops[3].method_name, '__call__')
# Execute pipeline
self.assertEqual(curve.pipeline(curve.dataset), curve)
self.assertEqual(
curve.pipeline(self.ds2), curve2
)
|
# --------------------------------------------------------------------------------- #
# KNOBCTRL wxPython IMPLEMENTATION
#
# Andrea Gavana, @ 03 Nov 2006
# Latest Revision: 03 Nov 2006, 22.30 CET
#
#
# TODO List
#
# 1. Any idea?
#
# For All Kind Of Problems, Requests Of Enhancements And Bug Reports, Please
# Write To Me At:
#
# gavana@kpo.kz
# andrea.gavana@gmail.com
#
# Or, Obviously, To The wxPython Mailing List!!!
#
#
# End Of Comments
# --------------------------------------------------------------------------------- #
"""
Description
===========
KnobCtrl lets the user select a numerical value by rotating it. It works like a
scrollbar: just set the ticks range property and read the value property in the
associated KC_EVENT_ANGLE_CHANGING/KC_EVENT_ANGLE_CHANGED events. Simple but
effective. It can be easily used if you want to simulate the volume knob of a
music player or similar functionalities.
Events
======
KnobCtrl implements two events that can be intercepted by the user:
- KC_EVENT_ANGLE_CHANGING
- KC_EVENT_ANGLE_CHANGED
The first one can be "vetoed" by eliminating the event.Skip() at the end of the
event handler.
Supported Platforms
===================
KnobCtrl has been tested on the following platforms:
* Windows (Windows XP);
* Linux Ubuntu (Dapper 6.06)
License And Version
===================
KnobCtrl is freeware and distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 03 Nov 2006, 22.30 CET
Version 0.1
"""
import wx
import math
# Flag to use double buffering (recommended = 1)
KC_BUFFERED_DC = 1
"""Flag to use double buffering (recommended = 1)"""
# Events
wxKC_EVENT_ANGLE_CHANGING = wx.NewEventType()
wxKC_EVENT_ANGLE_CHANGED = wx.NewEventType()
KC_EVENT_ANGLE_CHANGING = wx.PyEventBinder(wxKC_EVENT_ANGLE_CHANGING, 1)
"""Notify the client that the knob is changing its value."""
KC_EVENT_ANGLE_CHANGED = wx.PyEventBinder(wxKC_EVENT_ANGLE_CHANGED, 1)
"""Notify the client that the knob has changed its value."""
# ---------------------------------------------------------------------------- #
# Class KnobCtrlEvent
# ---------------------------------------------------------------------------- #
class KnobCtrlEvent(wx.PyCommandEvent):
"""
Represent details of the events that the KnobCtrl object sends.
"""
def __init__(self, eventType, id=1):
"""Default class constructor."""
wx.PyCommandEvent.__init__(self, eventType, id)
def SetOldValue(self, oldValue):
"""Sets the old KnobCtrl value for this event."""
self._oldValue = oldValue
def GetOldValue(self):
"""Returns the old KnobCtrl value for this event."""
return self._oldValue
def SetValue(self, value):
"""Sets the new KnobCtrl value for this event."""
self._value = value
def GetValue(self):
"""Returns the new KnobCtrl value for this event."""
return self._value
#----------------------------------------------------------------------
# BUFFERENDWINDOW Class
# This Class Has Been Taken From The wxPython Wiki, And Slightly
# Adapted To Fill My Needs. See:
#
# http://wiki.wxpython.org/index.cgi/DoubleBufferedDrawing
#
# For More Info About DC And Double Buffered Drawing.
#----------------------------------------------------------------------
class BufferedWindow(wx.Window):
"""
A Buffered window class.
To use it, subclass it and define a Draw(DC) method that takes a DC
to draw to. In that method, put the code needed to draw the picture
you want. The window will automatically be double buffered, and the
screen will be automatically updated when a Paint event is received.
When the drawing needs to change, you app needs to call the
UpdateDrawing() method. Since the drawing is stored in a bitmap, you
can also save the drawing to file by calling the
SaveToFile(self,file_name,file_type) method.
"""
def __init__(self, parent, id,
pos = wx.DefaultPosition,
size = wx.DefaultSize,
style=wx.NO_FULL_REPAINT_ON_RESIZE,
bufferedstyle=KC_BUFFERED_DC):
wx.Window.__init__(self, parent, id, pos, size, style)
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_SIZE, self.OnSize)
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: None)
# OnSize called to make sure the buffer is initialized.
# This might result in OnSize getting called twice on some
# platforms at initialization, but little harm done.
self.OnSize(None)
def Draw(self, dc):
        ## just here as a placeholder.
        ## This method should be overridden when sub-classed
pass
def OnPaint(self, event):
# All that is needed here is to draw the buffer to screen
if self._bufferedstyle == KC_BUFFERED_DC:
dc = wx.BufferedPaintDC(self, self._Buffer)
else:
dc = wx.PaintDC(self)
dc.DrawBitmap(self._Buffer,0,0)
def OnSize(self,event):
# The Buffer init is done here, to make sure the buffer is always
# the same size as the Window
self.Width, self.Height = self.GetClientSizeTuple()
# Make new off screen bitmap: this bitmap will always have the
# current drawing in it, so it can be used to save the image to
# a file, or whatever.
# This seems required on MacOS, it doesn't like wx.EmptyBitmap with
# size = (0, 0)
# Thanks to Gerard Grazzini
if "__WXMAC__" in wx.Platform:
if self.Width == 0:
self.Width = 1
if self.Height == 0:
self.Height = 1
self._Buffer = wx.EmptyBitmap(self.Width, self.Height)
memory = wx.MemoryDC()
memory.SelectObject(self._Buffer)
memory.SetBackground(wx.Brush(self.GetBackgroundColour()))
memory.SetPen(wx.TRANSPARENT_PEN)
memory.Clear()
minradius = min(0.9*self.Width/2, 0.9*self.Height/2)
memory.DrawCircle(self.Width/2, self.Height/2, minradius)
memory.SelectObject(wx.NullBitmap)
self._region = wx.RegionFromBitmapColour(self._Buffer, self.GetBackgroundColour())
self._minradius = minradius
self.UpdateDrawing()
def UpdateDrawing(self):
"""
This would get called if the drawing needed to change, for whatever reason.
The idea here is that the drawing is based on some data generated
elsewhere in the system. If that data changes, the drawing needs to
be updated.
"""
if self._bufferedstyle == KC_BUFFERED_DC:
dc = wx.BufferedDC(wx.ClientDC(self), self._Buffer)
self.Draw(dc)
else:
# update the buffer
dc = wx.MemoryDC()
dc.SelectObject(self._Buffer)
self.Draw(dc)
# update the screen
wx.ClientDC(self).Blit(0, 0, self.Width, self.Height, dc, 0, 0)
# ---------------------------------------------------------------------------- #
# Class KnobCtrl
# ---------------------------------------------------------------------------- #
class KnobCtrl(BufferedWindow):
"""
This class can be used to simulate a knob volume control often found in
PC music players.
"""
def __init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition,
size=wx.DefaultSize,
bufferedstyle=KC_BUFFERED_DC):
"""
Default class constructor.
Non-wxPython parameter:
- bufferedstyle: if equal to 1 (KC_BUFFERED_DC) then a double
buffering is performed while drawing, otherwise the standard
OnPaint is used.
"""
self._bufferedstyle = bufferedstyle
self._knobcolour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_3DFACE)
self._startcolour = wx.WHITE
self._endcolour = wx.Colour(170, 170, 150)
self._tagscolour = wx.BLACK
self._boundingcolour = wx.WHITE
self._tags = []
self._anglestart = -45
self._angleend = 180
self._state = 0
self._minvalue = 0
self._maxvalue = 100
self._old_ang = 0
self._trackposition = 0
self._knobradius = 4
BufferedWindow.__init__(self, parent, id, pos, size,
style=wx.NO_FULL_REPAINT_ON_RESIZE,
bufferedstyle=bufferedstyle)
self.Bind(wx.EVT_MOUSE_EVENTS, self.OnMouseEvents)
self.SetValue(self._trackposition)
def OnMouseEvents(self, event):
"""Handles all the wx.EVT_MOUSE_EVENTS for KnobCtrl."""
if self._state == 0 and event.Entering():
self._state = 1
elif self._state >= 1 and event.Leaving():
self._state = 0
elif self._state == 1 and event.LeftDown():
self._state = 2
self._mousePosition = event.GetPosition()
self.SetTrackPosition()
elif self._state == 2 and event.LeftIsDown():
self._mousePosition = event.GetPosition()
self.SetTrackPosition()
elif self._state == 2 and event.LeftUp():
self._state = 1
def SetTags(self, tags):
"""
Sets the tags for KnobCtrl. The tags are a list of integers ranging
from minvalue to maxvalue.
"""
self._tags = tags
if min(tags) < self._minvalue:
self._minvalue = min(tags)
if max(tags) > self._maxvalue:
self._maxvalue = max(tags)
self.OnSize(None)
def GetMinValue(self):
"""Returns the minimum value for KnobCtrl."""
return self._minvalue
def GetMaxValue(self):
"""Returns the maximum value for KnobCtrl."""
return self._maxvalue
def GetKnobRadius(self):
"""Returns the knob radius."""
return self._knobradius
def SetKnobRadius(self, radius):
"""Sets the knob radius."""
if radius <= 0:
return
self._knobradius = radius
self.UpdateDrawing()
def GetTags(self):
"""Returns the KnobCtrl tags."""
return self._tags
def SetTagsColour(self, colour):
"""Sets the tags colour."""
self._tagscolour = colour
self.UpdateDrawing()
def GetTagsColour(self):
"""Returns the tags colour."""
return self._tagscolour
def SetBoundingColour(self, colour):
"""Sets the bounding circle colour."""
self._boundingcolour = colour
self.UpdateDrawing()
def GetBoundingColour(self):
"""Returns the bounding circle colour."""
return self._boundingcolour
def SetFirstGradientColour(self, colour):
"""Sets the first gradient colour for shading."""
self._startcolour = colour
self.UpdateDrawing()
def GetFirstGradientColour(self):
"""Returns the first gradient colour for shading."""
return self._startcolour
def SetSecondGradientColour(self, colour):
"""Sets the second gradient colour for shading."""
self._endcolour = colour
self.UpdateDrawing()
def GetSecondGradientColour(self):
"""Returns the second gradient colour for shading."""
return self._endcolour
def SetAngularRange(self, start, end):
"""
Sets the angular range for KnobCtrl. The start and end angle are given
in degrees, clockwise.
"""
self._anglestart = start
self._angleend = end
self.UpdateDrawing()
def GetAngularRange(self):
"""
Returns the angular range for KnobCtrl. The start and end angle are given
in degrees, clockwise.
"""
return self._anglestart, self._angleend
def Draw(self, dc):
"""
Draws everything on the empty bitmap.
Here all the chosen styles are applied.
"""
size = self.GetClientSize()
if size.x < 21 or size.y < 21:
return
dc.SetClippingRegionAsRegion(self._region)
self.DrawDiagonalGradient(dc, size)
self.DrawInsetCircle(dc, self._knobcolour)
dc.DestroyClippingRegion()
self.DrawBoundingCircle(dc, size)
if self._tags:
self.DrawTags(dc, size)
def DrawTags(self, dc, size):
"""Draws the tags."""
deltarange = abs(self._tags[-1] - self._tags[0])
deltaangle = self._angleend - self._anglestart
width = size.x
height = size.y
xshift = 0
yshift = 0
if width > height:
xshift = width - height
elif width < height:
yshift = height - width
coeff = float(deltaangle)/float(deltarange)
dcPen = wx.Pen(self._tagscolour, 1)
for tags in self._tags:
if tags == self._tags[0] or tags == self._tags[-1]:
# draw first and last tags bigger
dcPen.SetWidth(2)
tagLen = 8
else:
dcPen.SetWidth(1)
tagLen = 6
dc.SetPen(dcPen)
tg = tags - self._tags[0]
angle = tg*coeff + self._anglestart
angle = angle*math.pi/180.0
sxi = math.cos(angle)*(width - xshift + tagLen - 6)/2.0
syi = math.sin(angle)*(height - yshift + tagLen - 6)/2.0
dxi = math.cos(angle)*((width - xshift + tagLen - 6)/2.0 - tagLen)
dyi = math.sin(angle)*((height - yshift + tagLen - 6)/2.0 - tagLen)
dc.DrawLine(width/2 - sxi, height/2 - syi,
width/2 - dxi, height/2 - dyi)
def DrawDiagonalGradient(self, dc, size):
"""Draw a shding of diagonal gradient to KnobCtrl."""
col1 = self._startcolour
col2 = self._endcolour
r1, g1, b1 = int(col1.Red()), int(col1.Green()), int(col1.Blue())
r2, g2, b2 = int(col2.Red()), int(col2.Green()), int(col2.Blue())
maxsize = max(size.x, size.y)
flrect = maxsize
rstep = float((r2 - r1)) / flrect
gstep = float((g2 - g1)) / flrect
bstep = float((b2 - b1)) / flrect
rf, gf, bf = 0, 0, 0
dc.SetBrush(wx.TRANSPARENT_BRUSH)
for ii in xrange(0, maxsize, 2):
currCol = (r1 + rf, g1 + gf, b1 + bf)
dc.SetPen(wx.Pen(currCol, 2))
dc.DrawLine(0, ii+2, ii+2, 0)
rf = rf + rstep
gf = gf + gstep
bf = bf + bstep
for ii in xrange(0, maxsize, 2):
currCol = (r1 + rf, g1 + gf, b1 + bf)
dc.SetPen(wx.Pen(currCol, 2))
dc.DrawLine(ii+2, maxsize, maxsize, ii+2)
rf = rf + rstep
gf = gf + gstep
bf = bf + bstep
def OffsetColor(self, color, offset):
"""Used internally."""
byRed = 0
byGreen = 0
byBlue = 0
offsetR = offset
offsetG = offset
offsetB = offset
if offset < -255 or offset> 255:
return color
# Get RGB components of specified color
byRed = color.Red()
byGreen = color.Green()
byBlue = color.Blue()
# Calculate max. allowed real offset
if offset > 0:
if byRed + offset > 255:
offsetR = 255 - byRed
if byGreen + offset > 255:
offsetG = 255 - byGreen
if byBlue + offset > 255:
offsetB = 255 - byBlue
offset = min(min(offsetR, offsetG), offsetB)
else:
if byRed + offset < 0:
offsetR = -byRed
if byGreen + offset < 0:
offsetG = -byGreen
if byBlue + offset < 0:
offsetB = -byBlue
offset = max(max(offsetR, offsetG), offsetB)
c1 = wx.Colour(byRed + offset, byGreen + offset, byBlue + offset)
return c1
def DrawInsetCircle(self, dc, pencolour):
"""Draws the small knob."""
self._knobcenter = self.CircleCoords(self._minradius*0.8, self.GetTrackPosition(),
self.Width/2, self.Height/2)
cx, cy = self._knobcenter
r = self._knobradius
p1 = wx.Pen(self.OffsetColor(pencolour, -70), 2)
p2 = wx.Pen(self.OffsetColor(pencolour, 10), 1)
pt1 = wx.Point(cx-r*math.sqrt(2)/2, cy+r*math.sqrt(2)/2)
pt2 = wx.Point(cx+r*math.sqrt(2)/2, cy-r*math.sqrt(2)/2)
dc.SetPen(p2)
dc.DrawArcPoint(pt1, pt2, (cx, cy))
dc.SetPen(p1)
dc.DrawArcPoint(pt2, pt1, (cx, cy))
def DrawBoundingCircle(self, dc, size):
"""Draws the KnobCtrl bounding circle."""
radius = 0.9*min(size.x, size.y)/2
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.SetPen(wx.Pen(self._boundingcolour))
dc.DrawCircle(self.Width/2, self.Height/2, radius)
def CircleCoords(self, radius, angle, centerX, centerY):
"""Used internally."""
x = radius*math.cos(angle) + centerX
y = radius*math.sin(angle) + centerY
return x, y
def SetTrackPosition(self):
"""Used internally."""
width, height = self.GetSize()
x = self._mousePosition.x
y = self._mousePosition.y
ang = self.GetAngleFromCoord(x, y)
val = ang*180.0/math.pi
deltarange = self._maxvalue - self._minvalue
deltaangle = self._angleend - self._anglestart
coeff = float(deltaangle)/float(deltarange)
if self._anglestart < 0 and val >= 360.0 + self._anglestart:
scaledval = (val - (360.0 + self._anglestart))/coeff
else:
scaledval = (val - self._anglestart)/coeff
if scaledval > self._maxvalue or scaledval < self._minvalue:
ang = self._old_ang
else:
event = KnobCtrlEvent(wxKC_EVENT_ANGLE_CHANGING, self.GetId())
event.SetEventObject(self)
event.SetOldValue(self.GetValue())
event.SetValue(int(round(scaledval)))
if self.GetEventHandler().ProcessEvent(event):
# the caller didn't use event.Skip()
return
self.SetValue(scaledval)
event.SetEventType(wxKC_EVENT_ANGLE_CHANGED)
event.SetOldValue(scaledval)
self.GetEventHandler().ProcessEvent(event)
self._old_ang = ang
def SetValue(self, val):
"""Sets programmatically the value of KnobCtrl, without sending events."""
if val < self._minvalue or val > self._maxvalue:
return
width, height = self.GetSize()
deltarange = self._maxvalue - self._minvalue
deltaangle = self._angleend - self._anglestart
coeff = float(deltaangle)/float(deltarange)
ang = 360.0 + val*coeff + self._anglestart
ang = ang*math.pi/180.0
self._old_ang = ang
self._trackposition = int(round(val))
self.UpdateDrawing()
def GetValue(self):
"""Returns the value of KnobCtrl."""
return self._trackposition
def GetTrackPosition(self):
"""Used internally."""
return self._old_ang - math.pi
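    # GetAngleFromCoord translates a mouse position into an angle around the
    # widget centre: coordinates are re-centred (with the y axis flipped so that
    # screen-up is positive) and the atan2 result is normalised to [0, 2*pi).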
def GetAngleFromCoord(self, cx, cy):
"""Used internally."""
width, height = self.GetSize()
ang = 0
y = (height/2 - float(cy))/(height/2)
x = (float(cx) - width/2)/(height/2)
ang = ang - math.atan2(-y, -x)
if ang < 0:
ang = ang + 2.0*math.pi
return ang
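# Rough usage sketch, kept as a comment because the enclosing class definition is
# not shown in this file fragment; the names below assume a
# wx.lib.agw.knobctrl-style API, which these methods appear to follow:
#
#   knob = KnobCtrl(parent, -1, size=(100, 100))
#   knob.SetAngularRange(-45, 225)   # maps to self._anglestart / self._angleend
#   knob.SetValue(40)                # SetValue does not emit change events
#   current = knob.GetValue()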
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 Pascual Martinez-Gomez
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from lxml import etree
from nltk.sem.logic import Variable, Expression
from ccg2lambda_tools import assign_semantics_to_ccg
from logic_parser import lexpr
from semantic_index import SemanticIndex
from semantic_index import SemanticRule
from semantic_types import build_dynamic_library
from semantic_types import build_library_entry
from semantic_types import combine_signatures_or_rename_preds
from semantic_types import convert_coq_signatures_to_nltk
from semantic_types import convert_coq_to_nltk_type
from semantic_types import get_coq_types
from semantic_types import get_dynamic_library_from_doc
from semantic_types import merge_dynamic_libraries
from semantic_types import read_type
from semparse import filter_attributes
from theorem import get_formulas_from_doc
class combine_signatures_or_rename_predsTestCase(unittest.TestCase):
def test_different_onepred(self):
exprs = [lexpr(r'pred1(x)'), lexpr(r'pred2(x)')]
sig, exprs_new = combine_signatures_or_rename_preds(exprs)
self.assertEqual(exprs, exprs_new)
def test_equal_onepred(self):
exprs = [lexpr(r'pred1(x)'), lexpr(r'pred1(x)')]
sig, exprs_new = combine_signatures_or_rename_preds(exprs)
self.assertEqual(2, len(sig), msg='Unexpected signature: {0}'.format(sig))
self.assertEqual(exprs, exprs_new)
def test_equalvar_onepred(self):
exprs = [lexpr(r'pred1(x)'), lexpr(r'pred1(y)')]
sig, exprs_new = combine_signatures_or_rename_preds(exprs)
self.assertEqual(3, len(sig), msg='Unexpected signature: {0}'.format(sig))
self.assertEqual(exprs, exprs_new)
def test_different_one_two_pred(self):
exprs = [lexpr(r'pred1(x)'), lexpr(r'pred1(x,y)')]
expected_exprs = [lexpr(r'pred1_e2(x)'), lexpr(r'pred1_e3(x,y)')]
sig, new_exprs = combine_signatures_or_rename_preds(exprs)
self.assertEqual(expected_exprs, new_exprs)
def test_different_one_pred_vartype(self):
exprs = [lexpr(r'pred1(x)'), lexpr(r'pred1(e)')]
expected_exprs = [lexpr(r'pred1_e2(x)'), lexpr(r'pred1_v2(e)')]
sig, exprs_new = combine_signatures_or_rename_preds(exprs)
self.assertEqual(expected_exprs, exprs_new)
def test_different_in_same_expression(self):
exprs = [lexpr(r'pred1(x) & pred1(e)'), lexpr(r'pred1(e)')]
sigs, new_exprs = combine_signatures_or_rename_preds(exprs)
expected_exprs = [
lexpr(r'pred1_e2(x) & pred1_v2(e)'), lexpr(r'pred1_v2(e)')]
self.assertEqual(expected_exprs, new_exprs)
def test_different_in_same_expression_embed(self):
exprs = [lexpr(r'exists x. (pred1(x) & exists e. pred1(e))')]
sigs, new_exprs = combine_signatures_or_rename_preds(exprs)
expected_exprs = [
lexpr(r'exists x. (pred1_e2(x) & exists e. pred1_v2(e))')]
self.assertEqual(expected_exprs, new_exprs)
def test_arbitrary_different_same_pred(self):
doc_str = r"""
<document>
<sentences>
<sentence id="s1">
<tokens>
<token base="pred_same" pos="pos1" surf="surf1" id="t1_1"/>
<token base="pred_same" pos="pos2" surf="surf2" id="t1_2"/>
</tokens>
<ccg root="sp1-3">
<span terminal="t1_1" category="cat1" end="2" begin="1" id="sp1-1"/>
<span terminal="t1_2" category="cat2" end="3" begin="2" id="sp1-2"/>
<span child="sp1-1 sp1-2" rule="lex" category="NP" end="3" begin="1" id="sp1-3"/>
</ccg>
<semantics root="sp1-3">
<span sem="exists x e. _pred_same(x) -> _pred_same(e)" child="sp1-1 sp1-2"/>
<span sem="_pred_same" type="pred_same : Entity -> Prop" id="sp1-1"/>
<span sem="_pred_same" type="pred_same : Event -> Prop" id="sp1-2"/>
</semantics>
</sentence>
</sentences>
</document>
"""
doc = etree.fromstring(doc_str)
sem_nodes = doc.xpath('//semantics')
dynamic_library_str, formulas = get_dynamic_library_from_doc(doc, sem_nodes)
coq_types = dynamic_library_str.split('\n')
expected_coq_types = ["Parameter _pred_same_e2 : Entity -> Prop.",
"Parameter _pred_same_v2 : Event -> Prop."]
self.assertEqual(expected_coq_types, coq_types,
msg="\n{0}\nvs\n{1}".format(expected_coq_types, coq_types))
# TODO: also test for types that are Propositions 't'.
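# Helper: convert an NLTK-style signature (predicate name -> type) into a sorted,
# de-duplicated list of Coq 'Parameter ...' declarations.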
def nltk_sig_to_coq_lib(nltk_sig):
# Convert into coq style library entries.
coq_lib = []
for predicate, pred_type in nltk_sig.items():
library_entry = build_library_entry(predicate, pred_type)
coq_lib.append(library_entry)
return sorted(set(coq_lib))
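# Helper: run semantic assignment over the sentence's CCG tree and attach the
# resulting <semantics> node to the <sentence> element, so that
# get_dynamic_library_from_doc can be exercised on a single sentence.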
def semparse_sentence(sentence, semantic_index):
sem_node = etree.Element('semantics')
sem_tree = assign_semantics_to_ccg(sentence, semantic_index)
filter_attributes(sem_tree)
sem_node.extend(sem_tree.xpath('.//descendant-or-self::span'))
sem_node.set('status', 'success')
sem_node.set('root', sentence.xpath('./ccg[1]/@root')[0])
sentence.append(sem_node)
return sentence
class build_arbitrary_dynamic_libraryTestCase(unittest.TestCase):
def test_type_arbitrary_raised(self):
semantic_index = SemanticIndex(None)
semantic_rules = [SemanticRule(r'N1', r'\P.P', {'coq_type' : 'Entity -> Prop'}),
SemanticRule(r'N2', r'\P.P', {'coq_type' : 'Entity'}),
SemanticRule(r'NP', r'\P Q.(_new(P, Q))', {'rule' : 'lex'})]
semantic_index.rules = semantic_rules
sentence_str = r"""
<sentence id="s1">
<tokens>
<token base="base1" pos="pos1" surf="surf1" id="t1_1"/>
<token base="base2" pos="pos1" surf="surf2" id="t1_2"/>
</tokens>
<ccg root="sp1-3" id="test1">
<span terminal="t1_1" category="N1" end="2" begin="1" id="sp1-1"/>
<span terminal="t1_2" category="N2" end="3" begin="2" id="sp1-2"/>
<span child="sp1-1 sp1-2" rule="lex" category="NP" end="2" begin="1" id="sp1-3"/>
</ccg>
</sentence>
"""
sentence = etree.fromstring(sentence_str)
sentence_sem = semparse_sentence(sentence, semantic_index)
lib, formulas = get_dynamic_library_from_doc(
sentence_sem, sentence_sem.xpath('./semantics'))
coq_types = get_coq_types(sentence_sem)
expected_coq_types = [
"Parameter _base1 : Entity -> Prop.",
"Parameter _base2 : Entity."]
self.assertEqual(expected_coq_types, lib.split('\n'))
def test_lexical_unary_one_type(self):
semantic_index = SemanticIndex(None)
semantic_rules = [SemanticRule(r'N', r'\P.P', {'coq_type' : 'Entity -> Prop'}),
SemanticRule(r'NP', r'\P.(P -> P)', {'rule' : 'lex'})]
semantic_index.rules = semantic_rules
sentence_str = r"""
<sentence id="s1">
<tokens>
<token base="base1" pos="pos1" surf="surf1" id="t1_1"/>
</tokens>
<ccg root="sp1-2">
<span terminal="t1_1" category="N" end="2" begin="1" id="sp1-1"/>
<span child="sp1-1" rule="lex" category="NP" end="2" begin="1" id="sp1-2"/>
</ccg>
</sentence>
"""
sentence = etree.fromstring(sentence_str)
ccg_tree = assign_semantics_to_ccg(sentence, semantic_index)
coq_types = get_coq_types(ccg_tree)
expected_coq_types = ["Parameter _base1 : Entity -> Prop."]
self.assertEqual(expected_coq_types, coq_types)
def test_lexical_binary_two_types(self):
semantic_index = SemanticIndex(None)
semantic_rules = [SemanticRule(r'cat1', r'\P.P', {'coq_type' : 'Entity -> Prop'}),
SemanticRule(r'cat2', r'\P.P', {'coq_type' : 'Entity -> Prop -> Prop'}),
SemanticRule(r'NP', r'\P Q.(Q -> P)', {'rule' : 'lex'})]
semantic_index.rules = semantic_rules
sentence_str = r"""
<sentence id="s1">
<tokens>
<token base="base1" pos="pos1" surf="surf1" id="t1_1"/>
<token base="base2" pos="pos2" surf="surf2" id="t1_2"/>
</tokens>
<ccg root="sp1-3">
<span terminal="t1_1" category="cat1" end="2" begin="1" id="sp1-1"/>
<span terminal="t1_2" category="cat2" end="3" begin="2" id="sp1-2"/>
<span child="sp1-1 sp1-2" rule="lex" category="NP" end="3" begin="1" id="sp1-3"/>
</ccg>
</sentence>
"""
sentence = etree.fromstring(sentence_str)
ccg_tree = assign_semantics_to_ccg(sentence, semantic_index)
coq_types = get_coq_types(ccg_tree)
expected_coq_types = ["Parameter _base1 : Entity -> Prop.",
"Parameter _base2 : Entity -> Prop -> Prop."]
self.assertEqual(expected_coq_types, coq_types)
def test_lexical_binary_one_type(self):
semantic_index = SemanticIndex(None)
semantic_rules = [SemanticRule(r'cat1', r'\P.P'),
SemanticRule(r'cat2', r'\Q x.Q(x)', {'coq_type' : 'Entity -> Prop'}),
SemanticRule(r'NP', r'\P Q x.(P -> Q(x))', {'rule' : 'lex'})]
semantic_index.rules = semantic_rules
sentence_str = r"""
<sentence id="s1">
<tokens>
<token base="base1" pos="pos1" surf="surf1" id="t1_1"/>
<token base="base2" pos="pos2" surf="surf2" id="t1_2"/>
</tokens>
<ccg root="sp1-3">
<span terminal="t1_1" category="cat1" end="2" begin="1" id="sp1-1"/>
<span terminal="t1_2" category="cat2" end="3" begin="2" id="sp1-2"/>
<span child="sp1-1 sp1-2" rule="lex" category="NP" end="3" begin="1" id="sp1-3"/>
</ccg>
</sentence>
"""
sentence = etree.fromstring(sentence_str)
ccg_tree = assign_semantics_to_ccg(sentence, semantic_index)
coq_types = get_coq_types(ccg_tree)
expected_coq_types = ["Parameter _base2 : Entity -> Prop."]
self.assertEqual(expected_coq_types, coq_types)
class ArbiAutoTypesTestCase(unittest.TestCase):
def test_lexical_binary_one_type(self):
semantic_index = SemanticIndex(None)
semantic_rules = [SemanticRule(r'cat1', r'\P x.P(x)'),
SemanticRule(r'cat2', r'\P x.P(x)', {'coq_type' : 'Entity -> Prop'}),
SemanticRule(r'NP', r'\P Q x.(Q(x) -> P(x))', {'rule' : 'lex'})]
semantic_index.rules = semantic_rules
sentence_str = r"""
<sentence id="s1">
<tokens>
<token base="base1" pos="pos1" surf="surf1" id="t1_1"/>
<token base="base2" pos="pos2" surf="surf2" id="t1_2"/>
</tokens>
<ccg root="sp1-3">
<span terminal="t1_1" category="cat1" end="2" begin="1" id="sp1-1"/>
<span terminal="t1_2" category="cat2" end="3" begin="2" id="sp1-2"/>
<span child="sp1-1 sp1-2" rule="lex" category="NP" end="3" begin="1" id="sp1-3"/>
</ccg>
</sentence>
"""
sentence = etree.fromstring(sentence_str)
ccg_tree = assign_semantics_to_ccg(sentence, semantic_index)
coq_lib = get_coq_types(ccg_tree)
expected_coq_lib = ["Parameter _base2 : Entity -> Prop."]
self.assertEqual(expected_coq_lib, coq_lib)
expression = [ccg_tree.get('sem')]
coq_sig = convert_coq_signatures_to_nltk(coq_lib)
nltk_lib, _ = build_dynamic_library(expression, coq_sig)
lib = merge_dynamic_libraries(coq_sig, nltk_lib, sentence)
expected_lib = ["Parameter _base2 : Entity -> Prop.",
"Parameter _base1 : Entity -> Prop."]
self.assertCountEqual(expected_lib, lib)
def test_lexical_binary_no_type(self):
semantic_index = SemanticIndex(None)
semantic_rules = [SemanticRule(r'cat1', r'\P x.P(x)'),
SemanticRule(r'cat2', r'\P x.P(x)'),
SemanticRule(r'NP', r'\P Q x.(Q(x) -> P(x))', {'rule' : 'lex'})]
semantic_index.rules = semantic_rules
sentence_str = r"""
<sentence id="s1">
<tokens>
<token base="base1" pos="pos1" surf="surf1" id="t1_1"/>
<token base="base2" pos="pos2" surf="surf2" id="t1_2"/>
</tokens>
<ccg root="sp1-3">
<span terminal="t1_1" category="cat1" end="2" begin="1" id="sp1-1"/>
<span terminal="t1_2" category="cat2" end="3" begin="2" id="sp1-2"/>
<span child="sp1-1 sp1-2" rule="lex" category="NP" end="3" begin="1" id="sp1-3"/>
</ccg>
</sentence>
"""
sentence = etree.fromstring(sentence_str)
ccg_tree = assign_semantics_to_ccg(sentence, semantic_index)
coq_lib = get_coq_types(ccg_tree)
expected_coq_lib = []
self.assertEqual(expected_coq_lib, coq_lib)
expression = [ccg_tree.get('sem')]
coq_sig = convert_coq_signatures_to_nltk(coq_lib)
nltk_lib, _ = build_dynamic_library(expression, coq_sig)
lib = merge_dynamic_libraries(coq_lib, nltk_lib, sentence)
expected_lib = ["Parameter _base2 : Entity -> Prop.",
"Parameter _base1 : Entity -> Prop."]
self.assertCountEqual(expected_lib, lib)
def test_lexical_binary_one_nltk_complex_type(self):
semantic_index = SemanticIndex(None)
semantic_rules = [SemanticRule(r'cat1', r'\P x.P(x)'),
SemanticRule(r'cat2', r'\Q x y.Q(x, y)'),
SemanticRule(r'NP', r'\P Q x y.(P(x) -> Q(x, y))', {'rule' : 'lex'})]
semantic_index.rules = semantic_rules
sentence_str = r"""
<sentence id="s1">
<tokens>
<token base="base1" pos="pos1" surf="surf1" id="t1_1"/>
<token base="base2" pos="pos2" surf="surf2" id="t1_2"/>
</tokens>
<ccg root="sp1-3">
<span terminal="t1_1" category="cat1" end="2" begin="1" id="sp1-1"/>
<span terminal="t1_2" category="cat2" end="3" begin="2" id="sp1-2"/>
<span child="sp1-1 sp1-2" rule="lex" category="NP" end="3" begin="1" id="sp1-3"/>
</ccg>
</sentence>
"""
sentence = etree.fromstring(sentence_str)
ccg_tree = assign_semantics_to_ccg(sentence, semantic_index)
coq_lib = get_coq_types(ccg_tree)
expected_coq_lib = []
self.assertEqual(expected_coq_lib, coq_lib)
expression = [ccg_tree.get('sem')]
coq_sig = convert_coq_signatures_to_nltk(coq_lib)
nltk_lib, _ = build_dynamic_library(expression, coq_sig)
lib = merge_dynamic_libraries(coq_lib, nltk_lib, sentence)
expected_lib = ["Parameter _base2 : Entity -> (Entity -> Prop).",
"Parameter _base1 : Entity -> Prop."]
self.assertCountEqual(expected_lib, lib)
def test_lexical_binary_one_coq_complex_type(self):
semantic_index = SemanticIndex(None)
semantic_rules = [SemanticRule(r'cat1', r'\P x.P(x)'),
SemanticRule(r'cat2', r'\Q R S.Q(R, S)', {'coq_type' : 'Prop -> Entity -> Prop'}),
SemanticRule(r'NP', r'\P Q x R S.(P(x) -> Q(R, S))', {'rule' : 'lex'})]
semantic_index.rules = semantic_rules
sentence_str = r"""
<sentence id="s1">
<tokens>
<token base="base1" pos="pos1" surf="surf1" id="t1_1"/>
<token base="base2" pos="pos2" surf="surf2" id="t1_2"/>
</tokens>
<ccg root="sp1-3">
<span terminal="t1_1" category="cat1" end="2" begin="1" id="sp1-1"/>
<span terminal="t1_2" category="cat2" end="3" begin="2" id="sp1-2"/>
<span child="sp1-1 sp1-2" rule="lex" category="NP" end="3" begin="1" id="sp1-3"/>
</ccg>
</sentence>
"""
sentence = etree.fromstring(sentence_str)
ccg_tree = assign_semantics_to_ccg(sentence, semantic_index)
coq_lib = get_coq_types(ccg_tree)
expected_coq_lib = ['Parameter _base2 : Prop -> Entity -> Prop.']
self.assertEqual(expected_coq_lib, coq_lib)
expression = [ccg_tree.get('sem')]
coq_sig = convert_coq_signatures_to_nltk(coq_lib)
nltk_lib, _ = build_dynamic_library(expression, coq_sig)
lib = merge_dynamic_libraries(coq_sig, nltk_lib, sentence)
expected_lib = ["Parameter _base2 : Prop -> (Entity -> Prop).",
"Parameter _base1 : Entity -> Prop."]
self.assertCountEqual(expected_lib, lib)
def test_lexical_binary_two_coq_complex_type(self):
semantic_index = SemanticIndex(None)
semantic_rules = [SemanticRule(r'cat1', r'\P x R.P(x, R)', {'coq_type' : 'Entity -> Prop -> Prop'}),
SemanticRule(r'cat2', r'\Q S T.Q(S, T)', {'coq_type' : 'Prop -> Entity -> Prop'}),
SemanticRule(r'NP', r'\P Q x R S T.(Q(x, R) -> P(S, T))', {'rule' : 'lex'})]
semantic_index.rules = semantic_rules
sentence_str = r"""
<sentence id="s1">
<tokens>
<token base="base1" pos="pos1" surf="surf1" id="t1_1"/>
<token base="base2" pos="pos2" surf="surf2" id="t1_2"/>
</tokens>
<ccg root="sp1-3">
<span terminal="t1_1" category="cat1" end="2" begin="1" id="sp1-1"/>
<span terminal="t1_2" category="cat2" end="3" begin="2" id="sp1-2"/>
<span child="sp1-1 sp1-2" rule="lex" category="NP" end="3" begin="1" id="sp1-3"/>
</ccg>
</sentence>
"""
sentence = etree.fromstring(sentence_str)
ccg_tree = assign_semantics_to_ccg(sentence, semantic_index)
coq_lib = get_coq_types(ccg_tree)
expected_coq_lib = ['Parameter _base1 : Entity -> Prop -> Prop.',
'Parameter _base2 : Prop -> Entity -> Prop.']
self.assertEqual(expected_coq_lib, coq_lib)
expression = [ccg_tree.get('sem')]
coq_sig = convert_coq_signatures_to_nltk(coq_lib)
nltk_lib, _ = build_dynamic_library(expression, coq_sig)
lib = merge_dynamic_libraries(coq_sig, nltk_lib, sentence)
expected_lib = ["Parameter _base2 : Prop -> (Entity -> Prop).",
"Parameter _base1 : Entity -> (Prop -> Prop)."]
self.assertCountEqual(expected_lib, lib)
class Coq2NLTKSignaturesTestCase(unittest.TestCase):
def test_entity(self):
coq_sig = ['Parameter base1 : Entity.',
'Parameter base2 : Prop.']
nltk_sig = convert_coq_signatures_to_nltk(coq_sig)
expected_nltk_sig = {'base1' : read_type('e'),
'base2' : read_type('t')}
self.assertEqual(expected_nltk_sig, nltk_sig)
class Coq2NLTKTypesTestCase(unittest.TestCase):
def test_entity(self):
coq_type = 'Parameter base : Entity.'
nltk_type = convert_coq_to_nltk_type(coq_type)
expected_nltk_type = {'base' : read_type('e')}
self.assertEqual(expected_nltk_type, nltk_type)
def test_property(self):
coq_type = 'Parameter base : Prop.'
nltk_type = convert_coq_to_nltk_type(coq_type)
expected_nltk_type = {'base' : read_type('t')}
self.assertEqual(expected_nltk_type, nltk_type)
def test_event(self):
coq_type = 'Parameter base : Event.'
nltk_type = convert_coq_to_nltk_type(coq_type)
expected_nltk_type = {'base' : read_type('v')}
self.assertEqual(expected_nltk_type, nltk_type)
def test_wrong_type(self):
coq_type = 'Parameter base : YYY.'
self.assertRaises(ValueError, convert_coq_to_nltk_type, coq_type)
def test_entity_property(self):
coq_type = 'Parameter base : Entity -> Prop.'
nltk_type = convert_coq_to_nltk_type(coq_type)
expected_nltk_type = {'base' : read_type('<e,t>')}
self.assertEqual(expected_nltk_type, nltk_type)
def test_entity_entity_property(self):
coq_type = 'Parameter base : Entity -> Entity -> Prop.'
nltk_type = convert_coq_to_nltk_type(coq_type)
expected_nltk_type = {'base' : read_type('<e,<e,t>>')}
self.assertEqual(expected_nltk_type, nltk_type)
def test_entity_property_property(self):
coq_type = 'Parameter base : Entity -> Prop -> Prop.'
nltk_type = convert_coq_to_nltk_type(coq_type)
expected_nltk_type = {'base' : read_type('<e,<t,t>>')}
self.assertEqual(expected_nltk_type, nltk_type)
def test_entity_property_and_property(self):
coq_type = 'Parameter base : (Entity -> Prop) -> Prop.'
nltk_type = convert_coq_to_nltk_type(coq_type)
        expected_nltk_type = {'base' : read_type('<<e,t>,t>')}
self.assertEqual(expected_nltk_type, nltk_type)
def test_entity_property_and_property_entity(self):
coq_type = 'Parameter base : (Entity -> Prop) -> (Prop -> Entity).'
nltk_type = convert_coq_to_nltk_type(coq_type)
expected_nltk_type = {'base' : read_type('<<e,t>,<t,e>>')}
self.assertEqual(expected_nltk_type, nltk_type)
def test_event_and_entity_property(self):
coq_type = 'Parameter base : Event -> (Entity -> Prop).'
nltk_type = convert_coq_to_nltk_type(coq_type)
expected_nltk_type = {'base' : read_type('<v,<e,t>>')}
self.assertEqual(expected_nltk_type, nltk_type)
class build_dynamic_libraryTestCase(unittest.TestCase):
def test_entity(self):
exprs = [lexpr('Python')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter Python : Entity.']
self.assertEqual(expected_dynamic_library, dynamic_library)
def test_predicate1_argument1(self):
exprs = [lexpr('language(Python)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter Python : Entity.',
'Parameter language : Entity -> Prop.']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_predicate1_argument2(self):
exprs = [lexpr('language(Python, Scala)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter Python : Entity.',
'Parameter Scala : Entity.',
'Parameter language : Entity -> (Entity -> Prop).']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_predicate2_argument1_and_2(self):
exprs = [lexpr('AND(language(Python, Scala), nice(Python))')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter nice : Entity -> Prop.',
'Parameter Python : Entity.',
'Parameter Scala : Entity.',
'Parameter language : Entity -> (Entity -> Prop).',
'Parameter AND : Prop -> (Prop -> Prop).']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_predicate2_argument1_and_2Exprs2(self):
exprs = [lexpr('language(Python, Scala)'), lexpr('nice(Python)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter nice : Entity -> Prop.',
'Parameter Python : Entity.',
'Parameter Scala : Entity.',
'Parameter language : Entity -> (Entity -> Prop).']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_pred1_prop_prop(self):
exprs = [lexpr('nice(language(Python, Scala))')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter nice : Prop -> Prop.',
'Parameter Python : Entity.',
'Parameter Scala : Entity.',
'Parameter language : Entity -> (Entity -> Prop).']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_pred2_prop_prop(self):
exprs = [lexpr('nice(language(Python, Scala))'),
lexpr('fun(language(Python, Scala))')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter nice : Prop -> Prop.',
'Parameter fun : Prop -> Prop.',
'Parameter Python : Entity.',
'Parameter Scala : Entity.',
'Parameter language : Entity -> (Entity -> Prop).']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_exists(self):
exprs = [lexpr('exists x.P(x)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter P : Entity -> Prop.',
'Parameter x : Entity.']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_exist(self):
exprs = [lexpr('exist x.P(x)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter P : Entity -> Prop.',
'Parameter x : Entity.']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_Lambda1exists1(self):
exprs = [lexpr('\P.exist x.P(x)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter P : Entity -> Prop.',
'Parameter x : Entity.']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_Lambda2exists1(self):
exprs = [lexpr('\P y.exist x.P(x, y)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter P : Entity -> (Entity -> Prop).',
'Parameter x : Entity.',
'Parameter y : Entity.']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_Lambda3exists1(self):
exprs = [lexpr('\P y.\T.exist x.T(P(x, y))')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter P : Entity -> (Entity -> Prop).',
'Parameter T : Prop -> Prop.',
'Parameter x : Entity.',
'Parameter y : Entity.']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_Lambda3exists2(self):
exprs = [lexpr('\P y.\T.exist x.exists z.T(P(x, y), z)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter P : Entity -> (Entity -> Prop).',
'Parameter T : Prop -> (Entity -> Prop).',
'Parameter x : Entity.',
'Parameter y : Entity.',
'Parameter z : Entity.']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_Lambda3exists2All1(self):
exprs = [lexpr('\P y.\T.all w.exist x.exists z.T(P(x, y), z, w)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter P : Entity -> (Entity -> Prop).',
'Parameter T : Prop -> (Entity -> (Entity -> Prop)).',
'Parameter w : Entity.',
'Parameter x : Entity.',
'Parameter y : Entity.',
'Parameter z : Entity.']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
def test_Lambda3exists2All1Mixed(self):
exprs = [lexpr('\P y.\T.all w.exists z.T(exist x.P(x, y), z, w)')]
dynamic_library, _ = combine_signatures_or_rename_preds(exprs)
dynamic_library = nltk_sig_to_coq_lib(dynamic_library)
expected_dynamic_library = \
['Parameter P : Entity -> (Entity -> Prop).',
'Parameter T : Prop -> (Entity -> (Entity -> Prop)).',
'Parameter w : Entity.',
'Parameter x : Entity.',
'Parameter y : Entity.',
'Parameter z : Entity.']
for item in dynamic_library:
self.assertIn(item, expected_dynamic_library)
self.assertEqual(len(expected_dynamic_library), len(dynamic_library))
if __name__ == '__main__':
suite1 = unittest.TestLoader().loadTestsFromTestCase(combine_signatures_or_rename_predsTestCase)
suite2 = unittest.TestLoader().loadTestsFromTestCase(build_arbitrary_dynamic_libraryTestCase)
suite3 = unittest.TestLoader().loadTestsFromTestCase(build_dynamic_libraryTestCase)
suite4 = unittest.TestLoader().loadTestsFromTestCase(Coq2NLTKTypesTestCase)
suite5 = unittest.TestLoader().loadTestsFromTestCase(Coq2NLTKSignaturesTestCase)
suite6 = unittest.TestLoader().loadTestsFromTestCase(ArbiAutoTypesTestCase)
suites = unittest.TestSuite([suite1, suite2, suite3, suite4, suite5, suite6])
unittest.TextTestRunner(verbosity=2).run(suites)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
from unittest import TestCase
from itertools import chain
import numpy as np
from numpy.lib import NumpyVersion
import sys
sys.path.append('../')
from fpq.vector import *
import fpq.fp
class TestVector(TestCase):
def test_is_valid_format(self):
# float : uint8
self.assertTrue(is_valid_format(np.float16, np.uint8, 2))
self.assertTrue(is_valid_format(np.float32, np.uint8, 2))
self.assertTrue(is_valid_format(np.float64, np.uint8, 2))
for nbits in chain(range(2), range(3,9)):
self.assertFalse(is_valid_format(np.float16, np.uint8, nbits))
self.assertFalse(is_valid_format(np.float32, np.uint8, nbits))
self.assertFalse(is_valid_format(np.float64, np.uint8, nbits))
# float16 : uint16
for nbits in range(2,7):
self.assertTrue(is_valid_format(np.float16, np.uint16, nbits))
for nbits in chain(range(2), range(8,17)):
self.assertFalse(is_valid_format(np.float16, np.uint16, nbits))
# float16 : uint32
for nbits in range(7,13):
self.assertTrue(is_valid_format(np.float16, np.uint32, nbits))
for nbits in chain(range(7), range(13,33)):
self.assertFalse(is_valid_format(np.float16, np.uint32, nbits))
# float16 : uint64
for nbits in range(65):
self.assertFalse(is_valid_format(np.float16, np.uint64, nbits))
# float32 : uint16
for nbits in range(2,7):
self.assertTrue(is_valid_format(np.float32, np.uint16, nbits))
for nbits in chain(range(2), range(8,17)):
self.assertFalse(is_valid_format(np.float32, np.uint16, nbits))
# float32 : uint32
for nbits in range(2,15):
self.assertTrue(is_valid_format(np.float32, np.uint32, nbits))
for nbits in chain(range(2), range(16,33)):
self.assertFalse(is_valid_format(np.float32, np.uint32, nbits))
# float32 : uint64
for nbits in range(15,26):
self.assertTrue(is_valid_format(np.float32, np.uint64, nbits))
for nbits in chain(range(15), range(26,65)):
self.assertFalse(is_valid_format(np.float32, np.uint64, nbits))
# float64 : uint16
for nbits in range(2,7):
self.assertTrue(is_valid_format(np.float64, np.uint16, nbits))
for nbits in chain(range(2), range(7,17)):
self.assertFalse(is_valid_format(np.float64, np.uint16, nbits))
# float64 : uint32
for nbits in range(2,15):
self.assertTrue(is_valid_format(np.float64, np.uint32, nbits))
for nbits in chain(range(2), range(15,33)):
self.assertFalse(is_valid_format(np.float64, np.uint32, nbits))
# float64 : uint64
for nbits in range(2,31):
self.assertTrue(is_valid_format(np.float64, np.uint64, nbits))
for nbits in chain(range(2), range(31,65)):
self.assertFalse(is_valid_format(np.float64, np.uint64, nbits))
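    # The breakdown tuple appears to be (bits for the index of the dropped/largest
    # component, bits for each of the two stored components, leftover bits); in
    # every expected case below the four fields sum to the width of the uint type.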
def test_calc_breakdown_of_uint(self):
# uint8
expected = (2,2,2,2)
actual = calc_breakdown_of_uint(dtype=np.uint8, nbits=2)
self.assertTrue(isinstance(actual, tuple))
self.assertTrue(np.array_equal(actual, expected))
# uint16
expected = ((2, 2, 2, 10),
(2, 3, 3, 8),
(2, 4, 4, 6),
(2, 5, 5, 4),
(2, 6, 6, 2))
for i, nbits in enumerate(range(2,7)):
actual = calc_breakdown_of_uint(dtype=np.uint16, nbits=nbits)
self.assertTrue(isinstance(actual, tuple))
self.assertTrue(np.array_equal(actual, expected[i]))
# uint32
expected = ((2, 2, 2, 26), (2, 3, 3, 24), (2, 4, 4, 22),
(2, 5, 5, 20), (2, 6, 6, 18), (2, 7, 7, 16),
(2, 8, 8, 14), (2, 9, 9, 12), (2, 10, 10, 10),
(2, 11, 11, 8), (2, 12, 12, 6), (2, 13, 13, 4),
(2, 14, 14, 2))
for i, nbits in enumerate(range(2,15)):
actual = calc_breakdown_of_uint(dtype=np.uint32, nbits=nbits)
self.assertTrue(isinstance(actual, tuple))
self.assertTrue(np.array_equal(actual, expected[i]))
# uint64
expected = ((2, 2, 2, 58), (2, 3, 3, 56), (2, 4, 4, 54),
(2, 5, 5, 52), (2, 6, 6, 50), (2, 7, 7, 48),
(2, 8, 8, 46), (2, 9, 9, 44),(2, 10, 10, 42),
(2, 11, 11, 40), (2, 12, 12, 38), (2, 13, 13, 36),
(2, 14, 14, 34), (2, 15, 15, 32), (2, 16, 16, 30),
(2, 17, 17, 28), (2, 18, 18, 26), (2, 19, 19, 24),
(2, 20, 20, 22), (2, 21, 21, 20), (2, 22, 22, 18),
(2, 23, 23, 16), (2, 24, 24, 14), (2, 25, 25, 12),
(2, 26, 26, 10), (2, 27, 27, 8), (2, 28, 28, 6),
(2, 29, 29, 4), (2, 30, 30, 2))
for i, nbits in enumerate(range(2,31)):
actual = calc_breakdown_of_uint(dtype=np.uint64, nbits=nbits)
self.assertTrue(isinstance(actual, tuple))
self.assertTrue(np.array_equal(actual, expected[i]))
@unittest.skipIf(NumpyVersion(np.__version__) < '1.11.2', 'not supported in this numpy version')
def test_encoding_decoding_between_vec16_and_uint32(self):
dtypes = (np.float16, np.uint32)
nbits = 10
expected = np.array([-50, 30, 20], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, dtypes[1]))
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[10, 20, 30],
[-40, 30, 20]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-50, 30, 20]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 90],
[-50, 30, 20]]],
[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-80, 30, 20]]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
def test_encoding_decoding_between_vec32_and_uint32(self):
dtypes = (np.float32, np.uint32)
nbits = 10
expected = np.array([-50, 30, 20], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, dtypes[1]))
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[10, 20, 30],
[-40, 30, 20]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-50, 30, 20]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
expected = np.array([[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 90],
[-50, 30, 20]]],
[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-80, 30, 20]]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-01, atol=1e-02))
def test_encoding_decoding_between_vec32_and_uint64(self):
dtypes = (np.float32, np.uint64)
nbits = 20
expected = np.array([-50, 30, 20], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, dtypes[1]))
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[10, 20, 30],
[-40, 30, 20]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-50, 30, 20]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 90],
[-50, 30, 20]]],
[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-80, 30, 20]]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
def test_encoding_decoding_between_vec64_and_uint64(self):
dtypes = (np.float64, np.uint64)
nbits = 20
expected = np.array([-50, 30, 20], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, dtypes[1]))
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[10, 20, 30],
[-40, 30, 20]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-50, 30, 20]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 90],
[-50, 30, 20]]],
[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-80, 30, 20]]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
def test_encoding_decoding_between_vec_and_uint_by_ogl(self):
encoder = fpq.fp.encode_fp_to_ogl_snorm
decoder = fpq.fp.decode_ogl_snorm_to_fp
dtypes = (np.float64, np.uint64)
nbits = 20
expected = np.array([-50, 30, 20], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits, encoder=encoder)
self.assertTrue(isinstance(enc, dtypes[1]))
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits, decoder=decoder)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[10, 20, 30],
[-40, 30, 20]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits, encoder=encoder)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits, decoder=decoder)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-50, 30, 20]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits, encoder=encoder)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits, decoder=decoder)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 90],
[-50, 30, 20]]],
[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-80, 30, 20]]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits, encoder=encoder)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits, decoder=decoder)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
def test_encoding_decoding_between_vec_and_uint_by_d3d(self):
encoder = fpq.fp.encode_fp_to_d3d_snorm
decoder = fpq.fp.decode_d3d_snorm_to_fp
dtypes = (np.float64, np.uint64)
nbits = 20
expected = np.array([-50, 30, 20], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits, encoder=encoder)
self.assertTrue(isinstance(enc, dtypes[1]))
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits, decoder=decoder)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[10, 20, 30],
[-40, 30, 20]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits, encoder=encoder)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits, decoder=decoder)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-50, 30, 20]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits, encoder=encoder)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits, decoder=decoder)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
expected = np.array([[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 90],
[-50, 30, 20]]],
[[[10, 20, 30],
[-40, 30, 20]],
[[10, 20, 60],
[-80, 30, 20]]]], dtype=dtypes[0])
enc = encode_vec_to_uint(expected, dtype=dtypes[1], nbits=nbits, encoder=encoder)
self.assertTrue(isinstance(enc, np.ndarray))
self.assertTrue(enc.dtype == dtypes[1])
dec = decode_uint_to_vec(enc, dtype=dtypes[0], nbits=nbits, decoder=decoder)
self.assertTrue(isinstance(dec, np.ndarray))
self.assertTrue(dec.dtype == dtypes[0])
self.assertTrue(np.allclose(dec, expected, rtol=1e-03, atol=1e-04))
|
from django.contrib.auth.models import User
from rest_framework import routers, serializers, viewsets, decorators, response
from api.permissions import IsSelfOrSuperUser
from rest_framework.permissions import IsAuthenticated, AllowAny
# Serializers define the API representation.
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = (
'url',
'username',
'email',
'is_staff',
'first_name',
'last_name'
)
partial = True
# ViewSets define the view behavior.
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = (IsSelfOrSuperUser, )
{% if with_swagger | bool %}
def list(self, request, *args, **kwargs):
"""
List all users.
**Notes:**
* Requires authenticated user
**Example usage:**
import requests
response = requests.get('/users/')
**Example response:**
[
{
"url": "http://192.168.99.100:8000/users/1/",
"username": "admin",
"email": "a@b.com",
"is_staff": true,
"first_name": "",
"last_name": ""
}
]
---
responseMessages:
- code: 403
message: Not authenticated
consumes:
- application/json
produces:
- application/json
"""
return super(UserViewSet, self).list(request, *args, **kwargs)
{% endif %}
class HealthViewSet(viewsets.ViewSet):
permission_classes = (AllowAny, )
def list(self, request, format=None):
# make sure we can connect to the database
all_statuses = []
status = "up"
db_status = self.__can_connect_to_db()
all_statuses.append(db_status)
if "down" in all_statuses:
status = "down"
data = {
"data": {
"explorer": "/api-explorer",
},
"status": {
"db": db_status,
"status": status
}
}
return response.Response(data)
def __can_connect_to_db(self):
try:
user = User.objects.first()
return "up"
except Exception:
return "down"
# Routers provide an easy way of automatically determining the URL conf.
router = routers.DefaultRouter()
router.register(r'health', HealthViewSet, base_name='health')
router.register(r'users', UserViewSet)
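# Typical hookup in urls.py (not part of this module, shown only as a sketch):
#
#   from django.urls import include, path
#   urlpatterns = [path('', include(router.urls))]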
|
# Generated by Django 3.0.7 on 2020-08-17 06:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('performance', '0002_auto_20200817_0934'),
]
operations = [
migrations.AlterField(
model_name='departmentkpi',
name='description',
field=models.TextField(),
),
]
|
import ad3
import numpy as np
from pystruct.inference.common import _validate_params
class InferenceException(Exception):
pass
def inference_ad3_local(unary_potentials, pairwise_potentials, edges, relaxed=False,
verbose=0, return_energy=False, branch_and_bound=False,
inference_exception=None, return_marginals=False):
b_multi_type = isinstance(unary_potentials, list)
if b_multi_type:
res = ad3.general_graph(unary_potentials, edges, pairwise_potentials,
verbose=verbose, n_iterations=4000, exact=branch_and_bound)
else:
n_states, pairwise_potentials = \
_validate_params(unary_potentials, pairwise_potentials, edges)
unaries = unary_potentials.reshape(-1, n_states)
res = ad3.general_graph(unaries, edges, pairwise_potentials,
verbose=verbose, n_iterations=4000, exact=branch_and_bound)
unary_marginals, pairwise_marginals, energy, solver_status = res
if verbose:
print(solver_status)
if solver_status in ["fractional", "unsolved"] and relaxed:
if b_multi_type:
y = (unary_marginals, pairwise_marginals)
else:
unary_marginals = unary_marginals.reshape(unary_potentials.shape)
y = (unary_marginals, pairwise_marginals)
else:
if b_multi_type:
if inference_exception and solver_status in ["fractional", "unsolved"]:
raise InferenceException(solver_status)
ly = list()
_cum_n_states = 0
for unary_marg in unary_marginals:
ly.append(_cum_n_states + np.argmax(unary_marg, axis=-1))
_cum_n_states += unary_marg.shape[1]
y = np.hstack(ly)
else:
y = np.argmax(unary_marginals, axis=-1)
if return_energy:
return y, -energy
if return_marginals:
return y, unary_marginals
return y
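# Usage sketch (argument shapes follow the usual pystruct convention; treat the
# concrete shapes as assumptions rather than a guarantee of this wrapper):
#
#   unary_potentials : (n_nodes, n_states) array, or a list of arrays in the
#                      multi-type case
#   pairwise_potentials : (n_states, n_states) or (n_edges, n_states, n_states)
#   edges : (n_edges, 2) integer array of node indices
#   y = inference_ad3_local(unary_potentials, pairwise_potentials, edges)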
|
from pyspark.ml.feature import StringIndexer,IndexToString
from pyspark import SparkContext
from pyspark.sql import SQLContext
sc = SparkContext("local", "samp")
sqlContext = SQLContext(sc)
stringDF=sqlContext.createDataFrame([(0, 'a'), (1, 'b'), (2, 'c'), (3, 'a'), (4, 'a'), (5, 'c')], ['id', 'category'])
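# StringIndexer assigns each distinct category a numeric index ordered by
# descending frequency (the most frequent label gets index 0.0); IndexToString
# reverses the mapping using the metadata attached to the indexed column.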
indexer = StringIndexer(inputCol="category", outputCol="categoryIndex")
indexed = indexer.fit(stringDF).transform(stringDF)
converter=IndexToString(inputCol="categoryIndex",outputCol="originalCategory")
converted=converter.transform(indexed)
converted.select("id","originalCategory").show()
|
import logging
from slack_bolt.logger import messages
from .constants import (
QUESTIONS_TYPE_EMOJIS_DICT,
QUESTIONS_TYPE_NAMES_DICT,
)
class ConnectCommand:
question_type_names_dict = QUESTIONS_TYPE_NAMES_DICT
question_type_emojis_dict = QUESTIONS_TYPE_EMOJIS_DICT
def __init__(self, client, say):
self.say = say
self.client = client
def ask_for_question_type(self):
question_types_string = "What kind of question would you like? React with your response:"
for question_type in self.question_type_names_dict:
emoji = self.question_type_emojis_dict[question_type]
question_name = self.question_type_names_dict[question_type]
question_types_string += \
f"\n :{emoji}:" + \
f" {question_name}"
response = self.say(question_types_string)
return response
def add_question_options_as_reactions(self, response):
channel = response.get("channel")
message_timestamp = response.get("ts")
for question_type in self.question_type_emojis_dict:
emoji = self.question_type_emojis_dict[question_type]
self.client.reactions_add(
channel=channel,
timestamp=message_timestamp,
name=emoji,
)
def do_command(self):
response = self.ask_for_question_type()
self.add_question_options_as_reactions(response)
return response
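# Hypothetical wiring into a Bolt app (the command name and handler below are
# assumptions, not part of this module):
#
#   @app.command("/connect")
#   def handle_connect(ack, client, say):
#       ack()
#       ConnectCommand(client, say).do_command()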
|
from yandeley.models.documents import TrashAllDocument, TrashBibDocument, TrashClientDocument, TrashTagsDocument, \
TrashDocument
from yandeley.resources.base_documents import DocumentsBase
class Trash(DocumentsBase):
"""
Top-level resource for accessing trashed documents. These can be:
- trashed documents for the logged-in user, if retrieved from a
:func:`MendeleySession <yandeley.session.MendeleySession.trash>`.
- trashed documents in a :func:`Group <yandeley.models.groups.Group.trash>`.
"""
_url = '/trash'
def __init__(self, session, group_id):
super(Trash, self).__init__(session, group_id)
def get(self, id, view=None):
"""
Retrieves a trashed document by ID.
:param id: the ID of the document to get.
:param view: the view to get. One of 'bib', 'client', 'tags', 'all'.
:return: a :class:`TrashDocument <yandeley.models.documents.TrashDocument>`.
"""
return super(Trash, self).get(id, view)
def list(self, page_size=None, view=None, sort=None, order=None, modified_since=None, deleted_since=None):
"""
Retrieves trashed documents, as a paginated collection.
:param page_size: the number of documents to return on each page. Defaults to 20.
:param view: the view to get. One of 'bib', 'client', 'tags', 'all'.
:param sort: if specified, sorts documents by the specified field. One of 'created', 'last_modified', 'title'.
:param order: if specified in conjunction with 'sort', specifies the sort order. One of 'asc', 'desc'.
:param modified_since: if specified, only returns files modified after this timestamp.
:param deleted_since: if specified, only returns the IDs of documents deleted after this timestamp.
:return: a :class:`Page <yandeley.pagination.Page>` of
:class:`TrashDocuments <yandeley.models.documents.TrashDocument>`.
"""
return super(Trash, self).list(page_size, view, sort, order, modified_since, deleted_since)
def iter(self, page_size=None, view=None, sort=None, order=None, modified_since=None, deleted_since=None):
"""
Retrieves trashed documents, as an iterator.
:param page_size: the number of documents to retrieve at a time. Defaults to 20.
:param view: the view to get. One of 'bib', 'client', 'tags', 'all'.
:param sort: if specified, sorts documents by the specified field. One of 'created', 'last_modified', 'title'.
:param order: if specified in conjunction with 'sort', specifies the sort order. One of 'asc', 'desc'.
:param modified_since: if specified, only returns files modified after this timestamp.
:param deleted_since: if specified, only returns the IDs of documents deleted after this timestamp.
:return: an iterator of :class:`TrashDocuments <yandeley.models.documents.TrashDocument>`.
"""
return super(Trash, self).iter(page_size, view, sort, order, modified_since, deleted_since)
@staticmethod
def view_type(view):
return {
'all': TrashAllDocument,
'bib': TrashBibDocument,
'client': TrashClientDocument,
'tags': TrashTagsDocument,
'core': TrashDocument,
}.get(view, TrashDocument)
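# Usage sketch (the `session` and `group` objects are assumptions based on the
# class docstring, not defined in this module):
#
#   trash = session.trash                      # or group.trash
#   doc = trash.get(some_id, view='bib')
#   for d in trash.iter(page_size=50, sort='last_modified', order='desc'):
#       ...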
|
def Sum(value1=1, value2=2):
return value1 + value2
print(Sum())
print(Sum(value1=100))
print(Sum(value2=200))
print(Sum(value1=100, value2=200))
print(Sum(100,200))
print(Sum(100))
|
from test import TestProtocolCase, bad_client_wrong_broadcast, bad_client_output_vector
import random
import time
class TestProtocol(TestProtocolCase):
def test_001_cheat_in_sending_different_keys(self):
good_threads = self.make_clients_threads(with_print = True, number_of_clients = self.number_of_players - 1)
bad_thread = self.make_bad_client(bad_client_wrong_broadcast, with_print = True)
protocolThreads = good_threads + [bad_thread]
random.shuffle(protocolThreads)
self.start_protocols(protocolThreads)
done = False
while not done:
completes = [self.is_protocol_complete(p) for p in good_threads]
done = all(completes)
self.stop_protocols(protocolThreads)
tx = good_threads[0].protocol.tx.raw
for pThread in good_threads[1:]:
self.assertEqual(tx, pThread.protocol.tx.raw)
def test_002_cheat_in_sending_different_outputs(self):
protocolThreads = self.make_clients_threads(with_print = True, number_of_clients = self.number_of_players - 1)
bad_thread = self.make_bad_client(bad_client_output_vector, with_print = True)
self.start_protocols(protocolThreads)
protocolThreads.append(bad_thread)
time.sleep(1)
bad_thread.start()
done = False
while not done:
completes = [self.is_protocol_complete(p) for p in protocolThreads[:-1]]
done = all(completes)
self.stop_protocols(protocolThreads)
time.sleep(1)
tx = protocolThreads[0].protocol.tx.raw
for pThread in protocolThreads[:-1]:
print(pThread.protocol.me)
self.assertEqual(tx, pThread.protocol.tx.raw)
|
"""
https://www.codewars.com/kata/578553c3a1b8d5c40300037c/train/python
Given an array of 1s and 0s, return the equivalent binary value to an integer.
binary_array_to_number([0,0,0,1]), 1
binary_array_to_number([0,0,1,0]), 2
binary_array_to_number([1,1,1,1]), 15
binary_array_to_number([0,1,1,0]), 6
"""
from typing import List
def binary_array_to_number(arr: List[int]):
return int('0b' + ''.join([str(num) for num in arr]), 2)
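# An equivalent bit-shift formulation, shown only as an aside:
#   functools.reduce(lambda acc, bit: (acc << 1) | bit, arr, 0)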
print(binary_array_to_number([1, 1, 1, 1]))
|
import os
import os.path
import pickle
import re
import sys
from fnmatch import filter
from PIL import Image
class PyRewrite():
def __init__(self):
self.config_path = '/'.join(
[os.getenv('HOME'), '.pyrewrite'])
def check_if_config_file_exists(self):
"""Check if config file exists."""
return os.path.isfile(self.config_path)
def load_config(self):
"""Load or create config file."""
config = {}
if self.check_if_config_file_exists():
config = pickle.load(open(self.config_path, 'rb'))
else:
pickle.dump(config, open(self.config_path, 'wb'))
return config
def set_config_path(self, path):
"""Set the config path."""
config = self.load_config()
config['path'] = path
pickle.dump(config, open(self.config_path, 'wb'))
def get_images(self):
"""Get .jpg files in path ignoring the case."""
config = self.load_config()
return filter(os.listdir(config['path']), '*.[Jj][Pp][Gg]')
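    # rename_images normalises file names in the configured path: any character
    # other than an ASCII letter, digit, newline or '.' becomes '-', and the
    # whole name is lower-cased before the file is renamed on disk.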
def rename_images(self):
"""Renamed to files in a standard way."""
updated = 0
config = self.load_config()
if 'path' not in config or config == {}:
sys.exit('Path must be set.')
_files = self.get_images()
for file in _files:
_from = ''.join([config['path'], file])
_to = ''.join([config['path'], re.sub(
'[^a-zA-Z0-9\n\\.]', '-', file).lower()])
if _from != _to:
print('renaming {}'.format(_from))
os.rename(_from, _to)
updated += 1
return updated, config['path']
def compress_images(self, quality):
"""Compress the images to a given quality."""
config = self.load_config()
if 'path' not in config or config == {}:
sys.exit('Path must be set.')
_pre_size = self.get_size(config['path'])
_files = self.get_images()
for file in _files:
fpath = ''.join([config['path'], file])
print('compressing {}'.format(fpath))
try:
Image.open(fpath).save(
fpath, quality=int(quality),
optimize=True, progressive=True)
except Exception:
pass
_post_size = self.get_size(config['path'])
return _pre_size, _post_size
def sizeof_fmt(self, num, suffix='B'):
"""Convert size of directory in readable units."""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def get_size(self, path):
"""Get the size of a directory."""
total_size = 0
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return self.sizeof_fmt(total_size)
if __name__ == "__main__":
p = PyRewrite()
"""No argvs == help."""
if len(sys.argv) == 1:
print('\nPyRewrite\n'
'============\n'
'\n'
'Available commands:\n'
'\n'
' pyrewrite set <path>\n'
' pyrewrite rename\n'
' pyrewrite compress <quality>\n'
'\n'
'Configuration:\n'
)
print(' ' + str(p.load_config()) + '\n')
else:
if sys.argv[1] == 'set' and len(sys.argv) == 3:
p.set_config_path(sys.argv[2])
elif sys.argv[1] == 'rename' and len(sys.argv) == 2:
updated, path = p.rename_images()
print('> {} images renamed in {}'.format(updated, path))
elif sys.argv[1] == 'compress' and len(sys.argv) == 3:
pre, post = p.compress_images(sys.argv[2])
print('> compressed images from {} to {}'.format(pre, post))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 2 07:15:26 2022
@author: ACER
"""
# empty list
lista = []
# list with elements
listElementos = [1, 3, 4, 5]
# access the elements
listAlumnos = ["adri", "rither", "jose", "juan"]
alumnoPos_1 = listAlumnos[len(listAlumnos) - 1]  # 'juan'
# get the size of the list
tamanioListaAlumnos = len(listAlumnos)
print("el tamaño de la lista alumnos es :", tamanioListaAlumnos)
# append elements to a list
lista.append(1)
lista.append(2)
lista.append(5)
# lista is now [1, 2, 5]
# insert an element at a given index of the list
# insert(index in range 0..len-1, element)
lista.insert(2, 3)
# lista is now [1, 2, 3, 5]
print(lista)
# remove elements from a list
lista.pop(0)
# lista is now [2, 3, 5]
print(lista)
listaDocentes = ['jhonny', 'caballero', 'haku']
listaDocentes.remove('caballero')
# ['jhonny', 'haku']
print(listaDocentes)
# iterate over lists
for Docente in listaDocentes:
    print(Docente)
tamanioListaDocentes = len(listaDocentes)
for i in range(0, tamanioListaDocentes):
    print(listaDocentes[i])
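# A small extra sketch: enumerate() yields the index and the element together,
# which avoids the manual range(len(...)) loop used above.
for i, docente in enumerate(listaDocentes):
    print(i, docente)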
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A K-means routine that clusters similar vectors.
Based on the Variational Shape Approximation work by Cohen-Steiner and on
the python script developed by Jesus Galvez.
The clustering routine produces non-overlapping regions according to a specified metric.
I. Algorithm:
1. Make a Mesh datastructure with the following properties embedded:
A. Face Centers
B. Face Normals
C. Vertex Normals
D. Face Count
E. Face Indexes
F. Vertices
G. Area per Face
H. Edge Adjacency
I. Face Adjacency
2. Compute Weighed Normals (Face normal * face area)
3. Create Initial Seeds (if not specified) with *create seeds*
4. Run K-Means Method
'''
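# Usage sketch (names are illustrative, not guaranteed by this module): given a
# structured-mesh wrapper `s_mesh` exposing a COMPAS mesh with the properties listed
# above, and a face attribute tag such as 'face_vector':
#
#   faces = make_faces(s_mesh, tag='face_vector')
#   seeded = furthest_init(num=5, faces=faces)[-1]            # farthest-point seeding
#   clusters = k_means(seeded, faces, iters=20, mergesplit=True)[-1]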
__author__ = 'Rafael Pastrana'
__name__ = 'K-Means Clustering'
__version__ = '0.0.5'
__date__ = '17.04.20'
import heapq
import itertools
import math
import random
import time
import numpy as np
import compas.geometry as cg
from functools import total_ordering
from functools import reduce
from streamlines.utilities import Utilities
ut = Utilities()
def k_means(clusters, faces, iters, mergesplit=False, callback=None):
'''
2. Run for N given iterations only.
1. Create Cluster Colors. Make global *counter*.
2B. Create Proxys with *get_proxy*
3. Test whether it is the 1st iteration or not with global *counter*.
4. If 1st, get proxies from clusters through from *get_proxy_seed*
4B. If not, proxies are the regions. Or the other way around.
5. Build a queue with the seeds' adjacent faces through *build_queue*
6. Grow up a cluster with *assign_to_region* method.
7. Create new proxies from created regions with *grow_seeds* method.
8. New proxies become the proxies.
9. Repeat
'''
all_clusters = []
# print('seeds are: {}'.format([cl.seed for cl_key, cl in clusters.items()]))
for it in range(iters):
# print('#####')
s1 = time.time()
new_clusters, new_faces = get_new_clusters(clusters, faces)
q = Queue(new_clusters, new_faces)
q.init_queue()
q.assign()
clusters = q.get_clusters()
all_clusters.append(output_cls(clusters))
if mergesplit is True:
if it < iters-1:
clusters = merge_split(clusters)
if callback:
callback(k=it, clusters=clusters)
e1 = time.time()
# print('Iteration {} execution time was {}'.format(it, (e1-s1)))
# print('End Clusters are: {}'.format(clusters))
return all_clusters
def furthest_init(num, faces, callback=None): # no dependency
# print('#####')
# print('Furthest init started')
s0 = time.time()
clusters = {0: Cluster(0, 0)}
all_clusters = []
for i in range(num):
new_clusters, new_faces = get_new_clusters(clusters, faces)
q = Queue(new_clusters, new_faces)
q.init_queue()
q.assign()
clusters = q.get_clusters()
all_clusters.append(output_cls(clusters))
if i < num-1:
t_s = get_cluster_to_split(clusters)
clusters = split_cluster(t_s, clusters)
if callback:
callback(k=i, clusters=clusters)
e0 = time.time()
# print('Furthest init execution time was {}'.format(e0-s0))
return all_clusters
def output_cls(clusters):
new_clusters = {}
for c_key, cluster in clusters.items():
new_cl = Cluster(cluster.id, cluster.seed)
new_cl.copy_cluster(cluster)
new_clusters[c_key] = new_cl
return new_clusters
def get_new_clusters(clusters, faces):
n_clusters = {}
for key, cluster in clusters.items():
cluster.harvest_faces(faces)
cluster.set_proxy()
n_cluster = Cluster(cluster.id, cluster.get_new_seed())
n_clusters[n_cluster.id] = n_cluster
cluster.clear_faces() # clears out cluster and face relationship
return n_clusters, faces
def clear_clusters(faces):
for f_key, face in faces.items():
face.clear_cluster()
def get_random_seeds(maximum, num):
return random.sample(range(0, maximum), num)
def get_random_color(num):
r = random.randint(0, 255)
g = random.randint(0, 255)
b = random.randint(0, 255)
return (r, g, b)
def get_random_colors(values):
return list(map(get_random_color, values))
def make_faces(s_mesh, tag, keys=None, weight=False): # no dep
faces = {}
    keys = keys or s_mesh.cMesh.faces()  # use the same cMesh attribute as the calls below
for f_key in keys:
face = Face(f_key)
halfedges = s_mesh.cMesh.face_halfedges(f_key)
vector = s_mesh.cMesh.get_face_attribute(f_key, tag)
face.set_halfedges([tuple(sorted(h)) for h in halfedges])
face.set_vertices(s_mesh.cMesh.face_vertices(f_key))
face.set_area(s_mesh.cMesh.face_area(f_key))
face.set_neighbours(s_mesh.cMesh.face_neighbors(f_key))
face.set_vector(vector)
face.set_weighed_vector(weight)
face.set_angle(vector)
faces[f_key] = face
return faces
def cluster_adjacency(clusters):
comb = itertools.combinations(range(len(clusters)), 2)
cl_comb = [(clusters.get(x[0]), clusters.get(x[1])) for x in comb]
return list(filter(is_adjacent, cl_comb))
def is_adjacent(cluster_pair):
vert_1 = cluster_pair[0].get_faces_halfedges()
vert_2 = cluster_pair[1].get_faces_halfedges()
return len(vert_1.intersection(vert_2)) > 0
def get_clusters_to_merge(adj_clusters):
return min(adj_clusters, key=lambda x: simulate_merge(x[0], x[1]))
def get_cluster_to_split(clusters):
return max(clusters.items(), key=lambda x: x[1].get_distortion())[1]
def simulate_merge(cluster_1, cluster_2):
t_faces = set(cluster_1.faces + cluster_2.faces)
e = errors(t_faces, get_proxy(t_faces))
return distortion(e)
def merge_clusters(t_m, clusters):
resilient = t_m[0]
resilient.absorb_cluster(t_m[1])
new_clusters = {v.id: v for k, v in clusters.items() if v.id != t_m[1].id}
new_clusters[resilient.id] = resilient
return new_clusters, t_m[1].id
def split_cluster(s_cluster, clusters, new_id=None):
new_fkey = s_cluster.get_worst_seed()
s_cluster.remove_face(new_fkey)
if new_id is None:
new_id = max(clusters.keys()) + 1
clusters[new_id] = Cluster(new_id, new_fkey)
clusters[s_cluster.id] = s_cluster
return clusters
def merge_split(clusters):
adj_cl = cluster_adjacency(clusters) # returns objects list
to_merge = get_clusters_to_merge(adj_cl) # returns objects tuple
to_split = get_cluster_to_split(clusters) # returns object single
if execute_merge_split(to_merge, to_split) is True:
clusters, new_id = merge_clusters(to_merge, clusters)
clusters = split_cluster(to_split, clusters, new_id)
return clusters
def execute_merge_split(t_m, t_s):
to_merge_err = reduce(lambda x, y: x+y, [x.get_distortion() for x in t_m])
merged_err = simulate_merge(t_m[0], t_m[1])
dif = merged_err - to_merge_err
worst_err = t_s.get_distortion()
if math.fabs(dif) < 0.50 * worst_err: # 0.5, not squared
print('merge-split is True')
return True
else:
print('merge-split is False')
return False
def get_proxy(faces):
# return get_proxy_angle(faces)
return get_proxy_vector(faces)
def get_proxy_angle(faces):
angles = [face.angle for face in faces]
return proxy_maker(angles)
def get_proxy_vector(faces):
w_ve = [face.vector for face in faces]
w_ve = list(map(lambda x: ut.align_vector(x, w_ve[0]), w_ve))
r_ve = reduce(lambda x, y: cg.add_vectors(x, y), w_ve)
return cg.normalize_vector(r_ve)
def proxy_maker(values):
return np.mean(values) # mean, median?
def errors(faces, proxy):
return [face.get_error(proxy) for face in faces]
def distortion(errors):
# return reduce(lambda x, y: x+y, errors)
return np.mean(np.array(errors) ** 2)
# return np.sum(np.array(errors))
class Queue():
def __init__(self, clusters, faces):
self.clusters = clusters
self.faces = faces
self.queue = []
heapq.heapify(self.queue)
def init_queue(self):
for c_key, cluster in self.clusters.items():
cluster.add_seed(self.faces)
cluster.set_proxy()
n_faces = self.get_neighbour_faces(cluster.seed)
self.update_queue(n_faces, c_key)
def update_queue(self, n_faces, c_key):
for f in n_faces:
if f.cluster is None:
error = f.get_error(self.clusters.get(c_key).proxy)
entry = {'fkey': f.fkey, 'cluster': c_key}
heapq.heappush(self.queue, KeyDict(error, entry))
def assign(self):
while len(self.queue) > 0:
entry = heapq.heappop(self.queue)
cu_f = entry.dct.get('fkey')
face = self.faces.get(cu_f)
if face.cluster is None:
c_key = entry.dct.get('cluster')
cluster = self.clusters.get(c_key)
cluster.add_face(face)
self.update_queue(self.get_neighbour_faces(cu_f), c_key)
def get_neighbour_faces(self, f_key):
for nf in self.faces.get(f_key).neighbours:
yield self.faces.get(nf)
def get_clusters(self):
for ckey, cluster in self.clusters.items():
cluster.set_proxy()
cluster.set_distortion()
return self.clusters
class Cluster():
def __init__(self, id, f):
self.id = id
self.seed = f
self.faces = []
self.faces_keys = []
self.proxy = None
self.distortion = None
self.add_face_key(self.seed)
def remove_face(self, fkey):
self.faces_keys = [k for k in self.faces_keys if k != int(fkey)]
self.faces = [f for f in self.faces if f.fkey != fkey]
self.set_proxy()
def absorb_cluster(self, other_cluster):
for o_face in other_cluster.faces:
self.add_face(o_face)
self.set_proxy()
self.set_faces_in_cluster()
def copy_cluster(self, other_cluster):
self.faces_keys = list(set(other_cluster.faces_keys))
self.proxy = other_cluster.proxy
self.distortion = other_cluster.distortion
def add_face_key(self, f_key):
if f_key not in self.faces_keys:
self.faces_keys.append(f_key)
def add_seed(self, faces):
seed_face = faces.get(self.seed)
seed_face.set_cluster(self.id)
self.faces.append(seed_face)
def add_face(self, face):
if face.fkey not in self.faces_keys:
face.set_cluster(self.id)
self.faces_keys.append(face.fkey)
self.faces.append(face)
def harvest_faces(self, faces):
for key in self.faces_keys:
self.faces.append(faces.get(key))
def get_weighed_vectors(self):
return [face.w_vector for face in self.faces]
def get_vectors(self):
return [face.vector for face in self.faces]
def get_angles(self):
return [face.angle for face in self.faces]
def set_faces_in_cluster(self):
for face in self.faces:
face.cluster = self.id
def set_proxy(self):
func = self.set_vector_proxy
# func = self.set_angle_proxy
return func()
def set_vector_proxy(self): # NEW
w_ve = self.get_vectors()
w_ve = list(map(lambda x: ut.align_vector(x, w_ve[0]), w_ve))
r_ve = reduce(lambda x, y: cg.add_vectors(x, y), w_ve)
self.proxy = cg.normalize_vector(r_ve)
def set_angle_proxy(self):
angles = self.get_angles()
self.proxy = proxy_maker(angles) # average...median?
def get_errors(self):
return [face.get_error(self.proxy) for face in self.faces]
def get_new_seed(self):
return min(self.faces, key=lambda x: x.get_error(self.proxy)).fkey
def get_worst_seed(self):
return max(self.faces, key=lambda x: x.get_error(self.proxy)).fkey
def get_face_keys(self):
return [f.fkey for f in self.faces]
def get_faces_halfedges(self):
face_halfedges = set()
for f in self.faces:
face_halfedges.update(f.halfedges)
return face_halfedges
def get_faces_vertices(self):
face_vertices = set()
for f in self.faces:
face_vertices.update(f.vertices)
return face_vertices
def clear_faces(self):
for face in self.faces:
face.clear_cluster()
self.faces[:] = []
def get_distortion(self):
# return reduce(lambda x, y: x+y, self.get_errors())
# return np.mean(self.get_errors())
return distortion(self.get_errors())
# return np.sum(self.get_errors())
def set_distortion(self):
self.distortion = self.get_distortion()
def __repr__(self):
f = len(self.faces)
fk = len(self.faces_keys)
s = self.seed
dst = self.distortion
return 'id:{0} seed:{1} distortion:{4} faces:{2}, keys:{3}'.format(self.id, s, f, fk, dst)
class Face():
def __init__(self, fkey):
self.fkey = fkey
self.vertices = None
self.halfedges = None
self.vector = None
self.vector_length = None
self.w_vector = None
self.area = None
self.neighbours = None
self.error = None
self.angle = None
self.cluster = None
def set_vertices(self, vertices):
self.vertices = vertices
def set_halfedges(self, halfedges):
self.halfedges = halfedges
def set_cluster(self, cluster):
self.cluster = cluster
def set_vector(self, vector):
self.vector = cg.normalize_vector(vector)
self.vector_length = cg.length_vector(vector)
def set_weighed_vector(self, area_weight=False):
if area_weight is True:
self.w_vector = cg.scale_vector(self.vector, self.area)
else:
self.w_vector = self.vector
def set_angle(self, vector):
angle = cg.angle_vectors([1.0, 0.0, 0.0], vector, deg=True)
if angle > 90.0:
angle = 180.0 - angle
self.angle = angle
def set_area(self, area):
self.area = area
def set_neighbours(self, neighbours):
self.neighbours = [n for n in neighbours if n is not None]
def get_error(self, proxy):
func = self.get_error_vector
# func = self.get_error_angle
return func(proxy)
def get_error_vector(self, proxy, area_weight=False):
ali_vec = ut.align_vector(self.vector, proxy)
difference = cg.subtract_vectors(ali_vec, proxy)
error = cg.length_vector_sqrd(difference) # original
# error = cg.length_vector(difference)
# if area_weight is True:
# error = self.area * cg.length_vector_sqrd(difference)
# do something about weights
# w_1 = 0.3
# w_2 = 0.7
# w_error = w_1 * error + w_2 * self.vector_length
return error
def get_error_angle(self, proxy):
error = math.fabs(self.angle - proxy)
if error > 90.0:
error = math.fabs(180.0 - error)
if error > 45:
error = math.fabs(90.0 - error)
return error
def set_error(self, proxy): # NEW
self.error = self.get_error(proxy)
def clear_cluster(self):
self.cluster = None
@total_ordering
class KeyDict(object):
def __init__(self, key, dct):
self.key = key
self.dct = dct
def __lt__(self, other):
return self.key < other.key
def __eq__(self, other):
return self.key == other.key
def __repr__(self):
return '{0.__class__.__name__}(key={0.key}, dct={0.dct})'.format(self)
|
from sys import exit
# The number of temperatures to analyse
N = int(raw_input())
# Return 0 and exit if we have no temperatures
if (N <= 0):
print 0
exit()
# The N temperatures expressed as integers ranging from -273 to 5526
temperatures = map(int, raw_input().split(' '))
minValue = min(temperatures, key=lambda x:abs(x))
# If two numbers are equally close to 0,
# the positive integer has to be considered closest to 0
if minValue < 0 and abs(minValue) in temperatures:
print abs(minValue)
else:
print minValue
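# A more compact variant (sketch): encode the tie-break in the key itself,
#   min(temperatures, key=lambda x: (abs(x), -x))
# since for equal |x| the positive value has the smaller -x and therefore wins.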
|
"""Script to run image capture screenshots for state data pages.
"""
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from datetime import datetime
import io
import os
from pytz import timezone
import sys
import time
import boto3
from loguru import logger
from captive_browser import CaptiveBrowser
from url_source import load_one_source
parser = ArgumentParser(
description=__doc__,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument(
'--temp-dir',
default='/tmp/public-cache',
help='Local temp dir for snapshots')
parser.add_argument(
'--s3-bucket',
default='covid-data-archive',
help='S3 bucket name')
parser.add_argument('--states',
default='',
help='Comma-separated list of state 2-letter names. If present, will only screenshot those.')
parser.add_argument('--public-only', action='store_true', default=False,
help='If present, will only snapshot public website and not state pages')
parser.add_argument('--push-to-s3', dest='push_to_s3', action='store_true', default=False,
help='Push screenshots to S3')
parser.add_argument('--replace-most-recent-snapshot', action='store_true', default=False,
help='If present, will first delete the most recent snapshot for the state before saving '
'new screenshot to S3')
_PUBLIC_STATE_URL = 'https://covidtracking.com/data/'
_STATES_LARGER_WINDOWS = ['DE', 'IN', 'MA', 'NC', 'OK']
class S3Backup():
def __init__(self, bucket_name: str):
self.s3 = boto3.resource('s3')
self.bucket_name = bucket_name
self.bucket = self.s3.Bucket(self.bucket_name)
# for now just uploads image (PNG) file with specified name
def upload_file(self, local_path: str, s3_path: str):
self.s3.meta.client.upload_file(
local_path, self.bucket_name, s3_path,
ExtraArgs={'ContentType': 'image/png'})
def delete_most_recent_snapshot(self, state: str):
state_file_keys = [file.key for file in self.bucket.objects.all() if state in file.key]
most_recent_state_key = sorted(state_file_keys, reverse=True)[0]
self.s3.Object(self.bucket_name, most_recent_state_key).delete()
# saves screenshot of data_url to specified path. takes 5 sec. can throw exception on load fail
def screenshot_to_path(data_url, path, browser):
logger.info(f" 1. get content from {data_url}")
if not browser.navigate(data_url):
logger.error(" get timed out -> skip")
return
logger.info(f" 2. wait for 5 seconds")
time.sleep(5)
logger.info(f" 3. save screenshot to {path}")
browser.screenshot(path)
def screenshot(state, data_url, args, s3, browser):
logger.info(f"Screenshotting {state} from {data_url}")
timestamp = datetime.now(timezone('US/Eastern')).strftime("%Y%m%d-%H%M%S")
filename = "%s-%s.png" % (state, timestamp)
local_path = os.path.join(args.temp_dir, filename)
try:
screenshot_to_path(data_url, local_path, browser)
except Exception as exc:
logger.error(f" Failed to screenshot {state}!")
raise exc
if args.push_to_s3:
s3_path = os.path.join('state_screenshots', state, filename)
if args.replace_most_recent_snapshot:
logger.info(f" 3a. first delete the most recent snapshot")
s3.delete_most_recent_snapshot(state)
logger.info(f" 4. push to s3 at {s3_path}")
s3.upload_file(local_path, s3_path)
def main(args_list=None):
if args_list is None:
args_list = sys.argv[1:]
args = parser.parse_args(args_list)
browser = CaptiveBrowser()
s3 = S3Backup(bucket_name=args.s3_bucket)
# get states info from API
src = load_one_source("google-states-csv")
state_info_df = src.df
failed_states = []
def screenshot_with_size_handling(state, data_url):
# hack: if state needs to be bigger, capture that too. public site is huge.
current_size = browser.driver.get_window_size()
if state in _STATES_LARGER_WINDOWS:
logger.info("temporarily resize browser to capture longer state pages")
browser.driver.set_window_size(current_size["width"], current_size["height"] * 2)
elif state == 'public':
logger.info("temporarily resize browser to capture longer public page")
browser.driver.set_window_size(current_size["width"], current_size["height"] * 4)
try:
screenshot(state, data_url, args, s3, browser)
except Exception:
failed_states.append(state)
finally:
logger.info("reset browser to original size")
browser.driver.set_window_size(current_size["width"], current_size["height"])
# screenshot public state site
screenshot_with_size_handling('public', _PUBLIC_STATE_URL)
if args.public_only:
logger.info("Not snapshotting state pages, was asked for --public-only")
return
# screenshot state images
if args.states:
states = args.states.split(',')
for state in states:
data_url = state_info_df.loc[state_info_df.state == state].head(1).data_page.values[0]
screenshot_with_size_handling(state, data_url)
else:
for idx, r in state_info_df.iterrows():
# if idx > 1:
# break
state = r["location"]
data_url = r["data_page"]
screenshot_with_size_handling(state, data_url)
if failed_states:
logger.error(f"Failed states for this run: {','.join(failed_states)}")
if __name__ == "__main__":
main()
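# Invocation sketch (the script name is whatever this file is saved as):
#   python <this_script>.py --states NY,CA --push-to-s3
# or programmatically, bypassing sys.argv:
#   main(['--states', 'NY,CA'])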
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.optim.lr_scheduler import StepLR
from tqdm import tqdm
torch.manual_seed(0)
def p_k(x):
#target density: p(x) = 1/Z exp(-E(x))
#TODO: higher dimensional distribution
#TODO: let p_k(x) be graphical model posterior
x1, x2 = torch.chunk(x, chunks=2, dim=1)
norm = torch.sqrt(x1**2 + x2**2)
exp1 = torch.exp(-0.5 * ((x1 - 2) / 0.6)**2)
exp2 = torch.exp(-0.5 * ((x1 + 2) / 0.6)**2)
u = 0.5 * ((norm - 2)/0.4)**2 - torch.log(exp1 + exp2 + 1e-8)
return torch.exp(-u)
class FreeEnergyLoss(nn.Module):
def __init__(self, density):
super(FreeEnergyLoss, self).__init__()
self.density = density
def forward(self, xk, logdet_jacobians):
logdet_jacobians_sum = sum(logdet_jacobians)
return (-logdet_jacobians_sum - torch.log(self.density(xk) + 1e-8)).mean()
class PlanarFlow(nn.Module):
def __init__(self, dim):
super(PlanarFlow, self).__init__()
self.weight = nn.Parameter(torch.Tensor(1, dim))
self.bias = nn.Parameter(torch.Tensor(1))
self.scale = nn.Parameter(torch.Tensor(1, dim))
self.tanh = nn.Tanh()
self.reset_parameters()
def reset_parameters(self):
self.weight.data.uniform_(-0.01, 0.01)
self.scale.data.uniform_(-0.01, 0.01)
self.bias.data.uniform_(-0.01, 0.01)
def forward(self, x):
activation = F.linear(x, self.weight, self.bias)
return x + self.scale * self.tanh(activation)
class PlanarFlow_LogDetJacobian(nn.Module):
def __init__(self, planar):
super(PlanarFlow_LogDetJacobian, self).__init__()
self.weight = planar.weight
self.bias = planar.bias
self.scale = planar.scale
self.tanh = planar.tanh
def forward(self, x):
activation = F.linear(x, self.weight, self.bias)
psi = (1 - self.tanh(activation)**2) * self.weight
det_grad = 1 + torch.mm(psi, self.scale.t())
return torch.log(det_grad.abs() + 1e-8)
class NormalizingFlow(nn.Module):
def __init__(self, dim, flow_length):
super(NormalizingFlow, self).__init__()
self.transforms = nn.Sequential(*(
PlanarFlow(dim) for _ in range(flow_length)
))
self.logdet_jacobians = nn.Sequential(*(
PlanarFlow_LogDetJacobian(t) for t in self.transforms
))
def forward(self, x):
logdet_jacobians_output = []
for transform, logdet_jacobian in zip(self.transforms, self.logdet_jacobians):
logdet_jacobians_output.append(logdet_jacobian(x)) #forward call on prev sample
x = transform(x)
xk = x
return xk, logdet_jacobians_output
use_gpu = torch.cuda.is_available()
#instantiate NF
net = NormalizingFlow(dim=2, flow_length=8)
if use_gpu:
print "found CUDA GPU..."
net = net.cuda()
print net
#define loss and optimizer
criterion = FreeEnergyLoss(density=p_k)
#optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
optimizer = optim.RMSprop(net.parameters(), lr=0.01)
scheduler = StepLR(optimizer, step_size=4, gamma=0.5) #half learning rate every 4 epochs
#training parameters
xdim = 2
num_iter = 16
batch_size = 512
generate_plots = True
print "training..."
training_loss_tot = []
learning_rate_schedule = []
for epoch in tqdm(range(16)):
scheduler.step()
running_loss = 0.0
for iteration in range(num_iter):
data = torch.zeros(batch_size, xdim).normal_(mean=0, std=1)
if use_gpu:
data = Variable(data.cuda())
else:
data = Variable(data)
optimizer.zero_grad()
xk, logdet_jacobians = net(data)
loss = criterion(xk, logdet_jacobians)
loss.backward()
optimizer.step()
running_loss += loss.data[0].cpu()
#end for
training_loss_tot.append(running_loss / float(num_iter))
learning_rate_schedule.append(scheduler.get_lr())
print "epoch: %4d, loss: %.3f" %(epoch+1, running_loss / float(num_iter))
if (generate_plots):
samples = torch.zeros(1000, xdim).normal_(mean=0, std=1)
if use_gpu:
samples = Variable(samples.cuda())
else:
samples = Variable(samples)
xk, logdetj = net(samples)
plt.figure()
plt.scatter(xk.data.cpu().numpy()[:,0], xk.data.cpu().numpy()[:,1])
plt.title('epoch: ' + str(epoch))
plt.savefig('./figures/nf_epoch_' + str(epoch) + '.png')
#end for
print "finished training..."
#generate plots
#plot original density
x1 = np.linspace(-5, 5, 300)
x2 = np.linspace(-5, 5, 300)
x1, x2 = np.meshgrid(x1, x2)
shape = x1.shape
x1 = x1.ravel()
x2 = x2.ravel()
xt = np.c_[x1, x2]
xt = torch.FloatTensor(xt)
xt = Variable(xt)
gt_density = p_k(xt).data.cpu().numpy().reshape(shape)
plt.figure()
plt.imshow(gt_density, cmap='summer')
plt.savefig('./figures/nf_ground_truth.png')
#plot training loss
plt.figure()
plt.plot(training_loss_tot, label='RMSProp')
plt.title("Normalizing Flow Training Loss")
plt.xlabel("Epoch"); plt.ylabel("Free Energy Loss")
plt.legend(); plt.grid(True);
plt.savefig('./figures/nf_training_loss.png')
#plot learning rate schedule
plt.figure()
plt.plot(learning_rate_schedule, label='learning rate')
plt.title("NF learning rate schedule")
plt.xlabel("Epoch"); plt.ylabel("Learning Rate")
plt.legend(); plt.grid(True)
plt.savefig('./figures/nf_lr_schedule.png')
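# Note: this script targets legacy APIs (Python 2 print statements, torch.autograd.Variable,
# indexing a 0-dim loss with loss.data[0]). On PyTorch >= 0.4 the idiomatic equivalents
# would be plain tensors and loss.item(), and the print statements would need parentheses.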
|
import os
import logging
import re
import random
import requests
from bs4 import BeautifulSoup
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class PatternFinder:
headers = [
{"Accept-Language": "en-US,en;q=0.5", "User-Agent": "Mozilla/5.0"},
{"Accept-Language": "en-US,en;q=0.5",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:12.0) Gecko/20100101 Firefox/12.0", "Accept": "text/html"},
{"Accept-Language": "en-US,en;q=0.5",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/64.0.3282.140 Safari/537.36 Edge/17.17134", "Accept": "text/html"},
{"Accept-Language": "en-US,en;q=0.5",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/74.0.3729.169 Safari/537.36", "Accept": "text/html"},
{"Accept-Language": "en-US,en;q=0.5",
"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 (KHTML, "
"like Gecko) Mobile/15E148", "Accept": "text/html"},
{"Accept-Language": "en-US,en;q=0.5",
"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 12_2 like Mac OS X) AppleWebKit/605.1.15 "
"(KHTML, like Gecko) Mobile/15E148", "Accept": "text/html"}
]
def __init__(self, url: str, pattern_to_search: str = "div", x: int = 5):
"""
        generally lyrics are written in <p> tags contained in a <div> tag.
This class will search for any such pattern that is present in the webpage
:param url: url to search inside
        :param pattern_to_search: CSS selector to search for (default: "div")
        :param x: threshold on the number of <p> tags for a div to be selected as the lyrics div
"""
self.url = url
self.pattern_to_search = pattern_to_search
self.p_threshold = x
self.page_soup = None
def create_soup(self):
header_ = random.choice(self.headers)
page_content = requests.get(self.url, header_)
self.page_soup = BeautifulSoup(page_content.content, 'html.parser')
def find_match(self, auto_purify=False, auto_save=False, save_raw=False):
self.create_soup() # request page content and create a beautiful soup object
all_div_selector = self.pattern_to_search
divs = self.page_soup.select(all_div_selector)
if divs: # if there's one div, that has at-least 5 <p> tags inside it.
# find the best matching div
all_p_counts = lambda divs: [len(div_html.select('p')) for div_html in divs]
p_counts = all_p_counts(divs)
max_index = lambda list_: list_.index(max(list_))
best_div_index = max_index(p_counts)
best_div_match = divs[best_div_index]
max_p_count = p_counts[best_div_index]
if self.p_threshold > max_p_count:
res = input(
f"Max <p> count({max_p_count}) is much less than threshold({self.p_threshold}) wish to skip "
f"finding lyrics for this link (y|Y, n|N) ?")
if res.lower() == 'y' or res.lower() == 'yes':
pass
else:
logger.info("You chose to skip.")
return None
logger.info(f"Best matched <div> has {max_p_count} <p> tags. Selected 1 out of {len(p_counts)},"
f" index={best_div_index}")
# -------------------- extract text from the matching div ------------------------
text = best_div_match.text # .text will automatically remove all <script> part, if there's any captured
if save_raw:
self.save_(text, 'raw')
if not auto_purify:
if auto_save: self.save_(text)
return text
else:
text_ = self.purify_content(text)
if auto_save: self.save_(text_)
return text_
@staticmethod
def purify_content(text):
mod_text = re.sub('\n{3,}', '\n', text)
mod_text = re.sub('x\d+', ' ', mod_text)
return mod_text
@staticmethod
def save_(text, filename: str = 'temp'):
with open(filename, 'w') as lyrics_:
lyrics_.write(text)
logger.info(f"lyrics saved in file: {os.path.realpath(filename)}")
if __name__ == '__main__':
P = PatternFinder(url="https://www.lyricsmint.com/roy/chittiyaan-kalaiyaan")
P.find_match(auto_save=True, auto_purify=True)
|
import textwrap
from ...core.model.env_vars import INSTALLATION_NAMESPACE
def get_deployment_yaml(name, image="busybox"):
return textwrap.dedent(
f"""\
apiVersion: apps/v1
kind: Deployment
metadata:
name: {name}
namespace: {INSTALLATION_NAMESPACE}
labels:
app: {name}
spec:
replicas: 1
selector:
matchLabels:
app: {name}
template:
metadata:
labels:
app: {name}
spec:
containers:
- name: runner
image: {image}
imagePullPolicy: Always
"""
)
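# Usage sketch: render a manifest for a hypothetical "demo" deployment.
# print(get_deployment_yaml("demo", image="busybox"))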
|
import http.client
import json
import logging
def fetch_google_first_name_last_name(access_token):
"""
Fetch google first name and google last name from google plus
:return: first name, last name
"""
try:
conn = http.client.HTTPSConnection('www.googleapis.com')
conn.request('GET', '/plus/v1/people/me',
headers={'Authorization': 'Bearer ' + access_token})
rs = conn.getresponse()
except http.client.HTTPException:
logging.exception('Google API Service exception')
return '', ''
if rs.status == 200:
data = json.loads(rs.read())
if 'name' in data.keys():
name = data.get('name')
last_name = name.get('familyName')
first_name = name.get('givenName')
return first_name, last_name
return '', ''
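# Note: the Google+ endpoint used above (/plus/v1/people/me) was shut down by Google in 2019;
# an equivalent lookup today would go through the Google People API instead.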
|
import itertools
while True:
try:
x,y=input().split()
x,y=list(x),int(y)
try:
p=itertools.permutations(x)
for i in range(y-1): p.__next__()
print("".join(x),y,"=","".join(p.__next__()))
except:
print("".join(x),y,"=","No permutation")
continue
except:
exit(0)
|
from sys import stdout
H, W = map(int, input().split())
a = [input() for _ in range(H)]
h = [all(c == '.' for c in a[i]) for i in range(H)]
w = [True] * W
for i in range(H):
for j in range(W):
w[j] = w[j] and a[i][j] == '.'
for i in range(H):
if h[i]:
continue
for j in range(W):
if w[j]:
continue
stdout.write(a[i][j])
stdout.write('\n')
|
import pandas as pd
import numpy as np
import os
closing_message = ' < < < \t < < < \n'
def binary_transform(X): return X.astype(bool)*1
def relative_transform(X): return X.apply(lambda x: x/x.sum(), axis=0)
def z_transform(X): return X.apply(lambda x: (x-x.mean())/x.std(), axis=0)
def mean_transform(X): return X.apply(lambda x: x/x.mean(), axis=0)
###################################################################
# UTILITIES
def built_in_transform():
bits = {}
bits['binary'] = binary_transform
bits['relative'] = relative_transform
bits['z'] = z_transform
bits['mean'] = mean_transform
return bits
def empirical_distribution(X,scale='linear',n_bins=10):
    '''Empirical distribution of X: return histogram bin edges and the normalized bin frequencies.'''
x_l = X.min()
x_u = X.max()
if scale=='linear':
bins=np.linspace(x_l,x_u,n_bins)
if scale=='log':
bins=np.logspace(np.log10(x_l),np.log10(x_u),n_bins)
p=np.histogram(a=X,bins=bins)
bpoint = p[1]
probs = p[0]/p[0].sum()
return bpoint, probs
def size_cutoff(X,l,u):
Y=X.copy()
# keep only realizations that lay in the range
S=Y.sum(axis=0)
S=S[S>l][S<u]
Y = Y[S.index]
#throw away componets that do not appear in any realization
K=Y.sum(axis=1)
K=K[K>0]
Y = Y.loc[K.index]
del S
del K
return Y
def sparsity_cutoff(X,pc=1,om=1):
Y=X.copy()
M=(1*Y.astype(bool)).sum(axis=1)
Y = Y.loc[ M[M>om].index ]
B=(1*Y.astype(bool)).sum(axis=0)
Y=Y[ B[B>pc].index ]
del B
del M
return Y
def core_protocol(std_t,core_t,core_cut,index_name='taxon_name'):
configurations = set(std_t.columns).intersection(core_t.columns)
components = set(std_t.index).intersection(core_t.index)
std_t = std_t[configurations].loc[components]
core_t = core_t[configurations].loc[components]
C = pd.DataFrame(columns=configurations,index=components)
for s in list(configurations):
r,p = std_t[s],core_t[s]
C[s] = r.loc[ p[ p>core_cut ].index ]
C=C.fillna(0)
V=C.sum(axis=0)
C=C[ V[V>0].index ]
U=C.sum(axis=1)
C=C.loc[ U[U>0].index ]
C.index=C.index.rename(index_name)
return C
###################################################################
# TABLE OBJECT
class table:
def __init__(self,T,cut=True,lower_size=0,upper_size=np.inf,pc=1,om=1,verbose=False):
if verbose==False:
self.vm = 0
elif verbose==True:
self.vm = 1
print(self.vm*' > > > table initialization: ',end='')
self.form = {}
        # the 'original' table is stored twice: in self.form for internal use and as self.original for external use
if pc!=1 or om!=1:
Y = sparsity_cutoff(X=T,pc=pc,om=om)
else:
Y = T.copy()
if cut==True:
Z = size_cutoff(X=Y,l=lower_size,u=upper_size)
else:
Z = Y.copy()
self.form['original'] = Z.copy()
self.original = Z.copy()
# name of the set of variables
self.annotation = self.original.index.name
self.shape = self.original.shape
self.samples = list(self.original.columns)
# name of each variable
self.components = list(self.original.index)
self.realization_size = self.original.sum(axis=0)
#self.n_components = (self.original.astype(bool)*1).sum(axis=0)
self.bits = built_in_transform()
self.binned = False
self.partitions = {'original':{'original':list(self.samples)}}
self.observables = {}
print(self.vm*'done',end=self.vm*'\n')
print(self.vm*closing_message,end=self.vm*'\n')
def add_partition(self,partition,name):
        ''' A partition of the dataset is a dictionary that has group labels as keys
        and lists of samples as values. It enables the per-group observables calculation.
        To add a partition to the table object, provide the dictionary itself
        together with a name for the partition; see the example sketch below.'''
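        # Example (sketch with made-up labels):
        #   t.add_partition({'groupA': ['s1', 's2'], 'groupB': ['s3']}, name='condition')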
self.partitions[name]=partition
def del_partition(self,name):
self.partitions.pop(name)
if name=='size':
self.binned = False
def size_partitioning(self,scale='log',n_bins=10):
print(self.vm*' > > > size partitioning \n')
if self.binned == True:
pass
else:
print(self.vm*' > > > initialization: ',end='')
A = self.original
            #size=A.sum(axis=0) # self.size, new attribute
print('done')
# binned samples and frequencies
print(self.vm*' > > > size distribution: ',end='')
bp,p_bs = empirical_distribution(X=self.realization_size,scale=scale,n_bins=n_bins)
self.binned_sizes = bp
bs=(.5*(bp[1:]+bp[:-1])).astype(int)
self.size_distribution = dict(zip(bs,p_bs)) #
print('done')
print(' > > > size partitioning: ',end='')
# initialize sample container
mc = {}
for b in bs: mc[b] = []
# create the structure to feed into pandas multi-index
for t in self.realization_size.index:
v = np.argmin(np.abs( self.realization_size[t]-bs ))
#mc[t]=bs[v]
mc[ bs[v] ].append(t)
self.partitions['size']=mc
print('added')
self.binned = True
print(closing_message)
def built_in_transform(self,which=[]):
''' apply the built in transforms to the dataset'''
print(self.vm*' > > > built-in transform',end=self.vm*'\n')
if which==[]: transforms = self.bits.keys()
else: transforms = which
for t in transforms:
print(self.vm*f' > > > {t} transform: ',end=self.vm*'\n')
f=self.bits[t]
self.form[t]=f(self.original)
print(self.vm*'done',end=self.vm*'\n')
print(self.vm*closing_message)
    # TODO: add a matching "del" method for every "add" method
def add_transform(self,fury):
''' fury: a dictionary with function name as keys and the function itself as value'''
for f in fury.keys():
if f not in self.bits.keys():
g=fury[f]
else:
print(f'{f} already exists. Built-in transform will be provided')
g=self.bits[f]
print(f' > > > {f} transform: ',end='')
self.form[f]=g(self.original)
print('done')
print(closing_message)
def del_transform(self,transform):
try:
self.form.pop(transform)
except KeyError:
print(f' * * > {transform} related observables do not exist')
pass
def get_observables(self,zipf=False,out=False,grouping='original',axis=1):
print(self.vm*f' > > > observables {grouping}',end=self.vm*'\n')
#if self.observables[grouping] !=
fake_cols = pd.MultiIndex.from_tuples( list(zip(*[['fake1'],['fake2']])))
report = pd.DataFrame(index=self.components,columns=fake_cols)
report.index=report.index.rename(self.annotation)
        # the size binning has to be done regardless: build the report already binned,
        # and the un-binned report is then simply the average over the bins
P = self.partitions[grouping]
for f in self.form.keys():
            print(self.vm*f' > > > {f} processing: ',end=self.vm*'\n')
# create here multiindex
# Ho le osservabili sopra e le varie partizioni sotto nel multiindex
r = pd.DataFrame(index=self.components,columns=P.keys())
r.index = r.index.rename(self.annotation)
q = pd.DataFrame(index=self.components,columns=P.keys())
q.index = q.index.rename(self.annotation)
for p in P.keys():
samples = P[p]
r[p] = self.form[f][samples].mean(axis=axis)
q[p] = self.form[f][samples].var(axis=axis)
            # index for the partitions
J=list(P.keys())
            # index for the observables, goes on the top level
Im=[f'{f} mean']*len(J)
Iv=[f'{f} var']*len(J)
r.columns = pd.MultiIndex.from_tuples( list(zip(*[Im,J])) )
q.columns = pd.MultiIndex.from_tuples( list(zip(*[Iv,J])) )
report = report.merge(r,on=self.annotation,how='outer')
report = report.merge(q,on=self.annotation,how='outer')
print(self.vm*'done',end=self.vm*'\n')
if zipf==True:
print(self.vm*f' > > > zipf processing: ',end=self.vm*'\n')
r = pd.DataFrame(index=self.components,columns=P.keys())
r.index = r.index.rename(self.annotation)
for p in P.keys():
z = report['relative mean'][p].sort_values(ascending=False)
rank = np.arange(0, z.shape[0] )+1
r[p] = pd.Series(index=z.index,data=rank)
J=list(P.keys())
Iz=['zipf rank']*len(J)
r.columns = pd.MultiIndex.from_tuples( list(zip(*[Iz,J])) )
report = report.merge(r,on=self.annotation,how='outer')
print(self.vm*'done',end=self.vm*'\n')
del report['fake1']
self.observables[grouping] = report
print(self.vm*closing_message,end=self.vm*'\n')
if out==True: return report
else: pass
def del_observables(self,partition):
try:
self.observables.pop(partition)
except KeyError:
print(f' * * > {partition} related observables do not exist')
def get_annotation(self):
return self.annotation
def get_shape(self):
return self.shape
def get_samples(self):
return self.samples
def get_components(self):
return self.components
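# Usage sketch (hypothetical count table T with components as rows and samples as columns):
#   T = pd.read_csv('counts.csv', index_col=0)
#   t = table(T, verbose=True)
#   t.built_in_transform(which=['relative', 'binary'])
#   t.size_partitioning(scale='log', n_bins=10)
#   report = t.get_observables(zipf=True, out=True, grouping='size')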
|
import sys; sys.path.append('../common')
import mylib as utils # pylint: disable=import-error
# Read args
filename = 'input.txt' if len(sys.argv) == 1 else sys.argv[1]
print(filename, '\n')
###########################
# region COMMON
REMINDER_VALUE = 20201227
def getNextValue(value: int, subjectNumber: int) -> int:
return (value * subjectNumber) % REMINDER_VALUE
def transformSubjectNumber(loopSize: int, subjectNumber: int) -> int:
value = 1
for _ in range(loopSize):
value = getNextValue(value, subjectNumber)
return value
INITIAL_SUBJECT_NUMBER = 7
def generatePublicKey(loopSize: int) -> int:
return transformSubjectNumber(loopSize, subjectNumber=INITIAL_SUBJECT_NUMBER)
def generateEncryptionKey(loopSize: int, publicKey: int) -> int:
return transformSubjectNumber(loopSize, subjectNumber=publicKey)
def findLoopSize(publicKey: int) -> int:
loopSize = 1
value = getNextValue(value=1, subjectNumber=INITIAL_SUBJECT_NUMBER)
while value != publicKey:
loopSize += 1
value = getNextValue(value, subjectNumber=INITIAL_SUBJECT_NUMBER)
return loopSize
# endregion COMMON
###########################
###########################
# FETCH DATA
###########################
lines = utils.readFileLines(filename)
cardPublicKey = int(lines[0])
doorPublicKey = int(lines[1])
########
# PART 1
########
print('--------------------------')
print('Card public key: %d' % cardPublicKey)
print('Door public key: %d' % doorPublicKey)
print('--------------------------\n')
print('Finding card loop size...')
cardLoopSize = findLoopSize(cardPublicKey)
print('Card loop size: %d\n' % cardLoopSize)
print('Finding card encryption key using door public key...')
cardEncryptionKey = generateEncryptionKey(cardLoopSize, doorPublicKey)
print('1) The encryption key for card-door handshaking: %d\n' % cardEncryptionKey)
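# Optional sanity check (sketch): the handshake is symmetric, so deriving the door
# loop size and transforming the card public key with it must yield the same key.
#   doorLoopSize = findLoopSize(doorPublicKey)
#   assert generateEncryptionKey(doorLoopSize, cardPublicKey) == cardEncryptionKey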
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import json
import os.path
import sys
from datetime import date
def add_worker(staff, name, post, year):
"""
Добавить данные о работнике.
"""
staff.append(
{
"name": name,
"post": post,
"year": year
}
)
return staff
def display_workers(staff):
"""
Отобразить список работников.
"""
    # Check that the list of workers is not empty.
if staff:
        # Table header.
line = '+-{}-+-{}-+-{}-+-{}-+'.format(
'-' * 4,
'-' * 30,
'-' * 20,
'-' * 8
)
print(line)
print(
'| {:^4} | {:^30} | {:^20} | {:^8} |'.format(
"No",
"Ф.И.О.",
"Должность",
"Год"
)
)
print(line)
        # Print the data for all workers.
for idx, worker in enumerate(staff, 1):
print(
'| {:>4} | {:<30} | {:<20} | {:>8} |'.format(
idx,
worker.get('name', ''),
worker.get('post', ''),
worker.get('year', 0)
)
)
print(line)
else:
print("Список работников пуст.")
def select_workers(staff, period):
"""
Выбрать работников с заданным стажем.
"""
    # Get the current date.
    today = date.today()
    # Build the list of matching workers.
result = []
for employee in staff:
if today.year - employee.get('year', today.year) >= period:
result.append(employee)
    # Return the list of selected workers.
return result
def save_workers(file_name, staff):
"""
Сохранить всех работников в файл JSON.
"""
# Открыть файл с заданным именем для записи.
with open(file_name, "w", encoding="utf-8") as fout:
        # Serialize the data to JSON format.
        # Set ensure_ascii=False so Cyrillic characters are kept readable.
json.dump(staff, fout, ensure_ascii=False, indent=4)
def load_workers(file_name):
"""
Загрузить всех работников из файла JSON.
"""
# Открыть файл с заданным именем для чтения.
with open(file_name, "r", encoding="utf-8") as fin:
return json.load(fin)
def main(command_line=None):
    # Create a parent parser that holds the data file name option.
file_parser = argparse.ArgumentParser(add_help=False)
file_parser.add_argument(
"-d",
"--data",
action="store",
required=False,
help="The data file name"
)
    # Create the main command line parser.
parser = argparse.ArgumentParser("workers")
parser.add_argument(
"--version",
action="version",
version="%(prog)s 0.1.0"
)
subparsers = parser.add_subparsers(dest="command")
    # Create the subparser for adding a worker.
add = subparsers.add_parser(
"add",
parents=[file_parser],
help="Add a new worker"
)
add.add_argument(
"-n",
"--name",
action="store",
required=True,
help="The worker's name"
)
add.add_argument(
"-p",
"--post",
action="store",
help="The worker's post"
)
add.add_argument(
"-y",
"--year",
action="store",
type=int,
required=True,
help="The year of hiring"
)
    # Create the subparser for displaying all workers.
_ = subparsers.add_parser(
"display",
parents=[file_parser],
help="Display all workers"
)
    # Create the subparser for selecting workers.
select = subparsers.add_parser(
"select",
parents=[file_parser],
help="Select the workers"
)
select.add_argument(
"-P",
"--period",
action="store",
type=int,
required=True,
help="The required period"
)
    # Parse the command line arguments.
args = parser.parse_args(command_line)
data_file = args.data
    if not data_file:
data_file = os.environ.get("WORKERS_DATA")
if not data_file:
print("The data file name is absent", file=sys.stderr)
sys.exit(1)
    # Load the workers from the file if it exists.
is_dirty = False
if os.path.exists(data_file):
workers = load_workers(data_file)
else:
workers = []
    # Add a worker.
if args.command == "add":
workers = add_worker(
workers,
args.name,
args.post,
args.year
)
is_dirty = True
    # Display all workers.
elif args.command == "display":
display_workers(workers)
    # Select the required workers.
elif args.command == "select":
selected = select_workers(workers, args.period)
display_workers(selected)
    # Save the data to the file if the list of workers was modified.
if is_dirty:
save_workers(data_file, workers)
if __name__ == "__main__":
main()
|
import pyautogui
import pydirectinput
import random
import time
import sys
import os
confidence = 0.85 #0.75
def click(loca):
if loca != None:
R = random.randint(2, 8)
C = random.randint(2,7)
pyautogui.moveTo(loca[0]+loca[2]//R,loca[1]+loca[3]//C,duration = 0.1)
pydirectinput.click()
return True
else:
return False
def moveTo(loca):
if loca != None:
R = random.randint(2, 8)
C = random.randint(2,7)
pyautogui.moveTo(loca[0]+loca[2]//R,loca[1]+loca[3]//C,duration = 0.1)
return True
else:
return False
def clickWhenSee(img):
tmp = findImgRegion(img,0,0,1400,800)
if tmp != None:
click(tmp)
return True
else:
return False
def clickWhenSeeRegion(img,x,y,w,h):
tmp = findImgRegion(img,x,y,w,h)
if tmp != None:
click(tmp)
return True
else:
return False
def clickWhenSeeRegionS(imgs,x,y,w,h):
idx, tmp = findImgsRegion(imgs,x,y,w,h)
if tmp != None:
click(tmp)
return True
else:
return False
def moveToWhenSee(img):
tmp = findImgRegion(img,0,0,1400,800)
if tmp != None:
moveTo(tmp)
return True
else:
return False
def tillFindClick(img):
tmp = None
while tmp == None:
tmp = findImgRegion(img,0,0,1400,800)
#time.sleep(0.3)
click(tmp)
def tillFindMoveTo(img):
tmp = None
while tmp == None:
tmp = findImgRegion(img,0,0,1400,800)
#time.sleep(1)
moveTo(tmp)
def tillFindClickTime(imgs,sec):
tmp = None
while tmp == None and sec > 0:
idx,tmp = findImgs(imgs)
#time.sleep(0.1)
sec-=1
if tmp != None:
randomDis = random.randint(5, 40)
clickRelativeImg(tmp,0,60+randomDis)
return tmp
def clickRelative(disW,disH):
R = random.randint(1, 10)
C = random.randint(1,11)
pyautogui.moveRel(R+disW,C+disH,duration = 0.1)
pydirectinput.click()
def clickRelativeImg(loca,disW,disH):
if loca != None:
R = random.randint(2, 8)
C = random.randint(2,7)
pyautogui.moveTo(loca[0]+loca[2]//R+disW,loca[1]+loca[3]//C + disH,duration = 0.1)
pydirectinput.click()
return True
else:
return False
def clickRelativeImgBotRight(loca,disW,disH):
if loca != None:
R = random.randint(2, 8)
C = random.randint(2,7)
pyautogui.moveTo(loca[0]+loca[2]+disW+R,loca[1]+loca[3] + disH+C,duration = 0.1)
pydirectinput.click()
return True
else:
return False
def findImgRegion(img,x,y,w,h):
found = pyautogui.locateOnScreen(img, region = (x,y,w,h), confidence = confidence)
if found != None:
print(os.path.split(img)[1].split('.')[0]+' has been found')
else:
#print(img+' is not found')
pass
return found
def clickImgsRel(Imgs,disW,disH):
idx,loca = findImgs(Imgs)
isSuccess = clickRelativeImg(loca,disW,disH)
return isSuccess
def clickImgsRelBotRight(Imgs,disW,disH):
idx,loca = findImgs(Imgs)
isSuccess = clickRelativeImgBotRight(loca,disW,disH)
return isSuccess
def findImgs(Imgs):
found = None
for i,each in enumerate(Imgs):
found = pyautogui.locateOnScreen(each, region = (0,0,1400,800), confidence = confidence)
if found != None:
print(os.path.split(each)[1].split('.')[0]+' has been found')
break
else:
#print(each+' is not found')
pass
return i,found
def findImgsRegion(Imgs,x,y,w,h):
found = None
for i,each in enumerate(Imgs):
found = pyautogui.locateOnScreen(each, region = (x,y,w,h), confidence = confidence)
if found != None:
print(os.path.split(each)[1].split('.')[0]+' has been found')
break
else:
#print(each+' is not found')
if i == len(Imgs) - 1:
i = i + 1
                print('nothing was found')
return i,found
def hasImgs(imgs):
idx,found = findImgs(imgs)
if found != None:
return True
else:
return False
def hasImgsRegion(imgs,x,y,w,h):
idx,found = findImgsRegion(imgs,x,y,w,h)
if found != None:
return True
else:
return False
def findImgSize(img):
found = findImgRegion(img,0,0,1400,800)
if found !=None:
return found[2],found[3]
else:
print('size is not available')
return None
def viewLeftRight(screenW,screenH,dis):
R = random.randint(1, 10)
C = random.randint(1,11)
pyautogui.moveTo(screenW//2-R,screenH//2+C,duration = 0.1)
pyautogui.dragRel(dis, 0, duration=0.2)
def viewUpDown(screenW,screenH,dis):
R = random.randint(1, 55)
C = random.randint(1,44)
pyautogui.moveTo(screenW//2-R,screenH//2-C,duration = 0.1)
pyautogui.dragRel(0, dis, duration=0.15)
def timedExecute(func,sec,arguments):
while sec>0:
func(*arguments)
#time.sleep(1)
sec-=1
def randomClickWithinRegion(x1,x2,y1,y2):
x = random.randint(x1, x2)
y = random.randint(y1,y2)
pydirectinput.click(x,y)
def randomDragUpWithinRegion(x1,x2,y1,y2,dis):
x = random.randint(x1, x2)
y = random.randint(y1,y2)
pyautogui.moveTo(x,y,duration = 0.1)
pyautogui.dragRel(0, dis, duration=0.2)
def randomDragWithinRegion(x1,x2,y1,y2,disX,disY,holdTime):
x = random.randint(x1, x2)
y = random.randint(y1,y2)
pyautogui.moveTo(x,y)
pyautogui.dragRel(disX, disY, duration=holdTime)
def randomDragRightWithinRegion(x1,x2,y1,y2,dis):
x = random.randint(x1, x2)
y = random.randint(y1,y2)
pyautogui.moveTo(x,y,duration = 0.1)
pyautogui.dragRel(dis, 0, duration=0.2)
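# Usage sketch (image paths are placeholders for screenshots you provide yourself):
#   if clickWhenSee('./img/start_button.png'):
#       tillFindClick('./img/confirm_button.png')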
|
import matplotlib.pyplot as plt
from numpy import ones, diff, eye
from RobustLassoFPReg import RobustLassoFPReg
def FitVAR1(X, p=None, nu=10**9, lambda_beta=0, lambda_phi=0, flag_rescale=0):
    # This function estimates the one-step parameters of the VAR(1) process via lasso regression (on first differences)
# INPUTS
# X : [matrix] (n_ x t_end) historical series of independent variables
# p : [vector] (1 x t_end) flexible probabilities
# nu : [scalar] degrees of freedom of multivariate Student t
# lambda_beta : [scalar] lasso regression parameter for loadings
# lambda_phi : [scalar] lasso regression parameter for covariance matrix
# flag_rescale : [boolean flag] if 0 (default), the series is not rescaled before estimation
    # OUTPUTS
# output1 : [vector](n_ x 1) output1 = alpha
# output2 : [matrix](n_ x n_) output2 = b
# output3 : [matrix](n_ x n_) output3 = sig2_U
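    # Identity used below (sketch): the lasso regression is run on first differences,
    # dX_t = alpha + beta*X_{t-1} + U_t, hence X_t = alpha + (I + beta)*X_{t-1} + U_t,
    # which is why output2 is returned as eye(n_) + beta.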
## Code
dX = diff(X,1,1)
n_, t_ = dX.shape
if p is None:
p = ones((1,t_))/t_
# robust lasso + glasso regression
alpha, beta, sig2_U = RobustLassoFPReg(dX, X[:,:-1], p, nu, 10**-6, lambda_beta, lambda_phi, flag_rescale)
output1 = alpha
output2 = (eye(n_)+beta)
output3 = sig2_U
return output1, output2, output3
|
from scraping.api.views import CarViewSet
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'', CarViewSet, base_name='cars')
urlpatterns = router.urls
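# Note: Django REST Framework 3.9+ renamed the `base_name` keyword to `basename`;
# the call above assumes an older DRF release.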
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLibensemble(PythonPackage):
"""Library for managing ensemble-like collections of computations."""
homepage = "https://libensemble.readthedocs.io"
url = "https://pypi.io/packages/source/l/libensemble/libensemble-0.5.0.tar.gz"
git = "https://github.com/Libensemble/libensemble.git"
version('develop', branch='develop')
version('0.5.0', sha256='c4623171dee049bfaa38a9c433609299a56b1afb774db8b71321247bc7556b8f')
version('0.4.1', sha256='282c32ffb79d84cc80b5cc7043c202d5f0b8ebff10f63924752f092e3938db5e')
version('0.4.0', sha256='9384aa3a58cbc20bbd1c6fddfadb5e6a943d593a3a81c8665f030dbc6d76e76e')
version('0.3.0', sha256='c8efdf45d0da0ef6299ee778cea1c285c95972af70d3a729ee6dc855e66f9294')
version('0.2.0', 'ee96047594a3f5a1533f24d3b1f365f9')
version('0.1.0', '0c3d45dd139429de1a5273e5bd8e46ec')
depends_on('python@3.4:', when='@0.5.0:')
depends_on('python@2.7:2.8,3.3:', when='@:0.4.1')
depends_on('py-setuptools', type='build')
depends_on('mpi')
depends_on('py-mpi4py@2.0:', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-petsc4py', type=('build', 'run'))
depends_on('py-petsc4py@develop', type=('build', 'run'), when='@develop')
depends_on('nlopt', type=('build', 'run'))
|
# Copyright 2018 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run test phase."""
import asyncio
import argparse
import logging
import unittest
import sys
import zaza.model
import zaza.charm_lifecycle.utils as utils
def run_test_list(tests):
"""Run the tests as defined in the list of test classes in series.
:param tests: List of test class strings
:type tests: ['zaza.charms_tests.svc.TestSVCClass1', ...]
:raises: AssertionError if test run fails
"""
for _testcase in tests:
testcase = utils.get_class(_testcase)
suite = unittest.TestLoader().loadTestsFromTestCase(testcase)
test_result = unittest.TextTestRunner(verbosity=2).run(suite)
assert test_result.wasSuccessful(), "Test run failed"
def test(model_name, tests):
"""Run all steps to execute tests against the model."""
zaza.model.set_juju_model(model_name)
run_test_list(tests)
def parse_args(args):
"""Parse command line arguments.
    :param args: List of command line arguments
    :type args: [str1, str2,...]
:returns: Parsed arguments
:rtype: Namespace
"""
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--tests', nargs='+',
help='Space separated list of test classes',
required=False)
    parser.add_argument('-m', '--model-name', help='Name of model to run tests against',
required=True)
parser.add_argument('--log', dest='loglevel',
help='Loglevel [DEBUG|INFO|WARN|ERROR|CRITICAL]')
parser.set_defaults(loglevel='INFO')
return parser.parse_args(args)
def main():
"""Run the tests defined by the command line args.
Run the tests defined by the command line args or if none were provided
read the tests from the charms tests.yaml config file
"""
args = parse_args(sys.argv[1:])
level = getattr(logging, args.loglevel.upper(), None)
if not isinstance(level, int):
raise ValueError('Invalid log level: "{}"'.format(args.loglevel))
logging.basicConfig(level=level)
tests = args.tests or utils.get_charm_config()['tests']
test(args.model_name, tests)
asyncio.get_event_loop().close()
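# Usage sketch: the console script that wraps main() would be invoked roughly as
#   <entry-point> -m my-model -t zaza.charm_tests.foo.TestFoo --log DEBUG
# (the actual entry-point name is defined in the package's setup configuration).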
|
from flask import Flask, jsonify, make_response, request
app = Flask(__name__)
@app.route("/score", methods=["POST"])
def score():
features = request.json["X"]
response = make_response(jsonify({"score": features}))
return response
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
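# Usage sketch once the server is running:
#   curl -X POST -H "Content-Type: application/json" -d '{"X": [1, 2, 3]}' http://localhost:5000/score
# which should echo back {"score": [1, 2, 3]}.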
|
"""Jahnke
kenny.jahnke@ndus.edu / greensaber77@gmail.com
CSCI 160 - Spring 2022
Lab 1 Part A
Using SimpleGraphics, draw a face with a minimum of 2 eyes, 1 nose,
and 1 mouth.
"""
from simple_graphics.SimpleGraphics import *
setSize(400, 300)
# https://trinket.io/docs/colors
setBackground("hot pink")
# no matter the width of the display, the face is centered along the x-axis
sun_x = getWidth() / 2
# draw head
setFill("lime")
ellipse(sun_x, 80, 80, 60)
# draw mouth
setFill("black")
blob(sun_x - 50, 90, sun_x - 30, 110, sun_x - 10, 130, sun_x + 10, 110,
sun_x + 30, 90)
# draw eyes
setFill("deep sky blue")
circle(sun_x - 50, 65, 15)
circle(sun_x + 30, 55, 22)
setFill("indigo")
circle(sun_x - 53, 66, 8)
circle(sun_x + 25, 65, 6)
# draw nose
setLineWidth(5)
line(sun_x - 10, 60, sun_x - 20, 70)
line(sun_x - 10, 80, sun_x - 20, 70)
# All work and no play makes Jack a dull boy.
|
import os
import glob
import shutil
import re
def main():
g4_list = ['HuTaoLexer.g4', 'HuTaoParser.g4']
out = "cppout"
os.system(f'antlr4 -Dlanguage=Cpp {" ".join(g4_list)} -o {out}')
files = glob.glob("cppout/HuTao*")
for file in files:
if not (file.endswith('.h') or file.endswith('cpp')):
continue
if 'Lexer' in file:
dir_name = 'Lexer'
elif 'Parser' in file:
dir_name = 'Parser'
with open(file, 'r', encoding='utf-8') as f:
source = f.read()
source.replace('"antlr4-runtime.h"', '<antlr4-runtime.h>')
source = re.sub(r'#include "(HuTao.*\.h)"', f'#include <hutao/{dir_name}/\g<1>>', source)
with open(file, 'w', encoding='utf-8') as f:
f.write(source)
if file.endswith('.h'):
shutil.copy(file, f"../include/hutao/{dir_name}")
else:
shutil.copy(file, f"../source/{dir_name}")
if __name__ == "__main__":
main()
|
import yaml
from appium import webdriver
from appium.webdriver.webdriver import WebDriver
class Client(object):
    # install the app
driver: WebDriver
platform = "android"
@classmethod
def install_app(cls) -> WebDriver:
# caps = {}
        # # if necessary, perform the first-time install
# # caps["app"]=''
# caps["platformName"] = "android"
# caps["deviceName"] = "hogwarts"
# caps["appPackage"] = "com.xueqiu.android"
# caps["appActivity"] = ".view.WelcomeActivityAlias"
        # # work around the first-launch permission prompts
# caps["autoGrantPermissions"] = "true"
# # caps['noReset']=True
#
# cls.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
# cls.driver.implicitly_wait(10)
cls.driver = cls.initDriver("install_app")
return cls.driver
    # (re)start the app
@classmethod
def restart_app(cls) -> WebDriver:
# caps = {}
#
# caps["platformName"] = "android"
# caps["deviceName"] = "hogwarts"
# caps["appPackage"] = "com.xueqiu.android"
# caps["appActivity"] = ".view.WelcomeActivityAlias"
        # # for a faster start, keep previous data so the state left by the last case is preserved
# caps['noReset'] = True
# # caps['chromedriverExecutableDir'] = "/Users/seveniruby/projects/chromedriver/2.20"
# caps['unicodeKeyboard'] = True
# caps['resetKeyboard'] = True
# # caps["udid"]="emulator-5554"
#
# cls.driver = webdriver.Remote("http://localhost:4723/wd/hub", caps)
# cls.driver.implicitly_wait(10)
cls.driver = cls.initDriver("restart_app")
return cls.driver
@classmethod
def initDriver(cls,key):
        with open("../data/driver.yaml", "r") as f:
            driver_data = yaml.safe_load(f)
cls.platform = str(driver_data["platform"])
server = driver_data[key]['server']
implicitly_wait = driver_data[key]['implicitly_wait']
caps = driver_data[key]['caps'][cls.platform]
cls.driver = webdriver.Remote(server, caps)
cls.driver.implicitly_wait(implicitly_wait)
return cls.driver
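# Usage sketch (assumption: ../data/driver.yaml provides a "platform" key plus
# "install_app" / "restart_app" entries, each with "server", "implicitly_wait"
# and a per-platform "caps" mapping, exactly as initDriver reads them):
#
#   driver = Client.install_app()   # first start / fresh install
#   driver = Client.restart_app()   # fast restart that keeps the app state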
|
# Copyright (c) 2020 Red Hat, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from tobiko.openstack.openstackclient import _client
def port_list(*args, **kwargs):
cmd = 'openstack port list {params}'
kwargs['format'] = 'json'
return _client.execute(cmd, *args, **kwargs)
def port_show(port, *args, **kwargs):
cmd = f'openstack port show {{params}} {port}'
kwargs['format'] = 'json'
return _client.execute(cmd, *args, **kwargs)
def port_create(port_name, network_name, *args, **kwargs):
cmd = f'openstack port create {{params}} --network {network_name} '\
f'{port_name}'
kwargs['format'] = 'json'
return _client.execute(cmd, *args, **kwargs)
def port_delete(ports, *args, **kwargs):
cmd = f'openstack port delete {{params}} {" ".join(ports)}'
return _client.execute(cmd, *args, **kwargs)
def port_set(port, *args, **kwargs):
cmd = f'openstack port set {{params}} {port}'
return _client.execute(cmd, *args, **kwargs)
def port_unset(port, *args, **kwargs):
cmd = f'openstack port unset {{params}} {port}'
return _client.execute(cmd, *args, **kwargs)
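# Usage sketch (assumption: _client.execute() substitutes the {params}
# placeholder from the keyword arguments and decodes the JSON output, so the
# list/show helpers return lists/dicts of port fields):
#
#   ports = port_list()
#   details = port_show(ports[0]['ID'])
#   port_delete([p['ID'] for p in ports if p['Status'] == 'DOWN'])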
|
# super
class A:
def work(self):
print("A类的work被调用")
class B(A):
def work(self):
print("B类的work被调用")
b = B()
# call the subclass's work method
b.work()
# call the parent class's work method
super(B, b).work()
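# The zero-argument form of super() does the same thing from inside a method;
# a minimal sketch (hypothetical class C, not part of the example above):
#
#   class C(A):
#       def work(self):
#           super().work()                  # calls A.work
#           print("C's work() was called")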
|
"""
author: Antoine Spahr
date : 29.09.2020
----------
TO DO :
"""
import os
import pandas as pd
import numpy as np
import skimage.io as io
import skimage.transform
import skimage.draw
import skimage
import cv2
import torch
from torch.utils import data
import nibabel as nib
import pydicom
import src.dataset.transforms as tf
from src.utils.ct_utils import window_ct, resample_ct
class public_SegICH_Dataset2D(data.Dataset):
"""
    Define a torch dataset for loading 2D CT slices and their ICH masks.
"""
def __init__(self, data_df, data_path, augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),
tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, output_size=256):
"""
Build a dataset for the 2D annotated segmentation of ICH.
----------
INPUT
        |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a patient number, a slice
        |      number, an image filename and a mask filename.
        |---- data_path (str) path to the root of the dataset folder (until where the samples' filenames begin).
        |---- augmentation_transform (list of transform) data augmentation transformations to apply.
|---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
|---- output_size (int) the dimension of the output (H = W).
OUTPUT
|---- ICH_Dataset2D (torch.Dataset) the 2D dataset.
"""
super(public_SegICH_Dataset2D, self).__init__()
self.data_df = data_df
self.data_path = data_path
self.window = window
self.transform = tf.Compose(*augmentation_transform,
tf.Resize(H=output_size, W=output_size),
tf.ToTorchTensor())
def __len__(self):
"""
Return the number of samples in the dataset.
----------
INPUT
|---- None
OUTPUT
|---- N (int) the number of samples in the dataset.
"""
return len(self.data_df)
def __getitem__(self, idx):
"""
        Extract the CT and corresponding mask specified by idx.
----------
INPUT
|---- idx (int) the sample index in self.data_df.
OUTPUT
|---- slice (torch.tensor) the CT image with dimension (1 x H x W).
|---- mask (torch.tensor) the segmentation mask with dimension (1 x H x W).
|---- patient_nbr (torch.tensor) the patient id as a single value.
|---- slice_nbr (torch.tensor) the slice number as a single value.
"""
# load image
slice = io.imread(self.data_path + self.data_df.iloc[idx].CT_fn)
if self.window:
slice = window_ct(slice, win_center=self.window[0], win_width=self.window[1], out_range=(0,1))
# load mask if one, else make a blank array
if self.data_df.iloc[idx].mask_fn == 'None':
mask = np.zeros_like(slice)
else:
mask = io.imread(self.data_path + self.data_df.iloc[idx].mask_fn)
# get the patient id
patient_nbr = torch.tensor(self.data_df.iloc[idx].PatientNumber)
# get slice number
slice_nbr = torch.tensor(self.data_df.iloc[idx].SliceNumber)
# Apply the transform : Data Augmentation + image formating
slice, mask = self.transform(slice, mask)
return slice, mask, patient_nbr, slice_nbr
class public_SegICH_AttentionDataset2D(data.Dataset):
"""
"""
def __init__(self, data_df, data_path, augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),
tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, output_size=256):
"""
Build a dataset for the 2D annotated segmentation of ICH with attention map.
----------
INPUT
        |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a patient number, a slice
        |      number, an image filename, a mask filename, and an attention map filename.
        |---- data_path (str) path to the root of the dataset folder (until where the samples' filenames begin).
        |---- augmentation_transform (list of transform) data augmentation transformations to apply.
|---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
|---- output_size (int) the dimension of the output (H = W).
OUTPUT
|---- ICH_Dataset2D (torch.Dataset) the 2D dataset.
"""
super(public_SegICH_AttentionDataset2D, self).__init__()
self.data_df = data_df
self.data_path = data_path
self.window = window
self.transform = tf.Compose(*augmentation_transform,
tf.Resize(H=output_size, W=output_size),
tf.ToTorchTensor())
def __len__(self):
"""
Return the number of samples in the dataset.
----------
INPUT
|---- None
OUTPUT
|---- N (int) the number of samples in the dataset.
"""
return len(self.data_df)
def __getitem__(self, idx):
"""
        Extract the stacked CT and attention map, the corresponding ground truth mask, volume id, and slice number specified by idx.
----------
INPUT
|---- idx (int) the sample index in self.data_df.
OUTPUT
|---- input (torch.tensor) the CT image stacked with the attention map with dimension (2 x H x W).
|---- mask (torch.tensor) the segmentation mask with dimension (1 x H x W).
|---- patient_nbr (torch.tensor) the patient id as a single value.
|---- slice_nbr (torch.tensor) the slice number as a single value.
"""
# load image
slice = io.imread(self.data_path + self.data_df.iloc[idx].ct_fn)
if self.window:
slice = window_ct(slice, win_center=self.window[0], win_width=self.window[1], out_range=(0,1))
# load attention map and stack it with the slice
if self.data_df.iloc[idx].attention_fn == 'None':
attention_map = np.zeros_like(slice)
else:
attention_map = skimage.img_as_float(io.imread(self.data_path + self.data_df.iloc[idx].attention_fn))
attention_map = skimage.transform.resize(attention_map, slice.shape[:2], order=1, preserve_range=True)
input = np.stack([slice, attention_map], axis=2)
# load mask if one, else make a blank array
if self.data_df.iloc[idx].mask_fn == 'None':
mask = np.zeros_like(slice)
else:
mask = io.imread(self.data_path + self.data_df.iloc[idx].mask_fn)
# get the patient id
patient_nbr = torch.tensor(self.data_df.iloc[idx].id)
# get slice number
slice_nbr = torch.tensor(self.data_df.iloc[idx].slice)
# Apply the transform : Data Augmentation + image formating
input, mask = self.transform(input, mask)
return input, mask, patient_nbr, slice_nbr
class public_SegICH_Dataset3D(data.Dataset):
"""
    Define a torch dataset for loading 3D CT volumes and ICH masks from NIfTI files.
"""
def __init__(self, data_df, data_path, augmentation_transform=[tf.RandomZCrop(Z=64), tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),
tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, resampling_dim=(-1, -1, 2.5),
resampling_order=1):
"""
Build a dataset for the 3D annotated segmentation of ICH from NIfTI images.
----------
INPUT
        |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a patient number, an
        |      image filename and a mask filename.
        |---- data_path (str) path to the root of the dataset folder (until where the samples' filenames begin).
        |---- augmentation_transform (list of transform) data augmentation transformations to apply.
|---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
|---- resampling_dim (tuple (x, y, z)) the output pixel dimension for volume reampling. If value is set to
| -1, the input pixel dimension is used.
|---- resampling_order (int) define the interpolation strategy for the resampling. Must be between 0 and 5.
| See scipy.ndimage.zoom().
OUTPUT
|---- ICH_Dataset3D (torch.Dataset) the 3D dataset.
"""
super(public_SegICH_Dataset3D, self).__init__()
self.data_df = data_df
self.data_path = data_path
self.window = window
self.resampling_dim = resampling_dim
self.resampling_order = resampling_order
        # note: no Resize is applied here (this dataset takes no output_size argument);
        # volumes are resampled to `resampling_dim` in __getitem__ instead.
        self.transform = tf.Compose(*augmentation_transform,
                                    tf.ToTorchTensor())
def __len__(self):
"""
Return the number of samples in the dataset.
----------
INPUT
|---- None
OUTPUT
|---- N (int) the number of samples in the dataset.
"""
return len(self.data_df)
def __getitem__(self, idx):
"""
Get the CT-volumes of the given patient idx.
----------
INPUT
|---- idx (int) the patient index in self.PatientID_list to extract.
OUTPUT
|---- volume (torch.Tensor) the CT-volume in a tensor (H x W x Slice)
"""
# load data
ct_nii = nib.load(self.data_path + self.data_df.loc[idx, 'CT_fn'])
mask_nii = nib.load(self.data_path + self.data_df.loc[idx, 'mask_fn'])
pID = torch.tensor(self.data_df.loc[idx, 'PatientNumber'])
# get volumes and pixel dimension
ct_vol = np.rot90(ct_nii.get_fdata(), axes=(0,1))
mask = np.rot90(mask_nii.get_fdata(), axes=(0,1))
pix_dim = ct_nii.header['pixdim'][1:4] # recover pixel physical dimension
# window CT-scan for soft tissus
ct_vol = window_ct(ct_vol, win_center=self.window[0], win_width=self.window[1], out_range=(0,1))
# resample vol and mask
ct_vol = resample_ct(ct_vol, pix_dim, out_pixel_dim=self.resampling_dim, preserve_range=True,
order=self.resampling_order)
mask = resample_ct(mask, pix_dim, out_pixel_dim=self.resampling_dim, preserve_range=True,
order=0)#self.resampling_order)
ct_vol, mask = self.transform(ct_vol, mask)
return ct_vol, mask.bool(), pID
class brain_extract_Dataset2D(data.Dataset):
"""
    Define a torch dataset for loading 2D CT slices and brain masks.
"""
def __init__(self, data_df, data_path, augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),
tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, output_size=256):
"""
Build a dataset for the 2D annotated segmentation of brain.
----------
INPUT
        |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a volume number, a slice
        |      number, an image filename and a mask filename.
        |---- data_path (str) path to the root of the dataset folder (until where the samples' filenames begin).
        |---- augmentation_transform (list of transform) data augmentation transformations to apply.
|---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
|---- output_size (int) the dimension of the output (H = W).
OUTPUT
|---- brain_Dataset2D (torch.Dataset) the 2D dataset.
"""
super(brain_extract_Dataset2D, self).__init__()
self.data_df = data_df
self.data_path = data_path
self.window = window
self.transform = tf.Compose(*augmentation_transform,
tf.Resize(H=output_size, W=output_size),
tf.ToTorchTensor())
def __len__(self):
"""
Return the number of samples in the dataset.
----------
INPUT
|---- None
OUTPUT
|---- N (int) the number of samples in the dataset.
"""
return len(self.data_df)
def __getitem__(self, idx):
"""
        Extract the CT and corresponding mask specified by idx.
----------
INPUT
|---- idx (int) the sample index in self.data_df.
OUTPUT
|---- slice (torch.tensor) the CT image with dimension (1 x H x W).
|---- mask (torch.tensor) the segmentation mask with dimension (1 x H x W).
|---- patient_nbr (torch.tensor) the patient id as a single value.
|---- slice_nbr (torch.tensor) the slice number as a single value.
"""
# load image
slice = io.imread(os.path.join(self.data_path, self.data_df.iloc[idx].ct_fn))
if self.window:
slice = window_ct(slice, win_center=self.window[0], win_width=self.window[1], out_range=(0,1))
# load mask if one, else make a blank array
if self.data_df.iloc[idx].mask_fn == 'None':
mask = np.zeros_like(slice)
else:
mask = io.imread(os.path.join(self.data_path, self.data_df.iloc[idx].mask_fn))
# get the patient id
vol_id = torch.tensor(self.data_df.iloc[idx].volume)
# get slice number
slice_nbr = torch.tensor(self.data_df.iloc[idx].slice)
# Apply the transform : Data Augmentation + image formating
slice, mask = self.transform(slice, mask)
return slice, mask, vol_id, slice_nbr
class RSNA_dataset(data.Dataset):
"""
Dataset object to load the RSNA data.
"""
def __init__(self, data_df, data_path, augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),
tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, output_size=256,
mode='standard', n_swap=10, swap_w=15, swap_h=15, swap_rot=False, contrastive_augmentation=None):
"""
Build a dataset for the RSNA dataset of ICH CT slice.
----------
INPUT
        |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a filename and a column
        |      Hemorrhage specifying whether the slice contains a hemorrhage or not.
        |---- data_path (str) path to the root of the dataset folder (until where the samples' filenames begin).
        |---- augmentation_transform (list of transform) data augmentation transformations to apply.
|---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
|---- output_size (int) the dimension of the output (H = W).
        |---- mode (str) define how to load the RSNA dataset. 'standard': return an image with its index.
        |      'context_restoration': return the image and the corrupted image. 'contrastive': return two heavily
        |      augmented versions of the input image. 'binary_classification': return image and binary label (ICH vs No-ICH).
        |      'multi_classification': return image and the vector of per-class hemorrhage labels.
|---- n_swap (int) the number of swap to use in the context_restoration mode.
|---- swap_h (int) the height of the swapped patch in the context_restoration mode.
|---- swap_w (int) the width of the swapped patch in the context_restoration mode.
|---- swap_rot (bool) whether to rotate patches. If true, swap_h must be None.
|---- contrastive_augmentation (list of transformation) the list of augmentation to apply in the contrastive
| mode. They must be composable by tf.Compose.
OUTPUT
|---- RSNA_dataset (torch.Dataset) the RSNA dataset.
"""
super(RSNA_dataset, self).__init__()
self.data_df = data_df.copy()
self.n_sample = len(data_df)
self.data_path = data_path
self.window = window
        assert mode in ['standard', 'context_restoration', 'contrastive', 'binary_classification', 'multi_classification'], f"Invalid mode. Must be one of 'standard', 'context_restoration', 'contrastive', 'binary_classification', 'multi_classification'. Given : {mode}"
self.mode = mode
self.transform = tf.Compose(*augmentation_transform, tf.Resize(H=output_size, W=output_size))#,
#tf.ToTorchTensor())
self.toTensor = tf.ToTorchTensor()
if mode == 'context_restoration':
self.swap_tranform = tf.RandomPatchSwap(n=n_swap, w=swap_w, h=swap_h, rotate=swap_rot)
elif mode == 'contrastive':
self.contrastive_transform = tf.Compose(*contrastive_augmentation)
elif mode == 'multi_classification':
            # add a column 'no_Hemorrhage'
self.data_df['no_Hemorrhage'] = 1 - self.data_df.Hemorrhage
# name of the classes
self.class_name = ['no_Hemorrhage', 'Hemorrhage', 'epidural', 'intraparenchymal', 'intraventricular', 'subarachnoid', 'subdural']
def __len__(self):
"""
        Return the number of samples in the dataset.
----------
INPUT
|---- None
OUTPUT
|---- N (int) the number of samples in the dataset.
"""
return self.n_sample
def __getitem__(self, idx):
"""
        Extract the CT specified by idx.
----------
INPUT
|---- idx (int) the sample index in self.data_df.
OUTPUT
|---- im (torch.tensor) the CT image with dimension (1 x H x W).
|---- lab (torch.tensor) the label for hemorrhage presence (0 or 1).
|---- idx (torch.tensor) the sample idx.
"""
# load dicom and recover the CT pixel values
dcm_im = pydicom.dcmread(self.data_path + self.data_df.iloc[idx].filename)
im = (dcm_im.pixel_array * float(dcm_im.RescaleSlope) + float(dcm_im.RescaleIntercept))
# Window the CT-scan
if self.window:
im = window_ct(im, win_center=self.window[0], win_width=self.window[1], out_range=(0,1))
if self.mode == 'standard':
# transform image
im = self.transform(im)
return self.toTensor(im), torch.tensor(idx) #torch.tensor(lab), torch.tensor(idx)
elif self.mode == 'context_restoration':
            # generate a corrupted version
# transform image
im = self.transform(im)
swapped_im = self.swap_tranform(im)
return self.toTensor(im), self.toTensor(swapped_im), torch.tensor(idx)
elif self.mode == 'contrastive':
            # augment the image twice
im1 = self.contrastive_transform(self.transform(im))
im2 = self.contrastive_transform(self.transform(im))
return self.toTensor(im1), self.toTensor(im2), torch.tensor(idx)
elif self.mode == 'binary_classification':
im = self.transform(im)
label = self.data_df.iloc[idx].Hemorrhage
return self.toTensor(im), torch.tensor(label), torch.tensor(idx)
elif self.mode == 'multi_classification':
im = self.transform(im)
samp = self.data_df.iloc[idx]
label = [samp[name] for name in self.class_name]
return self.toTensor(im), torch.tensor(label), torch.tensor(idx)
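# Usage sketch for RSNA_dataset (assumptions: `df` has the columns described in
# the docstring, `data_root` points to the DICOM folder, and window=(50, 100)
# is only an illustrative soft-tissue-like window, not a value prescribed here):
#
#   dataset = RSNA_dataset(df, data_root, window=(50, 100), output_size=256,
#                          mode='binary_classification')
#   loader = data.DataLoader(dataset, batch_size=16, shuffle=True, num_workers=4)
#   for im, label, idx in loader:
#       pass  # training / evaluation step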
class RSNA_Inpaint_dataset(data.Dataset):
"""
Dataset object to load the RSNA data for the inpainting task.
"""
def __init__(self, data_df, data_path, augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),
tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, output_size=256, n_draw=(1,4), vertex=(5,15),
brush_width=(10,30), angle=(0.0,6.28), length=(10,30), n_salt_pepper=(0,10), salt_peper_radius=(1,3)):
"""
Build a dataset for the RSNA dataset CT slice.
----------
INPUT
        |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a filename and a column
        |      Hemorrhage specifying whether the slice contains a hemorrhage or not.
        |---- data_path (str) path to the root of the dataset folder (until where the samples' filenames begin).
        |---- augmentation_transform (list of transform) data augmentation transformations to apply.
|---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
|---- output_size (int) the dimension of the output (H = W).
|---- n_draw (tuple (low, high)) range of number of inpaint element to draw.
|---- vertex (tuple (low, high)) range of number of vertex for each inpaint element.
|---- brush_width (tuple (low, high)) range of brush size to draw each inpaint element.
|---- angle (tuple (low, high)) the range of angle between each vertex of an inpaint element. Note that every
| two segment, Pi is added to the angle to keep the drawing in the vicinity. Angle in radian.
|---- length (tuple (low, high)) range of length for each segment.
|---- n_salt_pepper (tuple (low, high)) range of number of salt and pepper disk element to draw. Set to (0,1)
| for no salt and pepper elements.
|---- salt_peper_radius (tuple (low, high)) range of radius for the salt and pepper disk element.
OUTPUT
|---- RSNA_Inpaint_dataset (torch.Dataset) the RSNA dataset for inpainting.
"""
super(RSNA_Inpaint_dataset, self).__init__()
self.data_df = data_df
self.data_path = data_path
self.window = window
self.transform = tf.Compose(*augmentation_transform,
tf.Resize(H=output_size, W=output_size),
tf.ToTorchTensor())
self.n_draw = n_draw
self.vertex = vertex
self.brush_width = brush_width
self.angle = angle
self.length = length
self.n_salt_pepper = n_salt_pepper
self.salt_peper_radius = salt_peper_radius
def __len__(self):
"""
        Return the number of samples in the dataset.
----------
INPUT
|---- None
OUTPUT
|---- N (int) the number of samples in the dataset.
"""
return len(self.data_df)
def __getitem__(self, idx):
"""
        Extract the CT specified by idx.
----------
INPUT
|---- idx (int) the sample index in self.data_df.
OUTPUT
|---- im (torch.tensor) the CT image with dimension (1 x H x W).
        |---- mask (torch.tensor) the inpainting mask with dimension (1 x H x W).
"""
# load dicom and recover the CT pixel values
dcm_im = pydicom.dcmread(self.data_path + self.data_df.iloc[idx].filename)
im = (dcm_im.pixel_array * float(dcm_im.RescaleSlope) + float(dcm_im.RescaleIntercept))
# Window the CT-scan
if self.window:
im = window_ct(im, win_center=self.window[0], win_width=self.window[1], out_range=(0,1))
# transform image
im = self.transform(im)
# get a mask
mask = self.random_ff_mask((im.shape[1], im.shape[2]))
return im, tf.ToTorchTensor()(mask)
def random_ff_mask(self, shape):
"""
Generate a random inpainting mask with given shape.
----------
INPUT
|---- shape (tuple (h,w)) the size of the inpainting mask.
OUTPUT
|---- mask (np.array) the inpainting mask with value 1 on region to inpaint and zero otherwise.
"""
h, w = shape
mask = np.zeros(shape)
# draw random number of patches
for _ in range(np.random.randint(low=self.n_draw[0], high=self.n_draw[1])):
n_vertex = np.random.randint(low=self.vertex[0], high=self.vertex[1])
brush_width = np.random.randint(low=self.brush_width[0], high=self.brush_width[1])
start_x, start_y = int(np.random.normal(w/2, w/8)), int(np.random.normal(h/2, h/8))
#start_x, start_y = np.random.randint(low=0, high=w), np.random.randint(low=0, high=h)
beta = np.random.uniform(low=0, high=6.28)
for i in range(n_vertex):
angle = beta + np.random.uniform(low=self.angle[0], high=self.angle[1])
length = np.random.randint(low=self.length[0], high=self.length[1])
if i % 2 == 0:
angle = np.pi + angle #2 * np.pi - angle # reverse mode
# draw line
end_x = (start_x + length * np.sin(angle)).astype(np.int32)
end_y = (start_y + length * np.cos(angle)).astype(np.int32)
cv2.line(mask, (start_x, start_y), (end_x, end_y), 1.0, brush_width)
# set new start point
start_x, start_y = end_x, end_y
# salt and pepper
for _ in range(np.random.randint(low=self.n_salt_pepper[0], high=self.n_salt_pepper[1])):
start_x, start_y = np.random.randint(low=0, high=w), np.random.randint(low=0, high=h)
r = np.random.randint(low=self.salt_peper_radius[0], high=self.salt_peper_radius[1])
cv2.circle(mask, (start_x, start_y), r, 1.0, -1)
return mask
class ImgMaskDataset(data.Dataset):
"""
Dataset object to load an image and mask together.
"""
def __init__(self, data_df, data_path, augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),
tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)], window=None, output_size=256):
"""
Build a dataset for loading image and mask.
----------
INPUT
        |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a column 'im_fn' with the
        |      image filepath and a column 'mask_fn' with the mask filepath.
        |---- data_path (str) path to the root of the dataset folder (until where the samples' filenames begin).
        |---- augmentation_transform (list of transform) data augmentation transformations to apply.
|---- window (tuple (center, width)) the window for image intensity rescaling. If None, no windowing is performed.
|---- output_size (int) the dimension of the output (H = W).
OUTPUT
        |---- ImgMaskDataset (torch.Dataset) the image/mask dataset.
"""
super(ImgMaskDataset, self).__init__()
self.data_df = data_df
self.data_path = data_path
self.window = window
self.transform = tf.Compose(*augmentation_transform,
tf.Resize(H=output_size, W=output_size),
tf.ToTorchTensor())
def __len__(self):
"""
        Return the number of samples in the dataset.
----------
INPUT
|---- None
OUTPUT
|---- N (int) the number of samples in the dataset.
"""
return len(self.data_df)
def __getitem__(self, idx):
"""
        Extract the image and mask specified by idx.
----------
INPUT
|---- idx (int) the sample index in self.data_df.
OUTPUT
|---- im (torch.tensor) the image with dimension (1 x H x W).
|---- mask (torch.tensor) the mask with dimension (1 x H x W).
|---- idx (torch.tensor) the data index in the data_df.
"""
        # load the image and its mask
im = io.imread(os.path.join(self.data_path, self.data_df.iloc[idx].im_fn))
mask = io.imread(os.path.join(self.data_path, self.data_df.iloc[idx].mask_fn))
# Window the CT-scan
if self.window:
im = window_ct(im, win_center=self.window[0], win_width=self.window[1], out_range=(0,1))
# transform image
im, mask = self.transform(im, mask)
return im, mask, torch.tensor(idx)
class RSNA_FCDD_dataset(data.Dataset):
""" """
def __init__(self, data_df, data_path, artificial_anomaly=True, anomaly_proba=0.5,
augmentation_transform=[tf.Translate(low=-0.1, high=0.1), tf.Rotate(low=-10, high=10),
tf.Scale(low=0.9, high=1.1), tf.HFlip(p=0.5)],
window=None, output_size=256,
drawing_params=dict(n_ellipse=(1,10), major_axis=(1,25), minor_axis=(1,25),
rotation=(0,2*np.pi), intensity=(0.1, 1), noise=None)):
"""
Build a dataset for the RSNA dataset for FCDD training.
----------
INPUT
        |---- data_df (pd.DataFrame) the input dataframe of samples. Each row must contain a filename and a column
        |      Hemorrhage specifying whether the slice contains a hemorrhage or not.
        |---- data_path (str) path to the root of the dataset folder (until where the samples' filenames begin).
        |---- artificial_anomaly (bool) whether to generate anomalies by drawing ellipses on top of the image. If False,
        |      the dataset will return labeled hemorrhagic slices as anomalies.
        |---- anomaly_proba (float in [0.0,1.0]) the probability to generate an artificial anomaly. Ignored if
        |      artificial_anomaly is False.
        |---- augmentation_transform (list of transform) data augmentation transformations to apply.
|---- window (tuple (center, width)) the window for CT intensity rescaling. If None, no windowing is performed.
|---- output_size (int) the dimension of the output (H = W).
|---- drawing_params (dict) the parameters to be passed to the ellipse drawing method.
OUTPUT
|---- RSNA_FCDD_dataset (data.Dataset)
"""
super().__init__()
assert 0.0 <= anomaly_proba <= 1.0, f"Probability of anomaly must be in [0.0 , 1.0]. Given {anomaly_proba}."
self.data_df = data_df
self.data_path = data_path
self.artificial_anomaly = artificial_anomaly
self.anomaly_proba = anomaly_proba
self.window = window
self.transform = tf.Compose(*augmentation_transform,
tf.Resize(H=output_size, W=output_size),
tf.ToTorchTensor())
self.drawing_params = drawing_params
def __len__(self):
"""
        Return the number of samples in the dataset.
----------
INPUT
|---- None
OUTPUT
|---- N (int) the number of samples in the dataset.
"""
return len(self.data_df)
def __getitem__(self, idx):
"""
        Extract the CT specified by idx.
----------
INPUT
|---- idx (int) the sample index in self.data_df.
OUTPUT
|---- im (torch.tensor) the CT image with dimension (1 x H x W).
        |---- label (torch.tensor) the anomaly label (1 if hemorrhage or artificial anomaly, else 0).
        |---- idx (torch.tensor) the sample index.
"""
# load dicom and recover the CT pixel values
dcm_im = pydicom.dcmread(self.data_path + self.data_df.iloc[idx].filename)
im = (dcm_im.pixel_array * float(dcm_im.RescaleSlope) + float(dcm_im.RescaleIntercept))
# Window the CT-scan
if self.window:
im = window_ct(im, win_center=self.window[0], win_width=self.window[1], out_range=(0,1))
# transform image
im = self.transform(im)
label = self.data_df.iloc[idx].Hemorrhage
if self.artificial_anomaly and (np.random.rand() < self.anomaly_proba) and (label == 0):
# get a mask
anomalies = tf.ToTorchTensor()(self.draw_ellipses((im.shape[1], im.shape[2]), **self.drawing_params))
im = torch.where(anomalies > 0, anomalies, im)
label = 1
return im, torch.tensor(label), torch.tensor(idx)
@staticmethod
def draw_ellipses(im_shape, n_ellipse=(1,10), major_axis=(1,25), minor_axis=(1,25), rotation=(0,2*np.pi),
intensity=(0.1, 1), noise=None):
"""
        Draw multiple ellipses with randomly sampled parameters and intensities.
----------
INPUT
|---- im_shape (tuple) shape of the image to generate.
|---- n_ellipse (2-tuple low,high) the range to sample the number of ellipse from.
|---- major_axis (2-tuple low,high) the range to sample the major_axis of ellipse from.
|---- minor_axis (2-tuple low,high) the range to sample the minor axis of ellipse from.
|---- rotation (2-tuple low,high) the range to sample the angle of ellipse from (in radian).
|---- intensity (2-tuple low,high) the range to sample the pixel intensity of ellipse from.
        |---- noise (float) standard deviation of the gaussian noise added to the ellipse intensity. If None, no noise is added.
OUTPUT
|---- out (np.array) the grayscale image with a random set of ellipse drawn.
"""
h, w = im_shape
out = np.zeros(im_shape)
# generate a random number of ellipses
for _ in range(np.random.randint(low=n_ellipse[0], high=n_ellipse[1])):
# get center
r, c = int(np.random.normal(w/2, w/6)), int(np.random.normal(h/2, h/6))
# get major/minor axis
max_ax = np.random.uniform(low=major_axis[0], high=major_axis[1])
min_ax = np.random.uniform(low=minor_axis[0], high=min(minor_axis[1], max_ax))
# get angle
rot = np.random.uniform(low=rotation[0], high=rotation[1])
# compute ellipse coordinates
rr, cc = skimage.draw.ellipse(r, c, min_ax, max_ax, shape=im_shape, rotation=rot)
# draw ellipse with or without noise
if noise:
gs_val = np.random.uniform(low=intensity[0], high=intensity[1])
out[rr, cc] = np.clip(np.random.normal(gs_val, noise, size=len(rr)), 0.0, 1.0)
else:
out[rr, cc] = np.random.uniform(low=intensity[0], high=intensity[1])
return out
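# Quick visual check sketch for the artificial anomalies (assumption: matplotlib
# is available in the environment; draw_ellipses is a staticmethod so it can be
# called directly on the class):
#
#   import matplotlib.pyplot as plt
#   anomalies = RSNA_FCDD_dataset.draw_ellipses((256, 256), n_ellipse=(1, 5))
#   plt.imshow(anomalies, cmap='gray'); plt.show()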
#%%
# def random_ff_mask(shape):
# """
#
# """
# h, w = shape
# mask = np.zeros(shape)
# # get drawing params
# for _ in range(np.random.randint(low=1, high=4)):
# n_vertex = np.random.randint(low=5, high=15+1)
# brush_width = np.random.randint(low=10, high=35+1)
# start_x, start_y = int(np.random.normal(w/2, w/8)), int(np.random.normal(h/2, h/8))
# #start_x, start_y = np.random.randint(low=0, high=w), np.random.randint(low=0, high=h)
#
# for i in range(n_vertex):
# angle = np.random.uniform(low=0.0, high=np.pi*2)
# length = np.random.randint(low=10, high=30)
# if i % 2 == 0:
# angle = np.pi + angle #2 * np.pi - angle # reverse mode
# # draw line
# end_x = (start_x + length * np.sin(angle)).astype(np.int32)
# end_y = (start_y + length * np.cos(angle)).astype(np.int32)
# cv2.line(mask, (start_x, start_y), (end_x, end_y), 1.0, brush_width)
# # set new start point
# start_x, start_y = end_x, end_y
#
# # salt and pepper
# for _ in range(np.random.randint(low=0, high=5)):
# start_x, start_y = np.random.randint(low=0, high=w), np.random.randint(low=0, high=h)
# r = np.random.randint(low=1, high=3)
# cv2.circle(mask, (start_x, start_y), r, 1.0, -1)
#
# if np.random.random() > 0.5:
# mask = np.flip(mask, axis=0)
#
# if np.random.random() > 0.5:
# mask = np.flip(mask, axis=1)
#
# return mask
#
#
# #%%
# import matplotlib.pyplot as plt
# fig, axs = plt.subplots(4,4,figsize=(10,10))
# for ax in axs.reshape(-1):
# ax.imshow(random_ff_mask((256,256)), cmap='gray')
# plt.show()
#%%
#
|
from collections import deque
from dataclasses import dataclass
from typing import Deque, Dict, Generic, List, Sequence, Set, Tuple, TypeVar
from .common import ParserError, ParserErrorWithIndex, ParserTypeError
from .operators import find_binary_operator, find_unary_operator, find_variable, find_set
from .operands import *
from ..common.operators import (Associativity, OPERATORS, OPERATORS_BINARY,
OPERATORS_UNARY, OperatorName)
from ..lexer import (Token, TokenDuration, TokenHex, TokenIPv4, TokenIPv6, TokenNumber,
TokenOperator, TokenParenthesis, TokenRegex, TokenScope, TokenString,
TokenTransparent, TokenWord)
SCOPE_COUNTERPART = {
")": "(",
"]": "[",
"}": "{"
}
def parse(
tokens: Deque[Token],
vars: Dict[str, ParseAtom]
) -> Tuple[Sequence[ParseAtom], Set[str]]:
operands: Deque[Tuple[ParseAtom, Token]] = deque()
operators: Deque[Tuple[OperatorName, Token]] = deque()
dependencies: Set[str] = set()
def _pop_op() -> OperatorName:
op_head_name, op_head_token = operators.pop()
op_head = OPERATORS[op_head_name]
if op_head.operands() == 1:
if not operands:
raise ParserError(op_head_token, "missing unary operand")
right, _ = operands.pop()
atom = find_unary_operator(op_head_name, right)
else:
            if len(operands) < 2:
raise ParserError(op_head_token, "missing binary operand")
right, _ = operands.pop()
left, _ = operands.pop()
atom = find_binary_operator(op_head_name, left, right)
if atom is not None:
operands.append((atom, op_head_token))
return op_head_name
else:
raise ParserError(op_head_token, "invalid operands for operator")
last_is_operator = False
while tokens:
token = tokens.popleft()
if isinstance(token, TokenTransparent):
pass
elif isinstance(token, TokenScope):
            if token.text not in SCOPE_COUNTERPART:
# scope opener
operators.append((OperatorName.SCOPE, token))
else:
# scope closer
scope_atoms: Deque[Tuple[ParseAtom, Token]] = deque()
while operators:
op_head_name, op_head_token = operators[-1]
if op_head_name == OperatorName.SCOPE:
if SCOPE_COUNTERPART[token.text] == op_head_token.text:
break
else:
raise ParserError(
op_head_token,
f"mismatched scope terminator '{op_head_token.text}'"
)
elif op_head_name == OperatorName.COMMA:
operators.pop()
scope_atoms.appendleft(operands.pop())
else:
_pop_op()
if operators:
op_head_name, op_head_token = operators.pop()
if op_head_token.text == "(":
operands.extend(scope_atoms)
elif op_head_token.text == "{":
try:
atom = find_set([atom for atom, _ in scope_atoms])
except ParserErrorWithIndex as e:
_, bad_token = scope_atoms[e.index]
raise ParserTypeError(bad_token, str(e))
if atom is not None:
operands.append((atom, op_head_token))
else:
raise ParserError(token, "invalid set content")
else:
raise ParserError(token, "unexpected scope terminator")
elif isinstance(token, TokenOperator):
if last_is_operator or not operands:
if token.text in OPERATORS_UNARY:
op_new_name = OPERATORS_UNARY[token.text]
else:
raise ParserError(token, "invalid unary operator")
else:
if token.text in OPERATORS_BINARY:
op_new_name = OPERATORS_BINARY[token.text]
else:
raise ParserError(token, "invalid binary operator")
op_new = OPERATORS[op_new_name]
# shunting yard
while operators:
op_head_name, _ = operators[-1]
op_head = OPERATORS[op_head_name]
if (op_head.associativity == Associativity.LEFT
and op_head.weight >= op_new.weight):
_pop_op()
elif (op_head.associativity == Associativity.RIGHT
and op_head.weight > op_new.weight):
_pop_op()
else:
break
operators.append((op_new_name, token))
last_is_operator = True
elif last_is_operator or not operands:
last_is_operator = False
if operators:
op_head_name, _ = operators[-1]
if op_head_name == OperatorName.SCOPE:
# put a falsified comma between scope opener and the first item.
# commas are used to know how many atoms are in a scope
operators.append((OperatorName.COMMA, token))
if isinstance(token, TokenWord):
if token.text in KEYWORDS:
keyword_atom = KEYWORDS[token.text]
operands.append((keyword_atom, token))
elif (var_type := vars.get(token.text)) is None:
raise ParserError(token, f"unknown variable {token.text}")
elif (var := find_variable(token.text, var_type)) is None:
# shouldn't happen
raise ParserError(token, f"invalid variable type {var_type!r}")
else:
dependencies.add(token.text)
operands.append((var, token))
elif isinstance(token, TokenNumber):
if "." in token.text:
operands.append((ParseConstFloat.from_text(token.text), token))
else:
operands.append((ParseConstInteger.from_text(token.text), token))
elif isinstance(token, TokenHex):
operands.append((ParseHex.from_text(token.text), token))
elif isinstance(token, TokenDuration):
operands.append((ParseDuration.from_text(token.text), token))
elif isinstance(token, TokenString):
operands.append((ParseConstString.from_text(token.text), token))
elif isinstance(token, TokenRegex):
operands.append((ParseConstRegex.from_text(token.text), token))
elif isinstance(token, TokenIPv4):
if "/" in token.text:
operands.append((ParseConstCIDRv4.from_text(token.text), token))
else:
operands.append((ParseConstIPv4.from_text(token.text), token))
elif isinstance(token, TokenIPv6):
if "/" in token.text:
operands.append((ParseConstCIDRv6.from_text(token.text), token))
else:
operands.append((ParseConstIPv6.from_text(token.text), token))
else:
raise ParserError(token, "unknown token")
else:
raise ParserError(token, "missing operator")
while operators:
op_head_name, op_head_token = operators[-1]
if op_head_name == OperatorName.SCOPE:
raise ParserError(op_head_token, "unclosed scope")
elif op_head_name == OperatorName.COMMA:
raise ParserError(op_head_token, "comma in root scope")
else:
_pop_op()
return list(atom for atom, _ in operands), dependencies
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gcloud genomics datasets restore.
"""
from googlecloudsdk.api_lib import genomics as lib
from googlecloudsdk.api_lib.genomics import genomics_util
from googlecloudsdk.api_lib.genomics.exceptions import GenomicsError
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_io
class DatasetsRestore(base.Command):
"""Restores a deleted dataset.
"""
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument('id',
type=int,
help='The ID of the deleted dataset to be restored.')
@genomics_util.ReraiseHttpException
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace, All the arguments that were provided to this
command invocation.
Raises:
HttpException: An http error response was received while executing api
request.
Returns:
None
"""
prompt_message = (
'Restoring dataset {0} will restore all objects in '
'the dataset.').format(args.id)
if not console_io.PromptContinue(message=prompt_message):
raise GenomicsError('Restore aborted by user.')
apitools_client = self.context[lib.GENOMICS_APITOOLS_CLIENT_KEY]
genomics_messages = self.context[lib.GENOMICS_MESSAGES_MODULE_KEY]
dataset = genomics_messages.GenomicsDatasetsUndeleteRequest(
datasetId=str(args.id),
)
return apitools_client.datasets.Undelete(dataset)
def Display(self, args_unused, dataset):
"""This method is called to print the result of the Run() method.
Args:
args_unused: The arguments that command was run with.
dataset: The value returned from the Run() method.
"""
if dataset:
log.Print('Restored dataset {0}, name: {1}'.format(
dataset.id, dataset.name))
|
#!/usr/bin/env python
"""Tests of the geometry package."""
from math import sqrt
from numpy import (
all,
allclose,
arange,
array,
insert,
isclose,
mean,
ones,
sum,
take,
)
from numpy.linalg import inv, norm
from numpy.random import choice, dirichlet
from numpy.testing import assert_allclose
from cogent3.maths.geometry import (
aitchison_distance,
alr,
alr_inv,
center_of_mass,
center_of_mass_one_array,
center_of_mass_two_array,
clr,
clr_inv,
distance,
multiplicative_replacement,
sphere_points,
)
from cogent3.util.unit_test import TestCase, main
__author__ = "Sandra Smit"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Sandra Smit", "Rob Knight", "Helmut Simon"]
__license__ = "BSD-3"
__version__ = "2020.6.30a"
__maintainer__ = "Sandra Smit"
__email__ = "sandra.smit@colorado.edu"
__status__ = "Production"
class CenterOfMassTests(TestCase):
"""Tests for the center of mass functions"""
def setUp(self):
"""setUp for all CenterOfMass tests"""
self.simple = array([[1, 1, 1], [3, 1, 1], [2, 3, 2]])
self.simple_list = [[1, 1, 1], [3, 1, 1], [2, 3, 2]]
self.more_weight = array([[1, 1, 3], [3, 1, 3], [2, 3, 50]])
self.square = array([[1, 1, 25], [3, 1, 25], [3, 3, 25], [1, 3, 25]])
self.square_odd = array([[1, 1, 25], [3, 1, 4], [3, 3, 25], [1, 3, 4]])
self.sec_weight = array([[1, 25, 1], [3, 25, 1], [3, 25, 3], [1, 25, 3]])
def test_center_of_mass_one_array(self):
"""center_of_mass_one_array should behave correctly"""
com1 = center_of_mass_one_array
self.assertEqual(com1(self.simple), array([2, 2]))
self.assertEqual(com1(self.simple_list), array([2, 2]))
self.assertFloatEqual(com1(self.more_weight), array([2, 2.785714]))
self.assertEqual(com1(self.square), array([2, 2]))
self.assertEqual(com1(self.square_odd), array([2, 2]))
self.assertEqual(com1(self.sec_weight, 1), array([2, 2]))
def test_CoM_one_array_wrong(self):
"""center_of_mass_one_array should fail on wrong input"""
com1 = center_of_mass_one_array
self.assertRaises(TypeError, com1, self.simple, "a") # weight_idx wrong
self.assertRaises(IndexError, com1, self.simple, 100) # w_idx out of range
# shape[1] out of range
self.assertRaises(IndexError, com1, [1, 2, 3], 2)
def test_center_of_mass_two_array(self):
"""center_of_mass_two_array should behave correctly"""
com2 = center_of_mass_two_array
coor = take(self.square_odd, (0, 1), 1)
weights = take(self.square_odd, (2,), 1)
self.assertEqual(com2(coor, weights), array([2, 2]))
weights = weights.ravel()
self.assertEqual(com2(coor, weights), array([2, 2]))
def test_CoM_two_array_wrong(self):
"""center_of_mass_two_array should fail on wrong input"""
com2 = center_of_mass_two_array
weights = [1, 2]
self.assertRaises(TypeError, com2, self.simple, "a") # weight_idx wrong
self.assertRaises(ValueError, com2, self.simple, weights) # not aligned
def test_center_of_mass(self):
"""center_of_mass should make right choice between functional methods
"""
com = center_of_mass
com1 = center_of_mass_one_array
com2 = center_of_mass_two_array
self.assertEqual(com(self.simple), com1(self.simple))
self.assertFloatEqual(com(self.more_weight), com1(self.more_weight))
self.assertEqual(com(self.sec_weight, 1), com1(self.sec_weight, 1))
coor = take(self.square_odd, (0, 1), 1)
weights = take(self.square_odd, (2,), 1)
self.assertEqual(com(coor, weights), com2(coor, weights))
weights = weights.ravel()
self.assertEqual(com(coor, weights), com2(coor, weights))
def test_distance(self):
"""distance should return Euclidean distance correctly."""
# for single dimension, should return difference
a1 = array([3])
a2 = array([-1])
self.assertEqual(distance(a1, a2), 4)
# for two dimensions, should work e.g. for 3, 4, 5 triangle
a1 = array([0, 0])
a2 = array([3, 4])
self.assertEqual(distance(a1, a2), 5)
# vector should be the same as itself for any dimensions
a1 = array([1.3, 23, 5.4, 2.6, -1.2])
self.assertEqual(distance(a1, a1), 0)
# should match hand-calculated case for an array
a1 = array([[1, -2], [3, 4]])
a2 = array([[1, 0], [-1, 2.5]])
self.assertEqual(distance(a1, a1), 0)
self.assertEqual(distance(a2, a2), 0)
self.assertEqual(distance(a1, a2), distance(a2, a1))
self.assertFloatEqual(distance(a1, a2), sqrt(22.25))
def test_sphere_points(self):
"""tests sphere points"""
self.assertEqual(sphere_points(1), array([[1.0, 0.0, 0.0]]))
class TestAitchison(TestCase):
def setUp(self):
x = choice(20, size=10) + 0.1
self.x = x
a = arange(1, 7)
self.a = a
d = dirichlet(a, size=2)
self.d = d
def test_Aitchison_transforms(self):
"""Test that alr_inv of alr is in fact the inverse
of alr. Ditto for clr_inv and clr. Then test that clr
transforms into hyperplane x1 + ... + xn=0."""
length = len(self.x)
for col in range(-1, length):
y = alr_inv(self.x, col)
assert allclose(self.x, alr(y, col)), (
"Failed alr inverse test for col = " + str(col) + "."
)
z = dirichlet(self.x)
y = clr(z)
assert allclose(z, clr_inv(y)), "Failed clr inverse test."
assert allclose(sum(y), 0), "Failed clr hyperplane test."
def test_Aitchison_distance(self):
x = self.d[0]
y = self.d[1]
assert allclose(
aitchison_distance(x, y), norm(clr(x) - clr(y))
), "Failed distance test."
def test_multiplicative_replacement(self):
x1 = dirichlet(self.a)
y1 = insert(x1, 3, 0)
u = multiplicative_replacement(y1)
assert allclose(
y1, u, atol=1e-2
), "Multiplicative replacement peturbation is too large."
assert isclose(
sum(u), 1
), "Multiplicative replacement does not yield a composition."
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
import os
import sys
import subprocess
import linecache
import time
from optparse import OptionParser
import optparse
import time
#=========================
def setupParserOptions():
parser = optparse.OptionParser()
parser.set_usage("awslaunch_cluster --instance=<instanceType>")
parser.add_option("--instance",dest="instance",type="string",metavar="STRING",
help="Specify instance type to launch into cluster")
parser.add_option("--num",dest="num",type="int",metavar="INTEGER",
help="Number of instances in cluster")
parser.add_option("--availZone",dest="zone",type="string",metavar="STRING",
help="Specify availability zone")
parser.add_option("--volume",dest="ebsvol",type="string",metavar="STRING",default='none',
help="Optional: Volume ID for volume that will be mounted onto cluster")
parser.add_option("--spotPrice",dest="spot",type="float",metavar="FLOAT",default=-1,
help="Optional: Specify spot price (if spot instance requested)")
parser.add_option("--relion2", action="store_true",dest="relion2",default=False,
help="Flag to load environment with Relion2 installed")
parser.add_option("--instanceList", action="store_true",dest="listInstance",default=False,
help="Flag to list available instances")
parser.add_option("-d", action="store_true",dest="debug",default=False,
help="debug")
options,args = parser.parse_args()
if len(args) > 0:
parser.error("Unknown commandline options: " +str(args))
if len(sys.argv) < 2:
parser.print_help()
sys.exit()
params={}
for i in parser.option_list:
if isinstance(i.dest,str):
params[i.dest] = getattr(options,i.dest)
return params
#====================
def checkConflicts(params,availInstances):
if not params['zone']:
print 'Error: No availability zone specified. Exiting'
sys.exit()
if params['spot'] <= 0:
if params['spot'] != -1:
print 'Error: Spot price requested is less than or equal to 0. Try again. Exiting\n'
sys.exit()
#Check that keypair exists
keyPath=subprocess.Popen('echo $KEYPAIR_PATH',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if len(keyPath) == 0:
print '\nError: KEYPAIR_PATH not specified as environment variable. Exiting\n'
sys.exit()
if not os.path.exists(keyPath):
print 'Error: Key pair file %s does not exist. Exiting' %(keyPath)
sys.exit()
if keyPath.split('/')[-1].split('.')[-1] != 'pem':
print '\nError: Keypair specified is invalid, it needs to have .pem extension. Found .%s extension instead. Exiting\n' %(keyPath.split('/')[-1].split('.')[-1])
sys.exit()
#Check that enviornmental variables are set
AWS_ACCESS_KEY_ID=subprocess.Popen('echo $AWS_ACCESS_KEY_ID',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
AWS_SECRET_ACCESS_KEY=subprocess.Popen('echo $AWS_SECRET_ACCESS_KEY',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
AWS_ACCOUNT_ID=subprocess.Popen('echo $AWS_ACCOUNT_ID',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
AWS_DEFAULT_REGION=subprocess.Popen('echo $AWS_DEFAULT_REGION',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
starcluster=subprocess.Popen('which starcluster',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
if len(starcluster) == 0:
print '\nError: Cluster creating software (starcluster) is not installed. Install first and then try again.\n'
sys.exit()
if len(AWS_ACCESS_KEY_ID) == 0:
print '\nError: AWS_ACCESS_KEY_ID not specified as environment variable. Exiting\n'
sys.exit()
if len(AWS_SECRET_ACCESS_KEY) == 0:
print '\nError: AWS_SECRET_ACCESS_KEY not specified as environment variable. Exiting\n'
sys.exit()
if len(AWS_ACCOUNT_ID) == 0:
print '\nError: AWS_ACCOUNT_ID not specified as environment variable. Exiting\n'
sys.exit()
if len(AWS_DEFAULT_REGION) == 0:
print '\nError: AWS_DEFAULT_REGION not specified as environment variable. Exiting\n'
sys.exit()
    if AWS_DEFAULT_REGION == 'us-west-2':
        AMI='ami-33291d03'
        if params['relion2'] is True:
            AMI='ami-dc79dabc'
    else:
        print '\nError: No AMI is configured for region %s; only us-west-2 is currently supported. Exiting\n' %(AWS_DEFAULT_REGION)
        sys.exit()
homedir=subprocess.Popen('echo $HOME',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
starexecpath='%s/.starcluster/config' %(homedir)
#Check that instance is in approved list
if not params['instance'] in availInstances:
print 'Error: Instance %s is not in instance list for creating clusters on AWS' %(params['instance'])
print availInstances
sys.exit()
return keyPath.split('/')[-1].split('.')[0],keyPath,AMI,starexecpath
#==============================
def configStarcluster(params,keyName,keyPath,AMI,starpath):
if not os.path.exists('%s/.starcluster' %(subprocess.Popen('echo $HOME',shell=True, stdout=subprocess.PIPE).stdout.read().strip())):
os.makedirs('%s/.starcluster' %(subprocess.Popen('echo $HOME',shell=True, stdout=subprocess.PIPE).stdout.read().strip()))
if os.path.exists(starpath):
os.remove(starpath)
AWS_ACCESS_KEY_ID=subprocess.Popen('echo $AWS_ACCESS_KEY_ID',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
AWS_SECRET_ACCESS_KEY=subprocess.Popen('echo $AWS_SECRET_ACCESS_KEY',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
AWS_ACCOUNT_ID=subprocess.Popen('echo $AWS_ACCOUNT_ID',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
AWS_DEFAULT_REGION=subprocess.Popen('echo $AWS_DEFAULT_REGION',shell=True, stdout=subprocess.PIPE).stdout.read().strip()
cmd='####################################\n'
cmd+='## StarCluster Configuration File ##\n'
cmd+='####################################\n'
cmd+='[aws info]\n'
cmd+='AWS_USER_ID=%s\n' %(AWS_ACCOUNT_ID)
cmd+='AWS_ACCESS_KEY_ID =%s\n' %(AWS_ACCESS_KEY_ID)
cmd+='AWS_SECRET_ACCESS_KEY = %s\n' %(AWS_SECRET_ACCESS_KEY)
cmd+='AWS_REGION_NAME = %s\n' %(AWS_DEFAULT_REGION)
cmd+='AVAILABILITY_ZONE = %s\n' %(params['zone'])
cmd+='AWS_REGION_HOST = ec2.%s.amazonaws.com\n' %(AWS_DEFAULT_REGION)
cmd+='[global]\n'
cmd+='DEFAULT_TEMPLATE=cluster\n'
cmd+='[key %s]\n' %(keyName)
cmd+='KEY_LOCATION=%s\n' %(keyPath)
cmd+='[cluster cluster]\n'
cmd+='KEYNAME = %s\n'%(keyName)
cmd+='CLUSTER_USER = ubuntu\n'
cmd+='CLUSTER_SHELL = bash\n'
cmd+='NODE_IMAGE_ID = %s\n' %(AMI)
if params['spot'] == -1:
cmd+='FORCE_SPOT_MASTER=False\n'
if params['spot'] > 0:
cmd+='FORCE_SPOT_MASTER=True\n'
cmd+='CLUSTER_SIZE = %i\n' %(params['num'])
cmd+='NODE_INSTANCE_TYPE = %s\n' %(params['instance'])
if params['ebsvol'] != 'none':
        cmd+='VOLUMES = data\n'
        cmd+='[volume data]\n'
        cmd+='VOLUME_ID = %s\n' %(params['ebsvol'])
        cmd+='MOUNT_PATH = /data\n'
o1=open(starpath,'w')
o1.write(cmd)
o1.close()
#==============================
if __name__ == "__main__":
availInstances=['t2.micro','t2.nano','t2.small','t2.medium','t2.large','m4.large','m4.xlarge','m4.2xlarge','m4.4xlarge','m4.10xlarge','m4.16xlarge','m3.medium','m3.large','m3.xlarge','m3.2xlarge','c4.large','c4.xlarge','c4.2xlarge','c4.4xlarge','c4.8xlarge','c3.large','c3.xlarge','c3.2xlarge','c3.8xlarge','c3.4xlarge','c3.xlarge','r3.large','r3.xlarge','r3.2xlarge','r3.4xlarge','r3.8xlarge']
params=setupParserOptions()
if params['listInstance'] is True:
print 'Available instances:'
print availInstances
sys.exit()
#Need to check if they ask for p2 that they are in Oregon, Virginia, or Ireland
    #Need to create directory for AMIs across regions. Right now, just us-west-2
keyName,keyPath,AMI,starpath=checkConflicts(params,availInstances)
configStarcluster(params,keyName,keyPath,AMI,starpath)
#FIGURE OUT CLUSTER NAMING SCHEME
clustername='cluster-%s-%0.f' %(params['instance'],time.time())
if params['spot'] == -1:
cmd='starcluster start %s' %(clustername)
subprocess.Popen(cmd,shell=True).wait()
if params['spot'] > 0:
cmd='starcluster start %s --bid=%f' %(clustername,params['spot'])
subprocess.Popen(cmd,shell=True).wait()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db.models import QuerySet
#from django.utils.encoding import python_2_unicode_compatible
from django.db.models import Q, F
from django.db.models.functions import Substr, Length
from django.db.models import CharField, OuterRef, Subquery
try:
from treebeard.models import Node as TreebeardNode
from treebeard.al_tree import AL_Node
from treebeard.ns_tree import NS_Node
from treebeard.mp_tree import MP_Node
HAS_TREEBEARD = True
except ImportError:
HAS_TREEBEARD = False
try:
from mptt.models import MPTTModel
HAS_MPTT = True
except ImportError:
HAS_MPTT = False
TREEBEARD = {
'root_level': 1,
'model': {
'order' : lambda model: model.node_order_by or []
},
'node': {
'parent' : lambda node: node.get_parent(),
'prev' : lambda node: node.get_prev_sibling(),
'next' : lambda node: node.get_next_sibling(),
'ancestors' : lambda node: force_treenode(node.get_ancestors()),
'descendants': lambda node: force_treenode(node.get_descendants()),
'level' : lambda node: node.get_depth(),
'move' : lambda node: lambda target, pos: node.move(target, pos),
'is_root' : lambda node: node.is_root(),
},
}
MPTT = {
'root_level': 0,
'model': {
'order' : lambda model: model._mptt_meta.order_insertion_by or []
},
'node': {
'parent' : lambda node: getattr(node, node.__class__._mptt_meta.parent_attr, None),
'prev' : lambda node: node.get_previous_sibling(),
'next' : lambda node: node.get_next_sibling(),
'ancestors' : lambda node: node.get_ancestors(),
'descendants': lambda node: node.get_descendants(),
'level' : lambda node: getattr(node, node.__class__._mptt_meta.level_attr, 0),
'move' : lambda node: lambda target, pos: node.move_to(target, pos),
'is_root' : lambda node: node.is_root_node(),
},
}
class UnknownTreeImplementation(Exception):
pass
def get_treetype(model):
"""
Return the function mapping of the real model tree implementation.
"""
if HAS_TREEBEARD and issubclass(model, TreebeardNode):
return TREEBEARD
elif HAS_MPTT and issubclass(model, MPTTModel):
return MPTT
raise UnknownTreeImplementation('cannot map tree implementation')
class TreeQuerySet(object):
"""
    Abstract model tree queryset proxy. Its main purpose is to decorate queryset methods
    so that objects are returned as TreeNode proxy objects for
common tree attribute access. The real model tree implementation must be known and
`get_treetype` must return a method mapping for the real tree attributes
(see `MPTT` and `TREEBEARD` for example definitions).
The real queryset can be accessed via the `qs` attribute.
"""
def __init__(self, qs, treetype=None):
if isinstance(qs, TreeQuerySet):
self.qs = qs.qs
else:
self.qs = qs
self.treetype = treetype or get_treetype(self.qs.model)
def __getitem__(self, item):
item = self.qs[item]
if item:
return TreeNode(item, self.qs.model, self.treetype)
return item
def _get_next(self):
for node in self.qs:
yield TreeNode(node, self.qs.model, self.treetype)
def __iter__(self):
for node in self.qs:
yield TreeNode(node, self.qs.model, self.treetype)
    def __next__(self):
        # avoid infinite recursion of `next(self)`: iterate lazily over the nodes
        if not hasattr(self, '_node_iter'):
            self._node_iter = self._get_next()
        return next(self._node_iter)
def next(self):
return self.__next__()
def __getattr__(self, name):
try:
return self.__getattribute__(name)
except AttributeError:
attr = self.qs.__getattribute__(name)
if callable(attr):
return self.proxy(attr)
return attr
def proxy(self, attr):
def f(*args, **kwargs):
res = attr(*args, **kwargs)
if isinstance(res, self.qs.__class__):
return TreeQuerySet(res, self.treetype)
if isinstance(res, self.qs.model):
return TreeNode(res, self.qs.model, self.treetype)
return res
return f
@property
def ordering(self):
return self.treetype['model']['order'](self.qs.model)
@property
def ordering_signs(self):
return [-1 if attr.startswith('-') else 1 for attr in self.ordering]
@property
def appmodel(self):
return '%s.%s' % (self.qs.model._meta.app_label,
self.qs.model._meta.model_name)
def annotate_parent(self):
if self.treetype == MPTT:
parent_field = self.qs.model._mptt_meta.parent_attr
return TreeQuerySet(self.qs.annotate(_parent_pk=F(parent_field+'__pk')))
elif self.treetype == TREEBEARD:
if issubclass(self.qs.model, NS_Node):
sub = self.qs.model.objects.filter(
tree_id=OuterRef('tree_id'),
lft__lt=OuterRef('lft'),
rgt__gt=OuterRef('rgt')).reverse()[:1]
qs = self.qs.annotate(_parent_pk=Subquery(sub.values('pk')))
return TreeQuerySet(qs)
elif issubclass(self.qs.model, MP_Node):
sub = self.qs.model.objects.filter(path=OuterRef('parentpath'))
expr = Substr('path', 1, Length('path') - self.qs.model.steplen,
output_field=CharField())
qs = self.qs.annotate(parentpath=expr).annotate(_parent_pk=Subquery(sub.values('pk')))
return TreeQuerySet(qs)
elif issubclass(self.qs.model, AL_Node):
return TreeQuerySet(
self.qs.annotate(_parent_pk=F('parent__pk')))
        raise UnknownTreeImplementation("don't know how to annotate _parent_pk")
def get_ancestors_parent_annotated(self, include_self=False):
"""
Creates a queryset containing all parents of the queryset.
Also annotates the parent pk as `_parent_pk`.
"""
# django mptt got a ready to go method
if self.treetype == MPTT:
parent_field = self.qs.model._mptt_meta.parent_attr
return TreeQuerySet(
self.qs.get_ancestors(include_self=include_self)
.annotate(_parent_pk=F(parent_field+'__pk')))
# for treebeard we have to get the parents ourself
elif self.treetype == TREEBEARD:
if issubclass(self.qs.model, NS_Node):
filters = Q()
for node in self.qs:
if include_self:
filters |= Q(
tree_id=node.tree_id,
lft__lte=node.lft,
rgt__gte=node.rgt)
else:
filters |= Q(
tree_id=node.tree_id,
lft__lt=node.lft,
rgt__gt=node.rgt)
sub = self.qs.model.objects.filter(
tree_id=OuterRef('tree_id'),
lft__lt=OuterRef('lft'),
rgt__gt=OuterRef('rgt')).reverse()[:1]
qs = self.qs.model.objects.filter(filters)\
.annotate(_parent_pk=Subquery(sub.values('pk')))
return TreeQuerySet(qs)
elif issubclass(self.qs.model, MP_Node):
paths = set()
for node in self.qs:
length = len(node.path)
if include_self:
length += node.steplen
paths.update(node.path[0:pos]
for pos in range(node.steplen, length, node.steplen))
sub = self.qs.model.objects.filter(path=OuterRef('parentpath'))
expr = Substr('path', 1, Length('path') - self.qs.model.steplen,
output_field=CharField())
qs = self.qs.model.objects.filter(path__in=paths)\
.annotate(parentpath=expr)\
.annotate(_parent_pk=Subquery(sub.values('pk')))
return TreeQuerySet(qs)
elif issubclass(self.qs.model, AL_Node):
# worst for parent querying
# we have to walk all levels up to root
# adds roughly a one query per level
nodes = self.qs.select_related('parent')
pks = set()
parents = set()
for node in nodes:
if include_self:
pks.add(node.pk)
if node.parent:
parents.add(node.parent.pk)
missing = parents - pks
while missing:
pks.update(parents)
parents.clear()
for node in self.qs.model.objects.filter(
pk__in=missing).select_related('parent'):
if node.parent:
parents.add(node.parent.pk)
missing = parents - pks
return TreeQuerySet(
self.qs.model.objects.filter(pk__in=pks)
.annotate(_parent_pk=F('parent__pk')))
        raise UnknownTreeImplementation("don't know how to annotate _parent_pk")
#@python_2_unicode_compatible
class TreeNode(object):
"""
Abstract tree node proxy for common tree attribute access.
The real tree node can be accessed via the `node` attribute.
NOTE: Only typical tree node attributes get abstracted,
if you need a specific value from a node, access it via `node`
(e.g. `obj.node.some_field`).
"""
def __init__(self, node, model=None, treetype=None):
self.node = node
self.model = model or self.node.__class__
self.treetype = treetype or get_treetype(self.model)
def _get_real(self, name):
res = self.treetype['node'][name](self.node)
if isinstance(res, QuerySet) and res.model == self.model:
return TreeQuerySet(res, self.treetype)
if isinstance(res, self.model):
return TreeNode(res, self.model, self.treetype)
return res
def __str__(self):
return '%s' % self.node
@property
def ordering(self):
return [getattr(self.node, attr.lstrip('-'))
for attr in self.treetype['model']['order'](self.model)]
@property
def parent(self):
return self._get_real('parent')
@property
def prev_sibling(self):
return self._get_real('prev')
@property
def next_sibling(self):
return self._get_real('next')
@property
def ancestors(self):
return self._get_real('ancestors')
@property
def descendants(self):
return self._get_real('descendants')
@property
def level(self):
return self._get_real('level')
@property
def move(self):
return self._get_real('move')
@property
def pk(self):
return self.node.pk
@property
def is_root(self):
return self._get_real('is_root')
def force_treenode(it):
"""
    Helper function to enforce that the content of a returned container
    consists of TreeNode objects. This is especially useful if a manager
    or a queryset method returns a container with tree node objects
instead of a queryset.
"""
if isinstance(it, QuerySet):
return it
return (TreeNode(node) for node in it)
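# Hedged usage sketch (not used by this module): wrap any treebeard or mptt
# queryset and read common tree attributes through the proxies above; `qs` is
# whatever `SomeTreeModel.objects.all()` returns for a supported tree model.
def _example_tree_access(qs):
    nodes = TreeQuerySet(qs)
    for node in nodes:
        print(node.level, node.is_root, node.parent)
    # annotate_parent() adds a `_parent_pk` per row, independent of the backend
    return nodes.annotate_parent()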
|
from connect4 import Connect4Board
from connect4 import GameState
from connect4 import Player
import numpy as np
import random
class ForwardSearchAgent:
name = None
depth = None
discount_factor = None
agent_two_val = None
agent_three_val = None
opp_two_val = None
opp_three_val = None
def __init__(self, name, depth=3, discount_factor=0.9, agent_two_val=60, agent_three_val=80, opp_two_val=-70, opp_three_val=-90):
self.name = name
self.depth = depth
self.discount_factor = discount_factor
self.agent_two_val = agent_two_val
self.agent_three_val = agent_three_val
self.opp_two_val = opp_two_val
self.opp_three_val = opp_three_val
def get_name(self):
return self.name
def get_action(self, player, game):
next_player = {Player.PLAYER_1 : Player.PLAYER_2, Player.PLAYER_2 : Player.PLAYER_1}
opp_player = Player.PLAYER_1 if player == Player.PLAYER_2 else Player.PLAYER_2
def uniform_random_opp_actions(game):
valid_actions = [action for action in range(game.NUM_COLS) if game.valid_action(action)]
transition_prob = 1.0 / len(valid_actions)
return {action: transition_prob for action in valid_actions}
def forward_search(game, curr_player, curr_depth):
# end of depth
if curr_depth == 0:
return None, 0
best_action = None
best_val = float("-inf") if curr_player == player else float("inf")
valid_actions = [action for action in range(game.NUM_COLS) if game.valid_action(action)]
random.shuffle(valid_actions)
for action in valid_actions:
val = val_function(game)
after_move_game = game.add_piece(curr_player, action)
if after_move_game.check_draw():
val = 0
elif after_move_game.check_win(player):
val = 100000000
elif after_move_game.check_win(opp_player):
val = -100000000
else:
_, next_val = forward_search(after_move_game, next_player[curr_player], curr_depth-1 if curr_player == opp_player else curr_depth)
val += self.discount_factor * next_val
                if (curr_player == player and val > best_val) or (curr_player == opp_player and val < best_val):
best_val = val
best_action = action
return best_action, best_val
def val_function(game):
score = 0
for row in range(game.NUM_ROWS):
for col in range(game.NUM_COLS):
if col + 3 < game.NUM_COLS:
series = [game.board[row][col+i] for i in range(4)]
score += val_four_helper(series, player, opp_player)
if row + 3 < game.NUM_ROWS:
series = [game.board[row+i][col] for i in range(4)]
score += val_four_helper(series, player, opp_player)
if row + 3 < game.NUM_ROWS and col + 3 < game.NUM_COLS:
series = [game.board[row+i][col+i] for i in range(4)]
score += val_four_helper(series, player, opp_player)
if row + 3 < game.NUM_ROWS and col - 3 >= 0:
series = [game.board[row+i][col-i] for i in range(4)]
score += val_four_helper(series, player, opp_player)
return score
def val_four_helper(series, agent_player, opp_player):
if series.count(agent_player) == 4:
return 100000
if series.count(agent_player) == 3 and series.count(Player.NONE) == 1:
return self.agent_three_val
if series.count(agent_player) == 2 and series.count(Player.NONE) == 2:
return self.agent_two_val
if series.count(opp_player) == 2 and series.count(Player.NONE) == 2:
return self.opp_two_val
if series.count(opp_player) == 3 and series.count(Player.NONE) == 1:
return self.opp_three_val
if series.count(opp_player) == 4:
return -100000
return 0
action,_ = forward_search(game, player, self.depth)
return action
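# Hedged illustration (standalone helper, not used by the agent above): how a
# single 4-cell window is scored by the heuristic, using plain markers instead
# of the Player enum (1 = agent, 2 = opponent, 0 = empty) and the constructor's
# default weights.
def _example_window_score(window, agent=1, opp=2, empty=0):
    if window.count(agent) == 4:
        return 100000
    if window.count(agent) == 3 and window.count(empty) == 1:
        return 80       # agent_three_val default
    if window.count(agent) == 2 and window.count(empty) == 2:
        return 60       # agent_two_val default
    if window.count(opp) == 2 and window.count(empty) == 2:
        return -70      # opp_two_val default
    if window.count(opp) == 3 and window.count(empty) == 1:
        return -90      # opp_three_val default
    if window.count(opp) == 4:
        return -100000
    return 0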
|
import tempfile
from dagster import ResourceDefinition, fs_io_manager, mem_io_manager
from dagster_pyspark import pyspark_resource
from hacker_news_assets.pipelines.download_pipeline import download_comments_and_stories_dev
from hacker_news_assets.resources.hn_resource import hn_snapshot_client
from hacker_news_assets.resources.parquet_io_manager import partitioned_parquet_io_manager
def test_download():
with tempfile.TemporaryDirectory() as temp_dir:
result = download_comments_and_stories_dev.graph.execute_in_process(
run_config={
"resources": {
"partition_start": {"config": "2020-12-30 00:00:00"},
"partition_end": {"config": "2020-12-30 01:00:00"},
"parquet_io_manager": {"config": {"base_path": temp_dir}},
}
},
resources={
"io_manager": fs_io_manager,
"partition_start": ResourceDefinition.string_resource(),
"partition_end": ResourceDefinition.string_resource(),
"parquet_io_manager": partitioned_parquet_io_manager,
"warehouse_io_manager": mem_io_manager,
"pyspark": pyspark_resource,
"hn_client": hn_snapshot_client,
},
)
assert result.success
|
import numpy as np
import networkx as nx
import cPickle as cp
import random
import ctypes
import os
import sys
from tqdm import tqdm
sys.path.append( '%s/tsp2d_lib' % os.path.dirname(os.path.realpath(__file__)) )
from tsp2d_lib import Tsp2dLib
n_valid = 100
def find_model_file(opt):
max_n = int(opt['max_n'])
min_n = int(opt['min_n'])
log_file = None
if max_n < 100:
return None
if min_n == 100 and max_n == 200:
n1 = 50
n2 = 100
else:
n1 = min_n - 100
n2 = max_n - 100
log_file = '%s/log-%d-%d.txt' % (opt['save_dir'], n1, n2)
if not os.path.isfile(log_file):
return None
best_r = 1000000
best_it = -1
with open(log_file, 'r') as f:
for line in f:
if 'average' in line:
line = line.split(' ')
it = int(line[1].strip())
r = float(line[-1].strip())
if r < best_r:
best_r = r
best_it = it
if best_it < 0:
return None
return '%s/nrange_%d_%d_iter_%d.model' % (opt['save_dir'], n1, n2, best_it)
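# Note: the log parsing above assumes lines like the one printed by the training
# loop below, e.g. "iter 1000 lr 0.0001 eps 1.0 average tour length:  5.43",
# where the iteration is the second token and the tour length is the last one.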
def PrepareGraphs(isValid):
if isValid:
n_graphs = 100
prefix = 'validation_tsp2d'
else:
n_graphs = 10000
prefix = 'train_tsp2d'
folder = '%s/%s/tsp_min-n=%s_max-n=%s_num-graph=%d_type=%s' % (opt['data_root'], prefix, opt['min_n'], opt['max_n'], n_graphs, opt['g_type'])
with open('%s/paths.txt' % folder, 'r') as f:
for line in tqdm(f):
fname = '%s/%s' % (folder, line.split('/')[-1].strip())
coors = {}
in_sec = False
n_nodes = -1
with open(fname, 'r') as f_tsp:
for l in f_tsp:
if 'DIMENSION' in l:
n_nodes = int(l.split(' ')[-1].strip())
if in_sec:
idx, x, y = [int(w.strip()) for w in l.split(' ')]
coors[idx - 1] = [float(x) / 1000000.0, float(y) / 1000000.0]
assert len(coors) == idx
elif 'NODE_COORD_SECTION' in l:
in_sec = True
assert len(coors) == n_nodes
g = nx.Graph()
g.add_nodes_from(range(n_nodes))
nx.set_node_attributes(g, 'pos', coors)
api.InsertGraph(g, is_test=isValid)
if __name__ == '__main__':
api = Tsp2dLib(sys.argv)
opt = {}
for i in range(1, len(sys.argv), 2):
opt[sys.argv[i][1:]] = sys.argv[i + 1]
model_file = find_model_file(opt)
if model_file is not None:
print 'loading', model_file
sys.stdout.flush()
api.LoadModel(model_file)
PrepareGraphs(isValid=True)
PrepareGraphs(isValid=False)
# startup
for i in range(10):
api.lib.PlayGame(100, ctypes.c_double(1.0))
api.TakeSnapshot()
eps_start = 1.0
eps_end = 1.0
eps_step = 10000.0
api.lib.SetSign(1)
lr = float(opt['learning_rate'])
for iter in range(int(opt['max_iter'])):
eps = eps_end + max(0., (eps_start - eps_end) * (eps_step - iter) / eps_step)
if iter % 10 == 0:
api.lib.PlayGame(10, ctypes.c_double(eps))
if iter % 100 == 0:
frac = 0.0
for idx in range(n_valid):
frac += api.lib.Test(idx)
print 'iter', iter, 'lr', lr, 'eps', eps, 'average tour length: ', frac / n_valid
sys.stdout.flush()
model_path = '%s/nrange_%d_%d_iter_%d.model' % (opt['save_dir'], int(opt['min_n']), int(opt['max_n']), iter)
api.SaveModel(model_path)
if iter % 1000 == 0:
api.TakeSnapshot()
lr = lr * 0.95
api.lib.Fit(ctypes.c_double(lr))
|
"""
This module is specifically intended for use in environments where
you're actively trying to share/develop tools across multiple applications
which support PyQt, PySide or PySide2.
The premise is that you can request the main application window using
a common function regardless of the actual application - making it trivial
to implement a tool which works in multiple host applications without any
bespoke code.
The currently supported applications are:
* Native Python
* Maya
* 3dsmax
* Motion Builder
* Houdini
"""
import sys
from ..vendor import Qt
# Python 2/3 compat
# TODO: Use six.
try:
long
except NameError:
long = int
# ------------------------------------------------------------------------------
def get_host():
global HOST
if HOST:
pass
elif ('maya.exe' in sys.executable or
'mayapy.exe' in sys.executable):
HOST = 'Maya'
elif ('motionbuilder.exe' in sys.executable or
'mobupy.exe' in sys.executable):
HOST = 'Mobu'
elif '3dsmax.exe' in sys.executable:
HOST = 'Max'
elif any(houdini_exec in sys.executable
for houdini_exec in ['houdini.exe',
'houdinifx.exe',
'houdinicore.exe']):
HOST = 'Houdini'
return HOST
# ------------------------------------------------------------------------------
# noinspection PyPep8Naming
def mainWindow():
"""
Returns the main window regardless of what the host is
:return:
"""
return HOST_MAPPING[get_host()]()
# ------------------------------------------------------------------------------
# noinspection PyUnresolvedReferences,PyPep8Naming
def returnNativeWindow():
for candidate in Qt.QtWidgets.QApplication.topLevelWidgets():
if isinstance(candidate, Qt.QtWidgets.QMainWindow):
return candidate
# ------------------------------------------------------------------------------
# noinspection PyUnresolvedReferences,PyPep8Naming
def _findWindowByTitle(title):
# -- Find the main application window
for candidate in Qt.QtWidgets.QApplication.topLevelWidgets():
# noinspection PyBroadException
try:
if title in candidate.windowTitle():
return candidate
except Exception:
pass
# ------------------------------------------------------------------------------
# noinspection PyPep8Naming
def returnModoMainWindow():
pass
# ------------------------------------------------------------------------------
# noinspection PyPep8Naming
def returnMaxMainWindow():
return _findWindowByTitle('Autodesk 3ds Max')
# ------------------------------------------------------------------------------
# noinspection PyUnresolvedReferences,PyPep8Naming
def returnMayaMainWindow():
from maya import OpenMayaUI as omui
return Qt.QtCompat.wrapInstance(
long(omui.MQtUtil.mainWindow()),
Qt.QtWidgets.QWidget,
)
# ------------------------------------------------------------------------------
# noinspection PyPep8Naming
def returnHoudiniMainWindow():
import hou
return hou.qt.mainWindow()
# ------------------------------------------------------------------------------
# noinspection PyPep8Naming
def returnMobuMainWindow():
return _findWindowByTitle('MotionBuilder 20')
# ------------------------------------------------------------------------------
HOST = None
HOST_MAPPING = {
None: returnNativeWindow,
'Maya': returnMayaMainWindow,
'Max': returnMaxMainWindow,
'Modo': returnModoMainWindow,
'Mobu': returnMobuMainWindow,
'Houdini': returnHoudiniMainWindow,
}
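# ------------------------------------------------------------------------------
# noinspection PyPep8Naming
def _exampleShowDialog():
    """
    Hedged usage sketch (not called anywhere): parent a dialog to whichever
    application is currently hosting the interpreter.
    """
    parent = mainWindow()
    dialog = Qt.QtWidgets.QDialog(parent=parent)
    dialog.show()
    return dialog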
|
# -*- coding: utf-8 -*-
"""
Web-Interface for JabRef library.
It lists the entries in a given JabRef library and provides a simple
search bar to filter for the entries you're looking for. Currently, it
provides read-only access to the library without any possibility to modify
existing entries or to add new ones.
"""
from flask import Flask, render_template, request, send_from_directory
from flask_sqlalchemy import SQLAlchemy
from flask_sqlalchemy import sqlalchemy as sa
from ppf.jabref import Entry, Field, split_by_unescaped_sep
from pathlib import Path
# Credential management
#
# This application is meant to run inside a docker container.
# Docker provides a mechanism to manage secrets. The user
# creates a container, adds a (named) secret, and runs the
# container.
# Inside the container, the named secret is available in the text file
# /run/secrets/<secret-name>.
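# Illustrative layout (the values are assumptions, not taken from this project):
#   /run/secrets/sqlusername     -> jabref
#   /run/secrets/sqlpassword     -> s3cret
#   /run/secrets/sqlserver       -> db.example.org
#   /run/secrets/sqldatabasename -> jabref_library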
sqlusername = open('/run/secrets/sqlusername').readline().strip()
sqlpassword = open('/run/secrets/sqlpassword').readline().strip()
sqlserver = open('/run/secrets/sqlserver').readline().strip()
sqldatabasename = open('/run/secrets/sqldatabasename').readline().strip()
app = Flask(__name__,
static_url_path='',
static_folder='static')
app.config['SQLALCHEMY_DATABASE_URI'] = ('mysql+pymysql://'
f'{sqlusername}:{sqlpassword}'
f'@{sqlserver}/{sqldatabasename}')
db = SQLAlchemy(app)
@app.route('/')
def root():
"""Show WebApp."""
return app.send_static_file('index.html')
@app.route('/references/<path:path>')
def send_reference(path):
"""Send reference."""
return send_from_directory('references', path)
@app.route('/loadEntries.php', methods=['POST'])
def loadEntries():
"""Return entries from library matching search expression."""
searchexpr = request.form.get('searchexpr')
patternmatchingQ = (sa.select(Field.entry_shared_id)
.where(Field.value.op('rlike')(searchexpr))
.distinct())
entryQ = (sa.select(Entry)
.where(Entry.shared_id.in_(patternmatchingQ)))
entries = [{f: entry[0].fields.get(f, None)
for f in ['author', 'title', 'year', 'file']}
for entry in db.session.execute(entryQ)]
basepath = Path('references')
for entry in entries:
if entry['file'] is not None:
filepath = Path(split_by_unescaped_sep(entry['file'])[1])
entry['file'] = basepath / filepath
if not entry['file'].exists() or filepath.is_absolute():
entry['file'] = None
return render_template('entry_table.tmpl', entries=entries)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
|
from django.urls import path, include
from django.contrib import admin
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
urlpatterns = [
path('admin/', admin.site.urls),
# dj-rest-auth endpoints
path('dj-rest-auth/', include('dj_rest_auth.urls')),
path('dj-rest-auth/registration/', include('dj_rest_auth.registration.urls')),
# django-rest-framework-simplejwt endpoints
path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh')
]
|
from __future__ import absolute_import
from pyti import catch_errors
from pyti.exponential_moving_average import (
exponential_moving_average as ema
)
def price_oscillator(data, short_period, long_period):
"""
Price Oscillator.
Formula:
    ((short EMA - long EMA) / long EMA) * 100
"""
catch_errors.check_for_period_error(data, short_period)
catch_errors.check_for_period_error(data, long_period)
ema_short = ema(data, short_period)
ema_long = ema(data, long_period)
po = ((ema_short - ema_long) / ema_long) * 100
return po
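if __name__ == "__main__":
    # Hedged usage sketch with illustrative closing prices; the leading values
    # of the result are NaN until the long EMA has enough data points.
    closes = [10.0, 10.5, 11.2, 11.0, 11.8, 12.1, 12.4, 12.0, 12.6, 13.0]
    print(price_oscillator(closes, 3, 6))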
|
from copy import deepcopy
from random import randint, uniform, choice
class EAConfig:
"""
This class sets up the parameters for EA
"""
def __init__(self,
mut = 0.30,
cross = 0.30,
cand_size = 10,
max_cand_value = 8, # for real and int representation
rep = 0, # 0 = binary, 1 = integer, else = real
pop_size = 10, # min = 2
max_gen = 3
):
"""
:param mut: float, rate at which a mutation occurs
:param cross: float, rate at which crossover occurs
:param cand_size: int, size of the candidate representation
:param max_cand_value: int or float, the max value that a value from the
representation can take, for int or real representations only
:param rep: int, defines the type of representation, 0 for binary, 1 for integer
and anything else for real
:param pop_size: int, defines the size of a population in the EA algorithm
:param max_gen: int, defines the max number of generations in the EA
"""
self.mutation_rate = mut
self.crossover_rate = cross
self.cand_size = cand_size
self.max_cand_value = max_cand_value
self.rep_type = rep
self.pop_size = pop_size
self.tourn_size = 2 + round(self.pop_size*0.05)
self.max_gen = max_gen
self.scoredic = {}
self.val_dic = {}
self.fit_dic = {}
def __str__(self):
configdic = {
"mut": self.mutation_rate,
"cross": self.crossover_rate,
"cand size": self.cand_size,
"max_cand": self.max_cand_value,
"rep type": self.rep_type,
"pop size": self.pop_size,
"tourn size": self.tourn_size,
"max_gen": self.max_gen,
# "scoredic": self.scoredic,
}
return str(configdic)
class Candidate:
"""
    Class to represent each candidate in a population. It is filled with a
    representation when the population is generated, and with the score for
    that representation after being evaluated
"""
def __init__(self, rep):
"""
:param rep: list of int or list of float, depending on the rep_type
"""
self.rep = rep # candidate representation
self.score = None # filled during candidate evaluation
self.values = None # filled during candidate evaluation
self.fit_list = None # filled during candidate evaluation
def __str__(self):
return str("{}: {}".format(self.rep, self.score))
def update(self):
"""
updates the candidate information, used when the candidate has already
been evaluated
:return: nothing
"""
self.score = config.scoredic[str(self.rep)]
self.fit_list = config.fit_dic[str(self.rep)]
self.values = config.val_dic[str(self.rep)]
def set_cand_values(self, fit_list, val, score):
"""
sets the values of a candidate object
:param fit_list: list of values relative to the fitness reaction
:param val: list of all values
:param score: evaluation score for fitness
:return: nothing
"""
config.fit_dic[str(self.rep)] = fit_list
self.fit_list = config.fit_dic[str(self.rep)]
config.val_dic[str(self.rep)] = val
self.values = config.val_dic[str(self.rep)]
config.scoredic[str(self.rep)] = score
self.score = config.scoredic[str(self.rep)]
config = EAConfig()
def change_config(
pop_size = None,
max_gen = None,
cand = None,
rep = None,
max_val = None,
mut = None,
cross = None
):
"""
used to change the values of the EA parameters
:param pop_size: int, defines the size of a population in the EA algorithm
:param max_gen: int, defines the max number of generations in the EA
:param cand: int, size of the candidate representation
:param rep: int, defines the type of representation, 0 for binary, 1 for integer
and anything else for real
:param max_val: int or float, the max value that a value from the
representation can take, for int or real representations only
:param mut: float, rate at which a mutation occurs
:param cross: float, rate at which crossover occurs
:return: nothing
"""
if pop_size:
config.pop_size = pop_size
if max_gen:
config.max_gen = max_gen
if cand:
config.cand_size = cand
if rep or rep == 0:
config.rep_type = rep
if max_val:
config.max_cand_value = max_val
if mut or mut == 0:
config.mutation_rate = mut
if cross or cross == 0:
config.crossover_rate = cross
def reset_config():
"""
resets (empties) the dictionary parameters of the EAConfig object
:return: nothing
"""
config.scoredic = {}
config.val_dic = {}
config.fit_dic = {}
def binary_representation():
"""
creates a binary representation
:return: a list of random binary values, with at least one 1
"""
rep = [randint(0, 1) for _ in range(config.cand_size)]
if sum(rep) == 0:
rep = binary_representation()
return rep
def int_representation():
"""
creates an integer representation
:return: a list of sorted non repeated random integers
"""
int_rep = sorted([randint(0, config.max_cand_value) for _ in range(config.cand_size)])
if len(int_rep) == len(set(int_rep)):
return int_rep
else:
return int_representation()
def real_representation():
"""
creates a real representation
:return: a list of random real values
"""
return sorted([uniform(0, config.max_cand_value) for _ in range(config.cand_size)])
def binary_to_int_rep(rep):
"""
converts binary representations to integer format by creating a list
of the indexes of the zeros in the original representation
:param rep: list of binary values, representation in binary
:return: representation in integer
"""
return [i for i in range(len(rep)) if rep[i] == 0]
def inverse_int_rep(int_rep):
"""
converts an integer representation with the values possible that are not
present in the original
:param int_rep: list of integers
:return: list of integers, the inverse representation of the one introduced
"""
value = 0
if config.rep_type == 0:
value = config.cand_size
elif config.rep_type == 1:
value = config.max_cand_value + 1
new_rep = []
for ind in range(value):
if ind not in int_rep:
new_rep.append(ind)
return new_rep
def bit_flip_mutation_binary(candidate, pos = None):
"""
alters a random or selected binary value in the representation of a candidate
:param candidate: candidate object
:param pos: mutation index, autogenerated if not present
:return: candidate object, mutated
"""
rep = candidate.rep.copy()
    if pos is None:
pos = randint(0, len(rep)-1)
if rep[pos] == 0:
rep[pos] = 1
elif rep[pos] == 1:
rep[pos] = 0
if sum(rep) == 0:
return candidate
return Candidate(rep)
def bit_flip_mutation_int(candidate, pos = None):
"""
alters a random or selected integer value in the representation of a candidate,
if the result has duplicate values, returns the original
:param candidate: candidate object
:param pos: mutation index, autogenerated if not present
:return: candidate object, mutated
"""
rep = candidate.rep.copy()
    if pos is None:
pos = randint(0, len(rep)-1)
rep[pos] = randint(0, config.max_cand_value)
if len(rep) == len(set(rep)):
return Candidate(sorted(rep))
else:
return candidate
def bit_flip_mutation_real(candidate, pos = None):
"""
alters a random or selected real value in the representation of a candidate
:param candidate: candidate object
:param pos: mutation index, autogenerated if not present
:return: candidate object
"""
rep = candidate.rep.copy()
    if pos is None:
pos = randint(0, len(rep)-1)
rep[pos] = uniform(0, config.max_cand_value)
return Candidate(sorted(rep))
def one_point_crossover(par1, par2):
"""
parts the representation of two parent candidates at a random point and
creates two new candidates with the beginning of one and the end of another parent
if one of the new candidate representations has duplicate values (for non binary
rep_type) it returns the original candidates instead
:param par1: candidate object
:param par2: candidate object
:return: two candidate objects
"""
pos = randint(0, len(par1.rep)-1)
rep_child1 = par1.rep[:pos]+par2.rep[pos:]
rep_child2 = par2.rep[:pos]+par1.rep[pos:]
if config.rep_type == 0:
return Candidate(rep_child1), Candidate(rep_child2)
elif config.rep_type != 0:
if (len(rep_child1) == len(set(rep_child1))) and (len(rep_child2) == len(set(rep_child2))):
return Candidate(sorted(rep_child1)), Candidate(sorted(rep_child2))
else:
return par1, par2
def uniform_crossover(par1, par2):
"""
it creates two new candidates, for every index of the candidate representations,
it will randomly assign the value to one of the new candidates
if the new candidates have duplicate values, it returns the original candidates
:param par1: candidate object
:param par2: candidate object
:return: two candidate objects
"""
rep_child1 = []
rep_child2 = []
for i in range(len(par1.rep)):
j = randint(0, 1)
if j == 0:
rep_child1.append(par1.rep[i])
rep_child2.append(par2.rep[i])
if j == 1:
rep_child1.append(par2.rep[i])
rep_child2.append(par1.rep[i])
if config.rep_type == 0:
return Candidate(rep_child1), Candidate(rep_child2)
elif config.rep_type != 0:
if (len(rep_child1) == len(set(rep_child1))) and (len(rep_child2) == len(set(rep_child2))):
return Candidate(sorted(rep_child1)), Candidate(sorted(rep_child2))
else:
return par1, par2
def generate_random_popu():
"""
creates a list of candidates with random representations according to the
previously defined rep_type
:return: a list of candidate objects
"""
populist = []
for _ in range(config.pop_size):
if config.rep_type == 0:
rep = binary_representation()
elif config.rep_type == 1:
rep = int_representation()
else:
rep = real_representation()
cand = Candidate(rep)
populist.append(cand)
return populist
def generate_headstart_popu(sample):
"""
generates a semi-random population given a sample to start from.
all the generated candidates will have, at least, the indexes present in the sample
:param sample: a candidate representation, smaller than the cand_size variable
:return: a list of candidate objects
"""
populist = []
for _ in range(config.pop_size):
if config.rep_type == 0:
rep = deepcopy(sample)
for i in range(len(rep)):
if rep[i] == 0:
rep[i] = randint(0, 1)
elif config.rep_type == 1:
rep = deepcopy(sample)
while len(set(rep)) < config.cand_size:
rep.append(randint(0, config.max_cand_value))
rep = sorted(list(set(rep)))
else:
rep = deepcopy(sample)
            while len(set(rep)) < config.cand_size:
rep.append(uniform(0, config.max_cand_value))
rep = sorted(list(set(rep)))
cand = Candidate(rep)
populist.append(cand)
return populist
def new_popu_tourn(old_popu):
"""
    repeatedly runs two tournament selections (each picking the best of
    `tourn_size` randomly chosen candidates) to create two new candidates,
    which are added to a new list until it reaches the desired population size
:param old_popu: list of candidate objects
:return: list of candidate objects
"""
new_popu = []
keep_best = 0
best_cand = None
for cand in old_popu:
if cand.score > keep_best:
keep_best = cand.score
best_cand = cand
if best_cand:
new_popu.append(best_cand)
while len(new_popu) < config.pop_size:
par1 = select_candidate(old_popu)
par2 = select_candidate(old_popu)
sib1, sib2 = maybe_crossover(par1, par2)
sib1 = maybe_mutate(sib1)
sib2 = maybe_mutate(sib2)
new_popu.append(sib1)
new_popu.append(sib2)
new_popu = new_popu[:config.pop_size]
return new_popu
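def _example_ea_generation():
    """
    Hedged usage sketch (not called by the module): run a single tournament
    generation with a toy fitness that simply counts the ones in a binary
    candidate; the fitness is purely illustrative.
    """
    change_config(pop_size=6, cand=5, rep=0, mut=0.3, cross=0.3)
    popu = generate_random_popu()
    for cand in popu:
        score = float(sum(cand.rep))  # toy fitness
        cand.set_cand_values(fit_list=list(cand.rep), val=list(cand.rep), score=score)
    return new_popu_tourn(popu)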
def new_popu_changeworst(old_popu, quantity):
"""
generates a new list of candidates by changing the x number of members
that least contribute to the overall fitness
:param old_popu: a list of candidate objects
:param quantity: the number of members to be changed in each candidate
:return: a list of candidates
"""
new_popu = []
keep_best = 0
for cand in old_popu:
if cand.score > keep_best:
keep_best = cand.score
for cand in old_popu:
worst_quantity = quantity
new_cand = deepcopy(cand)
if new_cand.score == keep_best:
pass
else:
if config.rep_type == 0: # when change worst quantity is bigger
if sum(cand.rep) - 1 <= quantity: # than the number of organisms in the candidate
                    worst_quantity = sum(cand.rep) - 1 # cap it at one less than the number of organisms present
worst = find_worst(cand.fit_list, worst_quantity)
for i in worst:
if config.rep_type == 0:
new_cand = bit_flip_mutation_binary(new_cand, i)
elif config.rep_type == 1:
new_cand = bit_flip_mutation_int(new_cand, i)
else:
new_cand = bit_flip_mutation_real(new_cand, i)
reverse_worst = inverse_int_rep(worst)
keep_mut_rate = deepcopy(config.mutation_rate)
change_config(mut = 0.15)
if config.rep_type == 0:
for i in reverse_worst:
                    new_cand = maybe_mutate(new_cand, i)  # mutate the already-updated candidate
change_config(mut = keep_mut_rate)
new_popu.append(new_cand)
return new_popu
def find_worst(list_of_values, quantity):
"""
auxiliary function to changeworst,
finds the indexes of the worst performing members
:param list_of_values: list of values relative to the members of the candidate
used to determine which is the worst performing ones
:param quantity: the quantity of worst members
:return: a list with indexes of the worst candidates, to be eliminated
"""
if len(list_of_values) < quantity:
raise Exception("Quantity should be lower than the number of models present.")
worst_list = sorted([i for i in list_of_values if i])[:quantity]
worst_ind = []
for worst in worst_list:
for i in range(len(list_of_values)):
if list_of_values[i] == worst:
worst_ind.append(i)
return list(set(worst_ind))
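# Worked example (illustrative): find_worst([0.0, 0.4, 0.1, 0.3], 2) skips the
# falsy 0.0 entry, keeps the two smallest remaining values (0.1 and 0.3), and
# returns their indexes, i.e. [2, 3].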
def new_popu_keep_headstart_tourn(old_popu, sample):
"""
generates a new population by tournament and after alters the candidates
to include specific members
:param old_popu: a list of candidate objects
:param sample: a candidate representation, smaller than cand_size
:return: a list of candidate objects
"""
new_popu = new_popu_tourn(old_popu)
for cand in new_popu:
if config.rep_type == 0:
for i in range(len(sample)):
if sample[i] == 1:
cand.rep[i] = 1
else:
to_choose_from = [i for i in cand.rep if i not in sample]
new_rep = deepcopy(sample)
while len(set(new_rep)) < config.cand_size:
new_rep.append(choice(to_choose_from))
cand.rep = sorted(list(set(new_rep)))
return new_popu
def select_candidate(popu):
"""
selects a number of random candidates and returns the one from those with
the best score
:param popu: a list of candidate objects
:return: candidate object, the candidate with the best score
"""
cands = [randint(0, config.pop_size - 1) for _ in range(config.tourn_size)]
bestcand = []
bestcandscore = -99999
for i in cands:
if popu[i].score > bestcandscore:
bestcandscore = popu[i].score
bestcand = popu[i]
return bestcand
def maybe_crossover(par1, par2):
"""
determines randomly whether and which crossover occurs
:param par1: candidate object
    :param par2: candidate object
:return: two candidate objects
"""
    randval = uniform(0, 1)
    if randval > config.crossover_rate:
        return par1, par2
    # crossover occurs: pick one of the two operators with equal probability
    if randint(0, 1) == 0:
        return uniform_crossover(par1, par2)
    else:
        return one_point_crossover(par1, par2)
def maybe_mutate(cand, pos = None):
"""
determines randomly whether mutation occurs
:param cand: candidate object
:param pos: index position if necessary
:return: candidate object
"""
randval = uniform(0, 1)
    if randval > config.mutation_rate:  # mutate only with probability mutation_rate
return cand
else:
if config.rep_type == 0:
return bit_flip_mutation_binary(cand, pos)
elif config.rep_type == 1:
return bit_flip_mutation_int(cand, pos)
else:
return bit_flip_mutation_real(cand, pos)
def sample_size_check(option_list, quantity):
"""
raises errors if the size of the sample is incoherent with the chosen options
:param option_list: option list parameter used in ea_run
:param quantity: quantity parameter used in ea_run
:return: nothing, raises errors when detected
"""
if (option_list[0][0] == "headstart") or (option_list[0][0] == 1):
if quantity == 0:
if len(option_list[0][1]) != config.cand_size:
raise Exception("Sample must have same length as candidate size.")
if quantity > 0:
if len(option_list[0][1]) > quantity:
raise Exception("Sample length must not be lower than quantity.")
if (option_list[1][0] == "keep") or (option_list[1][0] == 2):
if quantity == 0:
if len(option_list[1][1]) != config.cand_size:
raise Exception("Sample must have same length as candidate size.")
if quantity > 0:
if len(option_list[1][1]) > quantity:
raise Exception("Sample length must not be lower than quantity.")
def create_constraints(reac_list, lb = 0, up = 0):
"""
creates a dictionary of constraints ready to be used on other functions that use fba
:param reac_list: list of str, list of reaction ids to be constrained
:param lb: int or float, value of the lower bound
:param up: int or float, value of the upper bound
:return: dict, a dictionary with reaction ids as keys, and tuples of lower and upper
bounds as values
"""
if lb > up:
raise Exception("Lower bound must be lower than upper bound")
cons_dic = {}
for reac in reac_list:
cons_dic[reac] = (lb, up)
return cons_dic
def get_predecessor_reacs(model, reac_id):
"""
    recovers the reactions that produce the first substrate of the input reaction
:param model: framed model object
:param reac_id: str, reaction id
:return: list of str reaction ids
"""
res_list = []
target_reac = model.reactions[reac_id]
subs = target_reac.get_substrates()[0]
for reaction in model.reactions:
products = model.reactions[reaction].get_products()
if subs in products:
res_list.append(reaction)
return res_list
def get_fit_reac_values(cmodel, val, fit_reacs, indexes):
"""
this function takes a CModel object, a list its respective flux values,
and a list of reactions of which the values are to be retrieved
and returns the values in a list
:param cmodel: cmodel object
:param val: values parameter from solution object
:param fit_reacs: reactions related with the fitness evaluation
:param indexes: indexes of the individuals present
:return: a list of the values related to the reactions in fit_reacs
"""
relevant_fit_values = []
target_ids = [cmodel.models[i].id for i in indexes]
relevant_fit_reacs = [0 for _ in target_ids]
for ind in range(len(target_ids)):
for fit_reac in fit_reacs:
if fit_reac.endswith(target_ids[ind]):
relevant_fit_reacs[ind] = fit_reac
for reac in relevant_fit_reacs:
if reac:
relevant_fit_values.append(val[reac])
else:
relevant_fit_values.append(0)
return relevant_fit_values
if __name__ == '__main__':
print(binary_representation())
|
# Generated by Django 2.0.2 on 2018-05-02 14:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('card_id', models.CharField(max_length=100, verbose_name='Kortnummer')),
('image', models.ImageField(blank=True, upload_to='profile', verbose_name='Profilbilde')),
('on_make', models.BooleanField(default=False, verbose_name='Innsjekkingsstatus')),
('last_checkin', models.DateTimeField(auto_now=True, verbose_name='Sist sjekket inn')),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='RegisterProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('card_id', models.CharField(max_length=100, verbose_name='Kortnummer')),
('last_scan', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique=True, verbose_name='Ferdighet')),
('image', models.ImageField(blank=True, upload_to='skills', verbose_name='Ferdighetbilde')),
],
),
migrations.CreateModel(
name='SuggestSkill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique=True, verbose_name='Foreslått ferdighet')),
('approved', models.BooleanField(default=False)),
('image', models.ImageField(blank=True, upload_to='skills', verbose_name='Ferdighetbilde')),
('creator', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='suggestions', to='checkin.Profile')),
('voters', models.ManyToManyField(related_name='votes', to='checkin.Profile')),
],
options={
'ordering': ('title',),
},
),
migrations.CreateModel(
name='UserSkill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('skill_level', models.IntegerField(choices=[(1, 'Nybegynner'), (2, 'Viderekommen'), (3, 'Ekspert')])),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='checkin.Profile')),
('skill', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='checkin.Skill')),
],
options={
'ordering': ('skill__title',),
},
),
]
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Task Python."""
import inspect
import logging
import re
import types
from typing import Union
from pydolphinscheduler.constants import TaskType
from pydolphinscheduler.core.task import Task
from pydolphinscheduler.exceptions import PyDSParamException
log = logging.getLogger(__file__)
class Python(Task):
"""Task Python object, declare behavior for Python task to dolphinscheduler.
    Python task supports two types of parameters for :param:``definition``, and here is an example:
    Using str type of :param:``definition``
    .. code-block:: python
        python_task = Python(name="str_type", definition="print('Hello Python task.')")
    Or using Python callable type of :param:``definition``
    .. code-block:: python
        def foo():
            print("Hello Python task.")
        python_task = Python(name="func_type", definition=foo)
    :param name: The name for Python task. It defines the task name.
    :param definition: String format of the Python script you want to execute, or a Python
        callable you want to execute.
"""
_task_custom_attr = {
"raw_script",
}
def __init__(
self, name: str, definition: Union[str, types.FunctionType], *args, **kwargs
):
super().__init__(name, TaskType.PYTHON, *args, **kwargs)
self.definition = definition
def _build_exe_str(self) -> str:
"""Build executable string from given definition.
        Attribute ``self.definition`` is usually a function; after converting it to source text we
        still need to call it, so we append a ``func()`` call to the generated script.
"""
if isinstance(self.definition, types.FunctionType):
py_function = inspect.getsource(self.definition)
func_str = f"{py_function}{self.definition.__name__}()"
else:
pattern = re.compile("^def (\\w+)\\(")
find = pattern.findall(self.definition)
if not find:
log.warning(
"Python definition is simple script instead of function, with value %s",
self.definition,
)
return self.definition
# Keep function str and function callable always have one blank line
func_str = (
f"{self.definition}{find[0]}()"
if self.definition.endswith("\n")
else f"{self.definition}\n{find[0]}()"
)
return func_str
@property
def raw_script(self) -> str:
"""Get python task define attribute `raw_script`."""
if isinstance(self.definition, (str, types.FunctionType)):
return self._build_exe_str()
else:
            raise PyDSParamException(
                "Parameter definition does not support type %s for now." % type(self.definition)
)
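# Illustrative note (not executed): for a callable definition such as
#     def foo():
#         print("Hello Python task.")
# ``raw_script`` becomes the function source followed by a call:
#     def foo():
#         print("Hello Python task.")
#     foo()
# A plain-string definition without a leading ``def`` is passed through unchanged.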
|
from flask import Flask, render_template_string, send_from_directory
from modules import stats_db, make_plot
app = Flask(__name__)
@app.route('/<path:filename>')
def send_public_file(filename):
return send_from_directory('public/', filename)
@app.route('/')
def index():
# language=html
template = '''
<html lang="en">
<head>
<title>Test Scripts</title>
<script src="plotly-latest.min.js"></script>
</head>
<body>
<div>{{ sys_info }}</div>
<div id="temp-humid-graph" style="width:800px;height:400px;"></div>
<img src="{{ plot1 }}" alt="plot1"/>
<img src="{{ plot2 }}" alt="plot2"/>
<img src="{{ plot3 }}" alt="plot3"/>
<script>
var elem = document.getElementById('temp-humid-graph');
Plotly.plot(
elem,
[{
x: {{ temp_x }},
y: {{ temp_y }}
},
{
x: {{ humid_x }},
y: {{ humid_y }}
}
], {
margin: { t: 0 } } );
</script>
</body>
</html>
'''
data1 = stats_db.fetch_dht_all()
info = str(len(data1))
x = [x[0] for x in data1]
y_temp = [x[1] for x in data1]
y_humid = [x[2] for x in data1]
plot1 = make_plot.generate_base64_plot(x, y_temp, 'Temperature', 'time', 'C')
plot2 = make_plot.generate_base64_plot(x, y_humid, "Humidity", "time", "%")
data2 = stats_db.fetch_cpu_all()
x2 = [x[0] for x in data2]
y_cpu = [x[1] for x in data2]
plot3 = make_plot.generate_base64_plot(x2, y_cpu, 'Cpu', 'time', '%')
return render_template_string(template, sys_info=info, plot1=plot1, plot2=plot2, plot3=plot3,
temp_x=str(x), temp_y=str(y_temp), humid_x=str(x), humid_y=str(y_humid))
@app.after_request
def set_response_headers(response):
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '0'
return response
app.run('localhost', 8000)
|
from django.urls import path
from .views import Registrarse, equipoView
urlpatterns = [
    path('signup/', Registrarse.as_view(), name='registrarse'),
    path('equipo/', equipoView, name='equipo'),
]
|
import os
import random
from mnist import MNIST
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# rounded_binarized - convert training dataset to arrays of 0,1
#mndata = MNIST('mnist_dataset', mode="rounded_binarized")
mndata = MNIST('mnist_dataset')
print('Loading training dataset...')
images, labels = mndata.load_training()
#images, labels = mndata.load_testing()
"""
while True:
index = random.randrange(0, len(images)) # choose an index ;-)
print(mndata.display(images[index], threshold = 0))
#print(mndata.display(images[index], threshold = 0)) # for binarized
os.system('cls')
"""
print(labels[1])
print(images[1])
print(mndata.display(images[1]))
#print(len(images[0])) # 784
|
"""
Example of using a Decision Tree
to remodel a photograph
"""
from sklearn.tree import DecisionTreeRegressor
from PIL import Image
import numpy as np
import pandas as pd
# Put your own file name here
INPUT_FILE = 'bbtor.jpg'
OUTPUT_FILE = 'output.png'
# Experiment with these parameters
SAMPLE_SIZE = 50000
DEPTH = 200
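# Larger SAMPLE_SIZE and DEPTH reproduce the photo more faithfully at the cost
# of fitting time; smaller values give a coarser, more "blocky" rendering.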
# read an input RGB image
im = Image.open(INPUT_FILE)
target = np.array(im)
# loop through color channels
result = target.copy()
for i in range(3):
df = pd.DataFrame(target[:,:,i])
df = df.stack().reset_index()
df.columns = ['x', 'y', 'color']
training = df.sample(SAMPLE_SIZE)
# train a model that predicts the color from the coordinates
X = training[['x','y']]
m = DecisionTreeRegressor(max_depth=DEPTH)
m.fit(X, training['color'])
ypred = m.predict(df[['x', 'y']]) # predict on all data
# merge the prediction into the result image
result[:,:,i] = ypred.reshape((im.size[1], im.size[0]))
output = Image.fromarray(result)
output.save(OUTPUT_FILE)
|
import os
def environ_get(key, default=None):
retval = os.environ.get(key, default=default)
if key not in os.environ:
print(
"environ_get: Env Var not defined! Using default! Attempted={}, default={}".format(
key, default
)
)
return retval
def environ_append(key, value, separator=" ", force=False):
old_value = os.environ.get(key)
if old_value is not None:
value = old_value + separator + value
os.environ[key] = value
def environ_prepend(key, value, separator=" ", force=False):
old_value = os.environ.get(key)
if old_value is not None:
value = value + separator + old_value
os.environ[key] = value
def environ_remove(key, value, separator=":", force=False):
old_value = os.environ.get(key)
if old_value is not None:
old_value_split = old_value.split(separator)
value_split = [x for x in old_value_split if x != value]
value = separator.join(value_split)
os.environ[key] = value
def environ_set(key, value):
os.environ[key] = value
def path_append(value):
if os.path.exists(value):
environ_append("PATH", value, ":")
def path_prepend(value, force=False):
if os.path.exists(value):
environ_prepend("PATH", value, ":", force)
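if __name__ == "__main__":
    # Hedged usage sketch: the variable names and paths below are illustrative.
    environ_set("MY_TOOL_MODE", "debug")
    environ_append("MY_TOOL_FLAGS", "-O2")   # space-separated by default
    path_prepend("/usr/local/bin")           # only prepended if the path exists
    print(environ_get("MY_TOOL_MODE"), environ_get("MY_TOOL_FLAGS"))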
|
import string # The random and string libraries are used to generate a random string with flexible criteria
import random
# Random Key Generator
key = ''.join(random.choice(string.ascii_lowercase + string.ascii_uppercase + string.digits + '^!\$%&/()=?{[]}+~#-_.:,;<>|\\') for _ in range(1024))
# the for loop defines the key size; the key is 1 KB which, if you remember our TCP shell, matches the TCP socket size :)
# "".join puts the randomly chosen characters into a single sequence, which we finally store in the key variable
# so all in all the for loop generates 1024 random characters matching our criteria, and .join gathers them into one string
print key
print '\n' + 'Key length = ' + str(len(key))
# After we generate the XOR key, take the XOR encryption rule into consideration: the key length must be greater than or equal to the length of the msg/data
# which we will send over the tunnel, i.e. len(key) >= len(message)
message = 'ipconfig' # this is the message which we will encrypt before it gets sent
print "Msg is " + message + '\n'
# here I defined a dedicated function called str_xor; we pass two values to this function: the first is the message (s1) that we want to encrypt or decrypt,
# and the second parameter is the xor key (s2). We can handle both the encryption and the decryption phases in one function because the xor operation is exactly the
# same in both directions; the only difference is that when we encrypt we pass the message in clear text, and when we decrypt we pass the encrypted message
def str_xor(s1, s2):
return "".join([chr(ord(c1) ^ ord(c2)) for (c1,c2) in zip(s1,s2)])
# first we pair up the message and the xor key as character tuples >> for (c1,c2) in zip(s1,s2)
# next we go through each tuple and convert the characters to integers using the ord() function; once they are integers we can
# perform an exclusive OR on them >> ord(c1) ^ ord(c2)
# then convert the result back to a character using the chr() function >> chr(ord(c1) ^ ord(c2))
# as a last step we merge the resulting list of characters into a single string using the "".join function
#Here we do a quick test
enc = str_xor(message, key)
print 'Encrypted message is: ' + '\n' + enc + '\n'
dec = str_xor(enc, key)
print 'Decrypted message is: ' + '\n' + dec
# Make sure that the SAME key is HARDCODED in the server AND client, otherwise you won't be able to decode your own messages!
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
import datetime
from .utilities import *
def end_session(request):
if 'auth_token' in request.COOKIES:
end_session_by_token(request.COOKIES['auth_token'])
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
def add_user(request):
error_messages = []
login = request.POST['login']
password = request.POST['password']
if password != request.POST['double-check-password']:
error_messages.append('Пароли не совпадают')
with connect() as con:
result = con.execute('SELECT user_id FROM users WHERE login = ?', (login, )).fetchone()
if result:
error_messages.append('Пользователь с таким логином уже существует')
elif not error_messages:
con.execute('INSERT INTO users (password, login) VALUES (?, ?)',
(get_hash(password), login))
con.commit()
if error_messages:
return render(request, 'my_auth/register.html', {'error_messages':error_messages})
else:
return HttpResponseRedirect(reverse('movies:index'))
def register(request):
return render(request, 'my_auth/register.html', {'error_messages':[]})
def update_auth_token(user):
end_expired_sessions()
with connect() as con:
con.execute('DELETE FROM auth_tokens WHERE user_id = ?', (user['user_id'],))
con.commit()
new_token = get_hash(user['login'], user['password'], datetime.datetime.now())
with connect() as con:
con.execute('INSERT INTO auth_tokens VALUES (?, ?, ?)',
(user['user_id'], new_token, datetime.datetime.now() + datetime.timedelta(days=30)))
con.commit()
return new_token
def login(request):
end_session(request)
login = request.POST['login']
password = request.POST['password']
with connect() as con:
user = con.execute('SELECT * FROM users WHERE login = ?', (login,)).fetchone()
if not user or get_hash(password) != user['password']:
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
response = HttpResponseRedirect(request.META.get('HTTP_REFERER'))
response.set_cookie('login', user['login'],
max_age=datetime.timedelta(days=30).total_seconds())
response.set_cookie('user_id', str(user['user_id']),
max_age=datetime.timedelta(days=30).total_seconds())
response.set_cookie('auth_token', str(update_auth_token(user)),
max_age=datetime.timedelta(days=30).total_seconds())
return response
def logout(request):
end_session(request)
response = HttpResponseRedirect(request.META.get('HTTP_REFERER'))
response.delete_cookie('login')
response.delete_cookie('user_id')
response.delete_cookie('auth_token')
return response
|
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import os
from typing import Any, Dict
from assemble_workflow.bundle_location import BundleLocation
from manifests.build_manifest import BuildComponent, BuildManifest
from manifests.bundle_manifest import BundleManifest
class BundleRecorder:
def __init__(self, build: BuildManifest.Build, output_dir: str, artifacts_dir: str, bundle_location: BundleLocation) -> None:
self.output_dir = output_dir
self.build_id = build.id
self.bundle_location = bundle_location
self.version = build.version
self.package_name = self.__get_package_name(build)
self.artifacts_dir = artifacts_dir
self.architecture = build.architecture
self.bundle_manifest = self.BundleManifestBuilder(
build.id,
build.name,
build.version,
build.platform,
build.architecture,
self.__get_package_location(),
)
def __get_package_name(self, build: BuildManifest.Build) -> str:
parts = [
build.filename,
build.version,
build.platform,
build.architecture,
]
return "-".join(parts) + (".zip" if build.platform == "windows" else ".tar.gz")
# Assembled output are expected to be served from a separate "dist" folder
# Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/dist/
def __get_package_location(self) -> str:
return self.bundle_location.get_bundle_location(self.package_name)
# Build artifacts are expected to be served from a "builds" folder
# Example: https://ci.opensearch.org/ci/dbc/bundle-build/1.2.0/build-id/linux/x64/builds/
def __get_component_location(self, component_rel_path: str) -> str:
return self.bundle_location.get_build_location(component_rel_path)
def record_component(self, component: BuildComponent, rel_path: str) -> None:
self.bundle_manifest.append_component(
component.name,
component.repository,
component.ref,
component.commit_id,
self.__get_component_location(rel_path),
)
def get_manifest(self) -> BundleManifest:
return self.bundle_manifest.to_manifest()
def write_manifest(self, folder: str) -> None:
manifest_path = os.path.join(folder, "manifest.yml")
self.get_manifest().to_file(manifest_path)
class BundleManifestBuilder:
def __init__(self, build_id: str, name: str, version: str, platform: str, architecture: str, location: str) -> None:
self.data: Dict[str, Any] = {}
self.data["build"] = {}
self.data["build"]["id"] = build_id
self.data["build"]["name"] = name
self.data["build"]["version"] = str(version)
self.data["build"]["platform"] = platform
self.data["build"]["architecture"] = architecture
self.data["build"]["location"] = location
self.data["schema-version"] = "1.1"
            # Components are stored as a list of dicts in the order they are recorded;
            # BundleManifest consumes this list directly.
self.data["components"] = []
def append_component(self, name: str, repository_url: str, ref: str, commit_id: str, location: str) -> None:
component = {
"name": name,
"repository": repository_url,
"ref": ref,
"commit_id": commit_id,
"location": location,
}
self.data["components"].append(component)
def to_manifest(self) -> BundleManifest:
return BundleManifest(self.data)
|
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
def fib_rec(n):
if n <= 0:
raise ValueError("n must be >= 1")
if n == 1 or n == 2:
return 1
    # recursive descent
return fib_rec(n - 1) + fib_rec(n - 2)
def fib_iterative(n):
if n <= 0:
raise ValueError("n must be >= 1")
if n == 1 or n == 2:
return 1
fib_n_2 = 1
fib_n_1 = 1
for _ in range(2, n):
fib_n = fib_n_1 + fib_n_2
        # "shift" the values forward by one
fib_n_2 = fib_n_1
fib_n_1 = fib_n
return fib_n
def gcd(a, b):
    # recursion base case
if b == 0:
return a
    # recursive descent
return gcd(b, a % b)
def gcd_iterative(a, b):
while b != 0:
remainder = a % b
a = b
b = remainder
    # here b == 0 holds
return a
def lcm(a, b):
return a * b // gcd(a, b)
def is_number_palindrome_rec(number):
return __is_number_palindrome_rec_helper(number, 0, number)
def __is_number_palindrome_rec_helper(original_number, current_value,
remaining_value):
    # recursion base case
if current_value == original_number:
return True
    # recursion base case
    if remaining_value < 1:
return False
last_digit = remaining_value % 10
new_current = current_value * 10 + last_digit
new_remaining = remaining_value // 10
return __is_number_palindrome_rec_helper(original_number, new_current,
new_remaining)
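if __name__ == "__main__":
    # Small usage sketch (addition, not from the book): expected output is
    # 55, 6, 12, True, False.
    print(fib_iterative(10))
    print(gcd(12, 18))
    print(lcm(4, 6))
    print(is_number_palindrome_rec(1221))
    print(is_number_palindrome_rec(123))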
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
u"""
Created at 2020.10.10 by Zhang Yiming
"""
import os
from glob import glob
from multiprocessing import cpu_count, Pool
from shutil import rmtree
import click
import pybedtools as pb
from src import diff_cluster, diff_segmentReadCounts, diff_ru, diff_test
from src.functions import sort_bedfile
def multiprocessing_diff_ru(data):
for i in data["files"]:
sort_bedfile(i, i, sort_by_bedtools=True, add_header = False)
diff_ru.diff_ru(
data["files"],
segments=data["segments"],
condition=data["condition"],
input_cp=data["input_cp"],
output=data["output"],
min_segments=data["min_segments"],
verbose=data["verbose"]
)
pb.helpers.cleanup()
@click.command()
@click.option(
"-i", "--input_dir", type=click.Path(exists=True),
help="Path to climb output directory", required = True
)
@click.option("-o", "--output", type=click.Path(),help="Prefix of output file", required=True)
@click.option("--min-conditions", type=float, default=1, help="Minimum number of conditions for a gene to be clustered across conditions.", show_default=True)
@click.option(
"--minpts", type=int, default=0, show_default=True,
help="List of space-delimited DBSCAN minPts values. These indicate the minimum # points for DBSCAN to consider a core point. The minimum of this list will be used to cluster across conditions."
)
@click.option("--min-fc", type=float, default=-1,help="Minimum fold change for change points.", show_default=True)
@click.option("--min-expn", type=float, default=0, help="Minimum expression in exons for a gene to be clustered.", show_default=True)
@click.option("--lm-flag", type=click.BOOL, default=False, show_default=True, help="Input are results from diff_cluster.")
@click.option("--ss-flag", type=click.BOOL, default=False, show_default=True, help="Flag: RNA-Seq is strand-specific.")
@click.option(
"--eps", type=float, default=-1, show_default=True,
help="Maximum distance between 2 points in a neighborhood. -1.0 indicates using the minimum optimal window size from mountain climber."
)
@click.option("--bgminus", type=click.Path(exists=True),help="List of space-delimited bedgraphs: minus strand. One file per line", show_default=True)
@click.option("--min-segments", type=int, default=3, help="Minimum number of segments required in the TU to calculate relative end usage", show_default=True)
@click.option("-p", "--processes", type=click.IntRange(1, cpu_count(), clamp=True), default=1, help="How many processes to use", show_default=True)
@click.option("--min-dstl-cov", type=float, default=5, help="Minimum average reads per bp in distal segment across samples in at least 1 condition.", show_default=True)
@click.option("--min-prxl-cov", type=float, default=0, help="Minimum average reads per bp in proximal segment across samples in all conditions.", show_default=True)
@click.option("--pmax", type=float, default=0.05, help="Maximum p-value.", show_default=True)
@click.option("--dtop-abs-dif-min", type=float, default=0.05, help="Minimum relative usage (RU) difference.", show_default=True)
@click.option("--root", type=str, help="The root to compare to.")
@click.option('--verbose', type=click.BOOL, default=False, show_default=True, help='Print progress.')
@click.option('--keep', type=click.BOOL, default=False, show_default=True, help='Keep temp file of diff_test.')
def diff(
input_dir: str, output: str, min_conditions: int,
minpts: int, min_fc: float, min_expn: float,
lm_flag: bool, eps: float, ss_flag: bool,
bgminus: str, min_segments: int, processes: int,
min_dstl_cov:float, min_prxl_cov:float, pmax: float, dtop_abs_dif_min: float,
verbose: bool, keep: bool, root: str
):
u"""
    Differential analysis after the climb pipeline
\f
Output files include _cluster_totals.txt, _segments.bed, _cp.bed, and one _cp.bed file for each condition. _cp.bed name field = label_prioritized;condition_labels:gene:TUstart:TUend:chrom:strand:dbscan_epsilon:min_clustered_change_point:max_clustered_change_point:cluster_standard_deviation:total_clusters. _segments.bed name field = label_prioritized_cp1;condition_labels_cp1|label_prioritized_cp2;condition_labels_cp2:gene:TUstart:TUend:chrom:strand:dbscan_epsilon.
"""
output = os.path.abspath(output)
temp_dir = output + "_tmp"
os.makedirs(temp_dir, exist_ok=True)
pb.helpers.set_tempdir(temp_dir)
# diff cluster
cp_files = {}
for x in glob(os.path.join(input_dir, "*_CP.bed")):
c = os.path.basename(x).replace("_CP.bed", "").split("_")[1:]
c = "_".join(c).strip("_")
temp = cp_files.get(c, [])
temp.append(x)
cp_files[c] = temp
# construct the pairs
    comparisons = []
    if root:
        for c in cp_files.keys():
            if c != root:
                comparisons.append([root, c])
    else:
        cs = sorted(cp_files.keys())
        for i in range(len(cs)):
            for j in range(i+1, len(cs)):
                comparisons.append([cs[i], cs[j]])
    for i in comparisons:
temp_diff_cluster_output = os.path.join(output, "_vs_".join(i), "diff_cluster")
temp_diff_segments_output = os.path.join(output, "_vs_".join(i), "segments_read_counts")
temp_diff_ru_output = os.path.join(output, "_vs_".join(i), "diff_ru")
temp_diff_test_output = os.path.join(output, "_vs_".join(i), "diff_test")
os.makedirs(temp_diff_cluster_output, exist_ok=True)
temp_files, conditions = [], []
for c in i:
temp_files += cp_files[c]
conditions += [c for _ in range(len(cp_files[c]))]
diff_cluster.cluster(
temp_files, os.path.join(temp_diff_cluster_output, "diff"),
conditions, min_conditions, [minpts for _ in range(len(temp_files))],
min_fc, min_expn, lm_flag, eps, ss_flag,
verbose=verbose
)
pb.helpers.cleanup()
# diff segment read count
input_file, conditions = [], []
for c in i:
for x in glob(os.path.join(input_dir, f"*_{c}.bedgraph")):
rc = os.path.basename(x).replace(".bedgraph", "").split("_")
rc = "_".join(rc[1:])
if rc == c:
input_file.append(x)
# conditions.append(os.path.basename(x).replace(".bedgraph", ""))
conditions.append("")
bgminus_list = None
if bgminus:
with open(bgminus) as r:
bgminus_list = [x.strip() for x in r if x and os.path.exists(x.strip())]
seg = os.path.join(temp_diff_cluster_output, "diff_segments.bed")
sort_bedfile(seg, seg, add_header = False, sort_by_bedtools = True)
os.makedirs(temp_diff_segments_output, exist_ok=True)
diff_segmentReadCounts.read_count(
segments=seg,
conditions=conditions,
bgplus=input_file,
bgminus=bgminus_list,
output=os.path.join(temp_diff_segments_output, "diff"),
n_jobs=processes,
)
pb.helpers.cleanup()
# diff_ru
input_file = {}
for x in glob(os.path.join(temp_diff_segments_output, "*_readCounts.bed")):
c = os.path.basename(x).replace("_readCounts.bed", "").replace("diff_", "").split("_")
c = "_".join(c[1:]).strip("_")
temp = input_file.get(c, [])
temp.append(x)
input_file[c] = temp
cmds = []
for c, files in input_file.items():
cmds.append({
"files": files, "condition": c,
"segments": os.path.join(temp_diff_cluster_output, "diff_segments.bed"),
"input_cp": os.path.join(temp_diff_cluster_output, f"diff_cp_{c}.bed"),
"output": os.path.join(temp_diff_ru_output, "diff"), "min_segments": min_segments,
"verbose": verbose
})
os.makedirs(temp_diff_ru_output, exist_ok=True)
with Pool(min(processes, len(cmds))) as p:
p.map(multiprocessing_diff_ru, cmds)
cmds = {
"input_file": [], "conditions_input": [],
"ru_segments": set(), "conditions_ru_segments": set()
}
for c in i:
files = input_file[c]
for f in files:
cmds["input_file"].append(f)
cmds["conditions_input"].append(c)
cmds["ru_segments"].add(os.path.join(temp_diff_ru_output, f"diff_ru_segments_{c}.bed"))
cmds["conditions_ru_segments"].add(c)
os.makedirs(temp_diff_test_output, exist_ok=True)
diff_test.test(
input_file=cmds["input_file"], ru_segments=list(cmds["ru_segments"]),
output=os.path.join(temp_diff_test_output, f"diff_{'_'.join(i)}"),
conditions_input=cmds["conditions_input"], conditions_ru_segments=list(cmds["conditions_ru_segments"]),
dtop_abs_dif_min=dtop_abs_dif_min, min_dstlCov=min_dstl_cov,
min_prxlCov=min_prxl_cov, pmax=pmax,
verbose=verbose, keep=keep
)
if os.path.exists(temp_dir):
rmtree(temp_dir)
# python src/diff_cluster.py -i tests/science/C000R7B4_neutrophil_CP.bed tests/science/S001QBB1_HSC_CP.bed -c nerutrophil HSC -o tests/diff_test1
# python src/diff_segmentReadCounts.py -i tests/diff_test_segments.bed -p tests/science/C000R7B4_neutrophil.bedgraph tests/science/S001QBB1_HSC.bedgraph -c nerutrophil HSC -o tests/diff
# python src/diff_ru.py -i tests/diff_C000R7B4_neutrophil_nerutrophil_readCounts.bed -s tests/diff_test_segments.bed -c neutrophil -o tests/diff_neutrophil -l tests/diff_test_cp_nerutrophil.bed
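# For orientation (derived from the os.path.join calls above), results for each
# pairwise comparison are written to:
#   <output>/<condA>_vs_<condB>/diff_cluster/           - clustered change points
#   <output>/<condA>_vs_<condB>/segments_read_counts/   - per-segment read counts
#   <output>/<condA>_vs_<condB>/diff_ru/                - relative usage per condition
#   <output>/<condA>_vs_<condB>/diff_test/              - differential test results
# and the temporary pybedtools directory <output>_tmp is removed at the end.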
|
a,b,c = input().split(" ")
a,b,c = float(a),float(b),float(c)
if ( abs(b-c) < a and a < (b + c) ) and ( abs(a-c) < b and b < (a+c) ) and (abs(a-b) < c and c < (a+b)):
print("Perimetro = %0.1f" % (a+b+c))
else:
print("Area = %0.1f" % (((a+b)*c) / 2) )
|
# A script that just does the job: checks a sentence:
#
# >> python .\perturbvalidate\visualization\validate_sentence.py 'Пушистые котики мурлыкают и не только'
# Using model C:\Users\Vadim\Documents\prog\perturb-validate\models\half_inflected.pickle
# Perturbed!
# Using model C:\Users\Vadim\Documents\prog\perturb-validate\models\half_lemmatized.pickle
# A-OK valid sentence!
# Using model C:\Users\Vadim\Documents\prog\perturb-validate\models\shuffle.pickle
# Perturbed!
# Using model C:\Users\Vadim\Documents\prog\perturb-validate\models\slight.pickle
# Perturbed!
#
# If you need a particular model, use --model
# >> python .\perturbvalidate\visualization\validate_sentence.py 'Пушистые котики мурлыкают и не только' --model shuffle.pickle
# Perturbed!
if __name__ == '__main__':
import os
import click
import pickle
from pathlib import Path
from perturbvalidate.features.embed import embed_sentences, tokenize
from perturbvalidate.models.perdict_model import validate_sentences
from perturbvalidate.data.files import open_models
validation_msgs = {
True: 'A-OK valid sentence!',
False: 'Wrong! Make sure your sentence ends with a period.'
}
def load_model_and_validate(model_file, text):
model = pickle.load(model_file)
embedding = embed_sentences(tokenize(text))
        for idx, is_valid in enumerate(validate_sentences(model, embedding)):
            print(f'Sentence {idx}: {validation_msgs[is_valid]}')
@click.command()
@click.argument('sentence', type=str, default='Пушистые котики мурлыкают и не только.')
@click.option('--model', help='model to use, e.g. lstm/perturb.pickle')
def validate(sentence, model):
project_dir = Path(__file__).resolve().parents[2]
model_path = os.path.join(project_dir, 'models')
if model:
if model_path not in model:
model = os.path.join(model_path, model)
with open(model, 'rb') as f:
load_model_and_validate(f, sentence)
else:
for model_name, model_f in open_models('rb'):
with model_f:
print(f'Using model ' + '/'.join(model_name))
load_model_and_validate(model_f, sentence)
validate()
|
from pathlib import Path
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.session import SparkSession
from pyspark.sql.types import StructType
from tests.conftest import clean_spark_session
from spark_pipeline_framework.transformers.framework_csv_loader.v1.framework_csv_loader import (
FrameworkCsvLoader,
)
def assert_results(result: DataFrame) -> None:
"""
Shared asserts for the different formats of CSV file, all of which contain the same data.
"""
# Assert
assert result.count() == 3
assert result.collect()[1][0] == "2"
assert result.collect()[1][1] == "bar"
assert result.collect()[1][2] == "bar2"
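# The asserts above only pin down the row count (3) and the second data row
# ("2", "bar", "bar2"); the fixtures used below (test.csv, test.psv, no_header.csv,
# column_name_test.csv) are assumed to share those same three data rows.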
# noinspection SqlNoDataSourceInspection
def test_can_load_simple_csv(spark_session: SparkSession) -> None:
# Arrange
clean_spark_session(spark_session)
data_dir: Path = Path(__file__).parent.joinpath("./")
test_file_path: str = f"{data_dir.joinpath('test.csv')}"
schema = StructType([])
df: DataFrame = spark_session.createDataFrame(
spark_session.sparkContext.emptyRDD(), schema
)
# Act
FrameworkCsvLoader(
view="my_view", filepath=test_file_path, delimiter=","
).transform(df)
# noinspection SqlDialectInspection
result: DataFrame = spark_session.sql("SELECT * FROM my_view")
result.show()
# Assert
assert_results(result)
# noinspection SqlNoDataSourceInspection
def test_can_load_non_standard_delimited_csv(spark_session: SparkSession) -> None:
# Arrange
clean_spark_session(spark_session)
data_dir: Path = Path(__file__).parent.joinpath("./")
test_file_path: str = f"{data_dir.joinpath('test.psv')}"
schema = StructType([])
df: DataFrame = spark_session.createDataFrame(
spark_session.sparkContext.emptyRDD(), schema
)
# Act
loader = FrameworkCsvLoader(view="my_view", filepath=test_file_path, delimiter="|")
loader.transform(df)
# noinspection SqlDialectInspection
result: DataFrame = spark_session.sql("SELECT * FROM my_view")
result.show()
# Assert
assert loader.getDelimiter() == "|"
assert_results(result)
# noinspection SqlNoDataSourceInspection
def test_can_load_csv_without_header(spark_session: SparkSession) -> None:
# Arrange
clean_spark_session(spark_session)
data_dir: Path = Path(__file__).parent.joinpath("./")
test_file_path: str = f"{data_dir.joinpath('no_header.csv')}"
schema = StructType([])
df: DataFrame = spark_session.createDataFrame(
spark_session.sparkContext.emptyRDD(), schema
)
# Act
FrameworkCsvLoader(
view="another_view", filepath=test_file_path, delimiter=",", has_header=False
).transform(df)
# noinspection SqlDialectInspection
result: DataFrame = spark_session.sql("SELECT * FROM another_view")
# Assert
assert_results(result)
# noinspection SqlNoDataSourceInspection
def test_correctly_loads_csv_with_clean_flag_off(spark_session: SparkSession) -> None:
# Arrange
clean_spark_session(spark_session)
data_dir: Path = Path(__file__).parent.joinpath("./")
test_file_path: str = f"{data_dir.joinpath('column_name_test.csv')}"
schema = StructType([])
df: DataFrame = spark_session.createDataFrame(
spark_session.sparkContext.emptyRDD(), schema
)
# Act
FrameworkCsvLoader(
view="my_view",
filepath=test_file_path,
delimiter=",",
clean_column_names=False,
).transform(df)
# noinspection SqlDialectInspection
result: DataFrame = spark_session.sql("SELECT * FROM my_view")
# Assert
assert_results(result)
assert result.collect()[1][0] == "2"
assert (
result.columns[2] == "Ugly column,with;chars{that}parquet(does)not like=much_-"
)
# noinspection SqlNoDataSourceInspection
def test_correctly_loads_csv_with_clean_flag_on(spark_session: SparkSession) -> None:
# Arrange
clean_spark_session(spark_session)
data_dir: Path = Path(__file__).parent.joinpath("./")
test_file_path: str = f"{data_dir.joinpath('column_name_test.csv')}"
schema = StructType([])
df: DataFrame = spark_session.createDataFrame(
spark_session.sparkContext.emptyRDD(), schema
)
# Act
FrameworkCsvLoader(
view="my_view", filepath=test_file_path, delimiter=",", clean_column_names=True,
).transform(df)
# noinspection SqlDialectInspection
result: DataFrame = spark_session.sql("SELECT * FROM my_view")
# Assert
assert_results(result)
assert result.collect()[1][0] == "2"
assert (
result.columns[2] == "Ugly_column_with_chars_that_parquet_does_not_like_much_-"
)
def test_can_load_multiline_csv(spark_session: SparkSession) -> None:
# Arrange
clean_spark_session(spark_session)
data_dir: Path = Path(__file__).parent.joinpath("./")
test_file_path: str = f"{data_dir.joinpath('multiline_row.csv')}"
schema = StructType([])
df: DataFrame = spark_session.createDataFrame(
spark_session.sparkContext.emptyRDD(), schema
)
# Act
FrameworkCsvLoader(
view="my_view", filepath=test_file_path, delimiter=",", multiline=True
).transform(df)
# noinspection SqlDialectInspection
result: DataFrame = spark_session.sql("SELECT * FROM my_view")
assert 1 == result.count()
|
from rl.envs import SlipEnv
from rl.policies import GaussianMLP
from rl.algos import DAgger
from rl.utils import policyplot
import torch
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--dagger_itr", type=int, default=100,
help="number of iterations of DAgger")
parser.add_argument("--epochs", type=int, default=10,
help="number of optimization epochs")
parser.add_argument("--trj_len", type=int, default=10000,
help="maximum trajectory length")
parser.add_argument("--seed", type=int, default=1,
help="random seed for experiment")
args = parser.parse_args()
if __name__ == "__main__":
torch.manual_seed(args.seed)
env = SlipEnv(0.001)
obs_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
learner = GaussianMLP(obs_dim, action_dim, (64,))
algo = DAgger(env, learner, None)
algo.train(
dagger_itr=args.dagger_itr,
epochs=args.epochs,
trj_len=args.trj_len
)
policyplot(env, learner, args.trj_len)
|
from pkcrypt.cli import cli
if __name__ == '__main__': cli()
|
import logging
import numpy as np
import os
import tensorflow as tf
from tagger.data_utils import minibatches, pad_sequences, get_chunks
from tagger.general_utils import Progbar, print_sentence, extract_labels
class NERModel(object):
def __init__(self, config, embeddings, ntags, nchars=None, logger=None):
"""
Args:
config: class with hyper parameters
embeddings: np array with embeddings
nchars: (int) size of chars vocabulary
logger: logger instance
"""
self.config = config
self.embeddings = embeddings
self.nchars = nchars
self.ntags = ntags
if logger is None:
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(message)s', level=logging.DEBUG)
self.logger = logger
def add_placeholders(self):
"""
Adds placeholders to self
"""
# shape = (batch size, max length of sentence in batch)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None],
name="word_ids")
# shape = (batch size)
self.sequence_lengths = tf.placeholder(tf.int32, shape=[None],
name="sequence_lengths")
# shape = (batch size, max length of sentence, max length of word)
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None],
name="char_ids")
# shape = (batch_size, max_length of sentence)
self.word_lengths = tf.placeholder(tf.int32, shape=[None, None],
name="word_lengths")
# shape = (batch size, max length of sentence in batch)
self.labels = tf.placeholder(tf.int32, shape=[None, None],
name="labels")
# hyper parameters
self.dropout = tf.placeholder(dtype=tf.float32, shape=[],
name="dropout")
self.lr = tf.placeholder(dtype=tf.float32, shape=[],
name="lr")
def get_feed_dict(self, words, labels=None, lr=None, dropout=None):
"""
Given some data, pad it and build a feed dictionary
Args:
words: list of sentences. A sentence is a list of ids of a list of words.
A word is a list of ids
labels: list of ids
lr: (float) learning rate
dropout: (float) keep prob
Returns:
dict {placeholder: value}
"""
# perform padding of the given data
if self.config.chars:
char_ids, word_ids = zip(*words)
word_ids, sequence_lengths = pad_sequences(word_ids, 0)
char_ids, word_lengths = pad_sequences(char_ids, pad_tok=0, nlevels=2)
else:
word_ids, sequence_lengths = pad_sequences(words, 0)
# build feed dictionary
feed = {
self.word_ids: word_ids,
self.sequence_lengths: sequence_lengths
}
if self.config.chars:
feed[self.char_ids] = char_ids
feed[self.word_lengths] = word_lengths
if labels is not None:
labels, _ = pad_sequences(labels, 0)
feed[self.labels] = labels
if lr is not None:
feed[self.lr] = lr
if dropout is not None:
feed[self.dropout] = dropout
return feed, sequence_lengths
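    # Illustration (assumed behaviour of pad_sequences with pad_tok=0): for word id
    # sequences [[3, 4, 5], [6, 7]] it would return
    #   word_ids         = [[3, 4, 5], [6, 7, 0]]
    #   sequence_lengths = [3, 2]
    # so every sentence fed to the graph is padded to the longest one in the batch.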
def add_word_embeddings_op(self):
"""
Adds word embeddings to self
"""
with tf.variable_scope("words"):
_word_embeddings = tf.Variable(self.embeddings, name="_word_embeddings", dtype=tf.float32,
trainable=self.config.train_embeddings)
word_embeddings = tf.nn.embedding_lookup(_word_embeddings, self.word_ids,
name="word_embeddings")
with tf.variable_scope("chars"):
if self.config.chars:
# get embeddings matrix
_char_embeddings = tf.get_variable(name="_char_embeddings", dtype=tf.float32,
shape=[self.nchars, self.config.dim_char])
char_embeddings = tf.nn.embedding_lookup(_char_embeddings, self.char_ids,
name="char_embeddings")
# put the time dimension on axis=1
s = tf.shape(char_embeddings)
char_embeddings = tf.reshape(char_embeddings, shape=[-1, s[-2], self.config.dim_char])
word_lengths = tf.reshape(self.word_lengths, shape=[-1])
# bi lstm on chars
lstm_cell = tf.contrib.rnn.LSTMCell(self.config.char_hidden_size,
state_is_tuple=True)
_, ((_, output_fw), (_, output_bw)) = tf.nn.bidirectional_dynamic_rnn(lstm_cell,
lstm_cell, char_embeddings, sequence_length=word_lengths,
dtype=tf.float32)
output = tf.concat([output_fw, output_bw], axis=-1)
# shape = (batch size, max sentence length, char hidden size)
output = tf.reshape(output, shape=[-1, s[1], 2*self.config.char_hidden_size])
word_embeddings = tf.concat([word_embeddings, output], axis=-1)
self.word_embeddings = tf.nn.dropout(word_embeddings, self.dropout)
def add_logits_op(self):
"""
Adds logits to self
"""
with tf.variable_scope("bi-lstm"):
lstm_cell = tf.contrib.rnn.LSTMCell(self.config.hidden_size)
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(lstm_cell,
lstm_cell, self.word_embeddings, sequence_length=self.sequence_lengths,
dtype=tf.float32)
output = tf.concat([output_fw, output_bw], axis=-1)
output = tf.nn.dropout(output, self.dropout)
with tf.variable_scope("proj"):
W = tf.get_variable("W", shape=[2*self.config.hidden_size, self.ntags],
dtype=tf.float32)
b = tf.get_variable("b", shape=[self.ntags], dtype=tf.float32,
initializer=tf.zeros_initializer())
ntime_steps = tf.shape(output)[1]
output = tf.reshape(output, [-1, 2*self.config.hidden_size])
pred = tf.matmul(output, W) + b
self.logits = tf.reshape(pred, [-1, ntime_steps, self.ntags])
def add_pred_op(self):
"""
Adds labels_pred to self
"""
if not self.config.crf:
self.labels_pred = tf.cast(tf.argmax(self.logits, axis=-1), tf.int32)
def add_loss_op(self):
"""
Adds loss to self
"""
if self.config.crf:
log_likelihood, self.transition_params = tf.contrib.crf.crf_log_likelihood(
self.logits, self.labels, self.sequence_lengths)
self.loss = tf.reduce_mean(-log_likelihood)
else:
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)
mask = tf.sequence_mask(self.sequence_lengths)
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
# for tensorboard
tf.summary.scalar("loss", self.loss)
def add_train_op(self):
"""
Add train_op to self
"""
with tf.variable_scope("train_step"):
optimizer = tf.train.AdamOptimizer(self.lr)
self.train_op = optimizer.minimize(self.loss)
def add_init_op(self):
self.init = tf.global_variables_initializer()
def add_summary(self, sess):
# tensorboard stuff
self.merged = tf.summary.merge_all()
self.file_writer = tf.summary.FileWriter(self.config.output_path, sess.graph)
def build(self):
self.add_placeholders()
self.add_word_embeddings_op()
self.add_logits_op()
self.add_pred_op()
self.add_loss_op()
self.add_train_op()
self.add_init_op()
def predict_batch(self, sess, words):
"""
Args:
sess: a tensorflow session
words: list of sentences
Returns:
labels_pred: list of labels for each sentence
sequence_length
"""
fd, sequence_lengths = self.get_feed_dict(words, dropout=1.0)
if self.config.crf:
viterbi_sequences = []
logits, transition_params = sess.run([self.logits, self.transition_params],
feed_dict=fd)
# iterate over the sentences
for logit, sequence_length in zip(logits, sequence_lengths):
# keep only the valid time steps
logit = logit[:sequence_length]
viterbi_sequence, viterbi_score = tf.contrib.crf.viterbi_decode(
logit, transition_params)
viterbi_sequences += [viterbi_sequence]
return viterbi_sequences, sequence_lengths
else:
labels_pred = sess.run(self.labels_pred, feed_dict=fd)
return labels_pred, sequence_lengths
def run_epoch(self, sess, train, dev, tags, epoch):
"""
Performs one complete pass over the train set and evaluate on dev
Args:
sess: tensorflow session
train: dataset that yields tuple of sentences, tags
dev: dataset
tags: {tag: index} dictionary
epoch: (int) number of the epoch
"""
        nbatches = (len(train) + self.config.batch_size - 1) // self.config.batch_size
prog = Progbar(target=nbatches)
for i, (words, labels) in enumerate(minibatches(train, self.config.batch_size)):
fd, _ = self.get_feed_dict(words, labels, self.config.lr, self.config.dropout)
_, train_loss, summary = sess.run([self.train_op, self.loss, self.merged], feed_dict=fd)
prog.update(i + 1, [("train loss", train_loss)])
# tensorboard
if i % 10 == 0:
self.file_writer.add_summary(summary, epoch*nbatches + i)
acc, f1 = self.run_evaluate(sess, dev, tags)
self.logger.info("- dev acc {:04.2f} - f1 {:04.2f}".format(100*acc, 100*f1))
return acc, f1
def run_evaluate(self, sess, test, tags):
"""
Evaluates performance on test set
Args:
sess: tensorflow session
test: dataset that yields tuple of sentences, tags
tags: {tag: index} dictionary
Returns:
accuracy
f1 score
"""
accs = []
correct_preds, total_correct, total_preds = 0., 0., 0.
for words, labels in minibatches(test, self.config.batch_size):
labels_pred, sequence_lengths = self.predict_batch(sess, words)
for lab, lab_pred, length in zip(labels, labels_pred, sequence_lengths):
lab = lab[:length]
lab_pred = lab_pred[:length]
def unpack(t):
a, b = t
return a == b
accs += map(unpack, zip(lab, lab_pred))
lab_chunks = set(get_chunks(lab, tags))
lab_pred_chunks = set(get_chunks(lab_pred, tags))
correct_preds += len(lab_chunks & lab_pred_chunks)
total_preds += len(lab_pred_chunks)
total_correct += len(lab_chunks)
p = correct_preds / total_preds if correct_preds > 0 else 0
r = correct_preds / total_correct if correct_preds > 0 else 0
f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
acc = np.mean(accs)
return acc, f1
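    # Worked example of the chunk-based scores above: with correct_preds = 6,
    # total_preds = 8 and total_correct = 10,
    #   p  = 6 / 8  = 0.75
    #   r  = 6 / 10 = 0.60
    #   f1 = 2 * 0.75 * 0.60 / (0.75 + 0.60) ~= 0.67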
def train(self, train, dev, tags):
"""
Performs training with early stopping and lr exponential decay
Args:
train: dataset that yields tuple of sentences, tags
dev: dataset
tags: {tag: index} dictionary
"""
best_score = 0
saver = tf.train.Saver()
# for early stopping
nepoch_no_imprv = 0
with tf.Session() as sess:
sess.run(self.init)
# tensorboard
self.add_summary(sess)
for epoch in range(self.config.nepochs):
self.logger.info("Epoch {:} out of {:}".format(epoch + 1, self.config.nepochs))
acc, f1 = self.run_epoch(sess, train, dev, tags, epoch)
# decay learning rate
self.config.lr *= self.config.lr_decay
# early stopping and saving best parameters
if f1 >= best_score:
nepoch_no_imprv = 0
if not os.path.exists(self.config.model_output):
os.makedirs(self.config.model_output)
saver.save(sess, self.config.model_output, global_step=epoch * self.config.data_size)
best_score = f1
self.logger.info("- new best score!")
else:
nepoch_no_imprv += 1
if nepoch_no_imprv >= self.config.nepoch_no_imprv:
self.logger.info("- early stopping {} epochs without improvement".format(
nepoch_no_imprv))
break
def evaluate(self, test, tags):
saver = tf.train.Saver()
with tf.Session() as sess:
self.logger.info("Testing model over test set")
saver.restore(sess, self.config.model_output)
acc, f1 = self.run_evaluate(sess, test, tags)
self.logger.info("- test acc {:04.2f} - f1 {:04.2f}".format(100*acc, 100*f1))
def interactive_shell(self, tags, processing_word):
idx_to_tag = {idx: tag for tag, idx in tags.items()}
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess, self.config.model_output)
self.logger.info("This is an interactive mode, enter a sentence:")
while True:
try:
sentence = input("input> ")
words_raw = sentence.strip().split(" ")
words = list(map(processing_word, words_raw))
if type(words[0]) == tuple:
words = zip(*words)
pred_ids, _ = self.predict_batch(sess, [words])
preds = list(map(lambda idx: idx_to_tag[idx], list(pred_ids[0])))
print_sentence(self.logger, {"x": words_raw, "y": preds})
except EOFError:
print("Closing session.")
break
def restore(self):
saver = tf.train.Saver()
self.sess = tf.Session()
with self.sess.as_default():
saver.restore(self.sess, self.config.model_output)
def process(self, tags, processing_word, sentence):
with self.sess.as_default():
idx_to_tag = {idx: tag for tag, idx in tags.items()}
words_raw = sentence.strip().split(" ")
words = list(map(processing_word, words_raw))
if type(words[0]) == tuple:
words = zip(*words)
pred_ids, _ = self.predict_batch(self.sess, [words])
preds = list(map(lambda idx: idx_to_tag[idx], list(pred_ids[0])))
return ' '.join(preds)
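# Minimal usage sketch (illustrative only; `config`, `embeddings`, `train`, `dev`,
# `test` and `tags` are assumed to come from the surrounding project):
#
#   model = NERModel(config, embeddings, ntags=len(tags), nchars=nchars)
#   model.build()
#   model.train(train, dev, tags)    # early stopping + lr decay, see above
#   model.evaluate(test, tags)       # restores the best checkpoint and scores it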
|
import logging
from secpy.core.mixins.base_network_client_mixin import BaseNetworkClientMixin
from secpy.core.ticker_company_exchange_map import TickerCompanyExchangeMap
class BaseEndpointMixin(BaseNetworkClientMixin):
_endpoint = None
def __init__(self, user_agent, **kwargs):
"""
Base class to be inherited by all classes that interact directly w/ the SEC REST API
@param user_agent: unique identifiers to use in headers when making requests to SEC REST API
@param kwargs: Misc
"""
super().__init__(user_agent, **kwargs)
self._logger = self.__set_logger()
self._ticker_cte_map = TickerCompanyExchangeMap(user_agent, **kwargs)
@staticmethod
def __set_logger():
        logging.basicConfig(format="%(asctime)s-%(pathname)s-%(levelname)s-%(message)s")
return logging
|
import json
import time
import requests
# origination
decimals = 1000000000000000000
time_now = time.time()
contract = "KT1PxkrCckgh5fA5v2cZEE2bX5q2RV1rv8dj"
origination = 1734719
level = 1735011
url = f"https://api.tzkt.io/v1/operations/transactions?sender={contract}&entrypoint=burn&status=applied&level={level}"
print(url)
operations = json.loads(requests.get(url).text)
for_output = []
for operation in operations:
value_readable = int(operation["parameter"]["value"]["value"]) / decimals
print(f'{operation["initiator"]["address"]} applied '
f'{operation["parameter"]["entrypoint"]} of '
f'{operation["parameter"]["value"]["value"]}'
f', which is {value_readable}')
if value_readable >= 10000:
for_output.append(operation)
print("will be saved to file")
with open("output.json", "w") as outfile:
outfile.write(json.dumps(for_output))
print("file saved")
|
import sys, os
kActionFrom_AllInOne = 0x0
kActionFrom_BurnOtp = 0x1
kBootDeviceMemBase_FlexspiNor = 0x08000000
kBootDeviceMemBase_FlexcommSpiNor = 0x0
kBootDeviceMemBase_UsdhcSd = 0x0
kBootDeviceMemBase_UsdhcMmc = 0x0
kBootDeviceMemXipSize_FlexspiNor = 0x08000000 #128MB
kBootDeviceMemXipSize_QuadspiNor = 0x08000000 #128MB
kRamFreeSpaceStart_LoadCommOpt = 0x0010c000
kRamFreeSpaceStart_LoadCfgBlock = 0x0010d000
|
class CustomField(object):
def __init__(self, key, value, tag):
self.key = key
self.value = value
self.tag = tag
|
import math
import tensorflow.compat.v1 as tf
from detector.constants import NUM_KEYPOINTS, DATA_FORMAT
from detector.utils import batch_norm_relu, conv2d_same
from detector.fpn import feature_pyramid_network
DEPTH = 128
class KeypointSubnet:
def __init__(self, backbone_features, is_training, params):
"""
Arguments:
backbone_features: a dict with float tensors.
It contains keys ['c2', 'c3', 'c4', 'c5'].
is_training: a boolean.
params: a dict.
"""
self.enriched_features = feature_pyramid_network(
backbone_features, is_training, depth=DEPTH, min_level=2,
add_coarse_features=False, scope='keypoint_fpn'
)
normalized_enriched_features = {
n: batch_norm_relu(x, is_training, name=f'{n}_batch_norm')
for n, x in self.enriched_features.items()
}
# it is a dict with keys ['p2', 'p3', 'p4', 'p5']
upsampled_features = []
for level in range(2, 6):
with tf.variable_scope(f'phi_subnet_{level}'):
x = normalized_enriched_features[f'p{level}']
y = phi_subnet(x, is_training, upsample=2**(level - 2))
upsampled_features.append(y)
upsampled_features = tf.concat(upsampled_features, axis=1 if DATA_FORMAT == 'channels_first' else 3)
x = conv2d_same(upsampled_features, 64, kernel_size=3, name='final_conv3x3')
x = batch_norm_relu(x, is_training, name='final_bn')
p = 0.01 # probability of a keypoint
# sigmoid(-log((1 - p) / p)) = p
        value = -math.log((1.0 - p) / p)
        keypoints_bias = NUM_KEYPOINTS * [value]
bias_initializer = tf.constant_initializer(keypoints_bias + [0.0])
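        # Sanity check of this prior-probability initialization (illustrative):
        # with p = 0.01, value = -log(0.99 / 0.01) ~= -4.595 and
        # sigmoid(-4.595) = 1 / (1 + exp(4.595)) ~= 0.01, so every keypoint
        # channel starts out predicting a ~1% probability.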
self.heatmaps = tf.layers.conv2d(
x, NUM_KEYPOINTS + 1, kernel_size=1, padding='same',
bias_initializer=bias_initializer,
kernel_initializer=tf.random_normal_initializer(stddev=1e-4),
data_format=DATA_FORMAT, name='heatmaps'
)
if DATA_FORMAT == 'channels_first':
self.heatmaps = tf.transpose(self.heatmaps, [0, 2, 3, 1])
self.enriched_features = {
n: tf.transpose(x, [0, 2, 3, 1])
for n, x in self.enriched_features.items()
}
def phi_subnet(x, is_training, upsample):
"""
Arguments:
x: a float tensor with shape [b, h, w, c].
is_training: a boolean.
upsample: an integer.
Returns:
a float tensor with shape [b, upsample * h, upsample * w, depth].
"""
x = conv2d_same(x, DEPTH, kernel_size=3, name='conv1')
x = batch_norm_relu(x, is_training, name='bn1')
x = conv2d_same(x, DEPTH, kernel_size=3, name='conv2')
x = batch_norm_relu(x, is_training, name='bn2')
if DATA_FORMAT == 'channels_first':
x = tf.transpose(x, [0, 2, 3, 1])
shape = tf.shape(x)
h, w = shape[1], shape[2]
new_size = [upsample * h, upsample * w]
x = tf.image.resize_bilinear(x, new_size)
if DATA_FORMAT == 'channels_first':
x = tf.transpose(x, [0, 3, 1, 2])
return x
|
APP_NAME = 'zenkly'
VALID_HC_TYPES = {'articles', 'categories', 'sections'}
|
"""
Lasso selection of data points
Draws a simple scatterplot of random data. Drag the mouse to use the lasso
selector, which allows you to circle all the points in a region.
Upon completion of the lasso operation, the indices of the selected points are
printed to the console.
Uncomment 'lasso_selection.incremental_select' line (line 74) to see the
indices of the selected points computed in real time.
"""
import sys
# Major library imports
from numpy import sort, compress, arange
from numpy.random import random
# Enthought library imports
from enable.api import Component, ComponentEditor
from traits.api import HasTraits, Instance
from traitsui.api import Item, Group, View
# Chaco imports
from chaco.api import ArrayPlotData, Plot, LassoOverlay
from chaco.tools.api import LassoSelection, ScatterInspector
#===============================================================================
# # Create the Chaco plot.
#===============================================================================
def _create_plot_component():
# Create some data
npts = 2000
x = sort(random(npts))
y = random(npts)
    # Create a plot data object and give it this data
pd = ArrayPlotData()
pd.set_data("index", x)
pd.set_data("value", y)
# Create the plot
plot = Plot(pd)
plot.plot(("index", "value"),
type="scatter",
name="my_plot",
marker="circle",
index_sort="ascending",
color="red",
marker_size=4,
bgcolor="white")
# Tweak some of the plot properties
plot.title = "Scatter Plot With Lasso Selection"
plot.line_width = 1
plot.padding = 50
# Right now, some of the tools are a little invasive, and we need the
# actual ScatterPlot object to give to them
my_plot = plot.plots["my_plot"][0]
# Attach some tools to the plot
lasso_selection = LassoSelection(component=my_plot,
selection_datasource=my_plot.index,
drag_button="left")
#drag_button="right")
my_plot.active_tool = lasso_selection
my_plot.tools.append(ScatterInspector(my_plot))
lasso_overlay = LassoOverlay(lasso_selection=lasso_selection,
component=my_plot)
my_plot.overlays.append(lasso_overlay)
# Uncomment this if you would like to see incremental updates:
#lasso_selection.incremental_select = True
return plot
#===============================================================================
# Attributes to use for the plot view.
size=(650,650)
title="Scatter plot with selection"
bg_color="lightgray"
#===============================================================================
# # Demo class that is used by the demo.py application.
#===============================================================================
class Demo(HasTraits):
plot = Instance(Component)
traits_view = View(
Group(
Item('plot', editor=ComponentEditor(size=size),
show_label=False),
orientation = "vertical"),
resizable=True, title=title
)
def _selection_changed(self):
mask = self.index_datasource.metadata['selection']
print("New selection: ")
print(compress(mask, arange(len(mask))))
# Ensure that the points are printed immediately:
sys.stdout.flush()
def _plot_default(self):
plot = _create_plot_component()
# Retrieve the plot hooked to the LassoSelection tool.
my_plot = plot.plots["my_plot"][0]
lasso_selection = my_plot.active_tool
# Set up the trait handler for the selection
self.index_datasource = my_plot.index
lasso_selection.on_trait_change(self._selection_changed,
'selection_changed')
return plot
demo = Demo()
if __name__ == "__main__":
demo.configure_traits()
|
from cancontroller.caniot.models import DeviceId
from cancontroller.caniot.device import Device
from cancontroller.controller.nodes import GarageDoorController, AlarmController
node_garage_door = GarageDoorController(DeviceId(DeviceId.Class.CUSTOMPCB, 0x02), "GarageDoorControllerProdPCB")
node_alarm = AlarmController(DeviceId(DeviceId.Class.CUSTOMPCB, 0x03), "AlarmController")
node_broadcast = Device(DeviceId.Broadcast(), "__broadcast__")
# TODO create an entity of broadcast device
class Devices:
"""
List all nodes on the current Bus
"""
devices = [
node_garage_door,
node_alarm,
node_broadcast
]
def __iter__(self):
for dev in self.devices:
yield dev
def get(self, name: str) -> Device:
for dev in self.devices:
if dev.name == name:
return dev
def __getitem__(self, name: str) -> Device:
return self.get(name)
def select(self, deviceid: DeviceId) -> Device:
for dev in self.devices:
if dev.deviceid == deviceid:
return dev
devices = Devices()
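# Usage sketch (illustrative only):
#   devices.get("AlarmController")                             # lookup by name
#   devices["GarageDoorControllerProdPCB"]                     # same via __getitem__
#   devices.select(DeviceId(DeviceId.Class.CUSTOMPCB, 0x03))   # lookup by device id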
|