Dataset schema (each row describes one source file):
hexsha: string (length 40..40)
size: int64 (4 .. 1.02M)
ext: string (8 classes)
lang: string (1 class)
max_stars_repo_path: string (length 4..209)
max_stars_repo_name: string (length 5..121)
max_stars_repo_head_hexsha: string (length 40..40)
max_stars_repo_licenses: list (length 1..10)
max_stars_count: int64 (1 .. 191k), nullable
max_stars_repo_stars_event_min_datetime: string (length 24..24), nullable
max_stars_repo_stars_event_max_datetime: string (length 24..24), nullable
max_issues_repo_path: string (length 4..209)
max_issues_repo_name: string (length 5..121)
max_issues_repo_head_hexsha: string (length 40..40)
max_issues_repo_licenses: list (length 1..10)
max_issues_count: int64 (1 .. 67k), nullable
max_issues_repo_issues_event_min_datetime: string (length 24..24), nullable
max_issues_repo_issues_event_max_datetime: string (length 24..24), nullable
max_forks_repo_path: string (length 4..209)
max_forks_repo_name: string (length 5..121)
max_forks_repo_head_hexsha: string (length 40..40)
max_forks_repo_licenses: list (length 1..10)
max_forks_count: int64 (1 .. 105k), nullable
max_forks_repo_forks_event_min_datetime: string (length 24..24), nullable
max_forks_repo_forks_event_max_datetime: string (length 24..24), nullable
content: string (length 4 .. 1.02M)
avg_line_length: float64 (1.07 .. 66.1k)
max_line_length: int64 (4 .. 266k)
alphanum_fraction: float64 (0.01 .. 1)
hexsha: 3ae6d4b4a5db3498d8459e55098bc471d3ce7ad4 | size: 2,025 | ext: py | lang: Python
max_stars:  repo_path=CLIMA/views.py, repo_name=weslleybulhoes/projeto-pibic, head_hexsha=971918cd76fedce7dee297201b54fab076078d81, licenses=["MIT"], count=null, stars_event_min_datetime=null, stars_event_max_datetime=null
max_issues: repo_path=CLIMA/views.py, repo_name=weslleybulhoes/projeto-pibic, head_hexsha=971918cd76fedce7dee297201b54fab076078d81, licenses=["MIT"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null
max_forks:  repo_path=CLIMA/views.py, repo_name=weslleybulhoes/projeto-pibic, head_hexsha=971918cd76fedce7dee297201b54fab076078d81, licenses=["MIT"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null
content:
from django.shortcuts import render
from django.views.generic import View
import pandas as pd
import datetime
from utils.grafico_clima import gerando_grafico
from .templatetags.filtro_select import concatenando_string, removendo_string
import os
class Clima(View):
def get(self, *args, **kwargs):
if self.request.GET:
nome_arquivo = self.request.GET.get("arquivo")
titulo = concatenando_string(nome_arquivo)
requisicao = True
else:
requisicao = False
nome_arquivo = "dados_A327_PALMEIRA DOS INDIOS_2010-01-01_2020-12-31.csv"
titulo= ""
todos_arq = os.listdir("arquivos")
arq_cor = pd.read_csv(f"arquivos/{nome_arquivo}", header=9, sep=";", encoding="cp1252")
tipo_dado = self.request.GET.get("tipo_dado")
sufixo = arq_cor.columns[2:-1]
data_inicial = self.request.GET.get("data_inicial")
data_final = self.request.GET.get("data_final")
IC_max_min, IC_media, agrupando_data_media, arquivo_referencia, coluna, diagrama =\
gerando_grafico(arq_cor, tipo_dado, data_inicial, data_final)
context = {
"categories": IC_max_min,
'values': IC_media,
'data_inicial_filtro': str(agrupando_data_media['Data'][0]).replace('/', '-'),
'data_final_filtro': str(agrupando_data_media['Data'][len(agrupando_data_media["Data"]) - 1]).replace('/', '-'),
'data_inicial': str(arquivo_referencia['Data'][0]).replace('/', '-'),
'data_final': str(arquivo_referencia['Data'][len(arquivo_referencia["Data"]) - 1]).replace('/', '-'),
'colunas': str(coluna),
'requisicao': requisicao,
'sufixo': sufixo,
'todos_arq': todos_arq,
'titulo': titulo,
'diagrama': diagrama
}
return render(self.request, "clima/caracterizando_clima.html", context=context)
def index(request):
return render(request, "clima/index.html")
avg_line_length: 36.818182 | max_line_length: 124 | alphanum_fraction: 0.631111
hexsha: 409317f0b2f4ad95db118be536e5182c0ff7f547 | size: 9,421 | ext: py | lang: Python
max_stars:  repo_path=build_helpers.py, repo_name=neurospin/nipy, head_hexsha=cc54600a0dca1e003ad393bc05c46f91eef30a68, licenses=["BSD-3-Clause"], count=1, stars_event_min_datetime=2016-03-08T15:01:06.000Z, stars_event_max_datetime=2016-03-08T15:01:06.000Z
max_issues: repo_path=setup_helpers.py, repo_name=fabianp/nipy, head_hexsha=40e89f3ca7f34df05631623807993026134e6de3, licenses=["BSD-3-Clause"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null
max_forks:  repo_path=setup_helpers.py, repo_name=fabianp/nipy, head_hexsha=40e89f3ca7f34df05631623807993026134e6de3, licenses=["BSD-3-Clause"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null
content:
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Build helpers for setup.py
Includes package dependency checks, and code to build the documentation
To build the docs, run::
python setup.py build_sphinx
"""
# Standard library imports
import sys
import os
from os.path import join as pjoin, dirname
import zipfile
import warnings
import shutil
from distutils.cmd import Command
from distutils.command.clean import clean
from distutils.version import LooseVersion
from distutils.dep_util import newer_group
from distutils.errors import DistutilsError
from numpy.distutils.misc_util import appendpath
from numpy.distutils import log
# Sphinx import
try:
from sphinx.setup_command import BuildDoc
except ImportError:
have_sphinx = False
else:
have_sphinx = True
# Get project related strings. Please do not change this line to use
# execfile because execfile is not available in Python 3
_info_fname = pjoin('nipy', 'info.py')
INFO_VARS = {}
exec(open(_info_fname, 'rt').read(), {}, INFO_VARS)
DOC_BUILD_DIR = os.path.join('build', 'html')
CYTHON_MIN_VERSION = INFO_VARS['CYTHON_MIN_VERSION']
################################################################################
# Distutils Command class for installing nipy to a temporary location.
class TempInstall(Command):
temp_install_dir = os.path.join('build', 'install')
def run(self):
""" build and install nipy in a temporary location. """
install = self.distribution.get_command_obj('install')
install.install_scripts = self.temp_install_dir
install.install_base = self.temp_install_dir
install.install_platlib = self.temp_install_dir
install.install_purelib = self.temp_install_dir
install.install_data = self.temp_install_dir
install.install_lib = self.temp_install_dir
install.install_headers = self.temp_install_dir
install.run()
# Horrible trick to reload nipy with our temporary install
for key in list(sys.modules.keys()):
if key.startswith('nipy'):
sys.modules.pop(key, None)
sys.path.append(os.path.abspath(self.temp_install_dir))
# Pop the cwd
sys.path.pop(0)
import nipy
def initialize_options(self):
pass
def finalize_options(self):
pass
################################################################################
# Distutils Command class for API generation
class APIDocs(TempInstall):
description = \
"""generate API docs """
user_options = [
('None', None, 'this command has no options'),
]
def run(self):
# First build the project and install it to a temporary location.
TempInstall.run(self)
os.chdir('doc')
try:
# We are running the API-building script via a
# system call, but overriding the import path.
toolsdir = os.path.abspath(pjoin('..', 'tools'))
build_templates = pjoin(toolsdir, 'build_modref_templates.py')
cmd = """%s -c 'import sys; sys.path.append("%s"); sys.path.append("%s"); execfile("%s", dict(__name__="__main__"))'""" \
% (sys.executable,
toolsdir,
self.temp_install_dir,
build_templates)
os.system(cmd)
finally:
os.chdir('..')
################################################################################
# Code to copy the sphinx-generated html docs in the distribution.
def relative_path(filename):
""" Return the relative path to the file, assuming the file is
in the DOC_BUILD_DIR directory.
"""
length = len(os.path.abspath(DOC_BUILD_DIR)) + 1
return os.path.abspath(filename)[length:]
################################################################################
# Distutils Command class to clean
class Clean(clean):
def run(self):
clean.run(self)
api_path = os.path.join('doc', 'api', 'generated')
if os.path.exists(api_path):
print "Removing %s" % api_path
shutil.rmtree(api_path)
if os.path.exists(DOC_BUILD_DIR):
print "Removing %s" % DOC_BUILD_DIR
shutil.rmtree(DOC_BUILD_DIR)
################################################################################
# Distutils Command class build the docs
if have_sphinx:
class MyBuildDoc(BuildDoc):
""" Sub-class the standard sphinx documentation building system, to
add logic for API generation and matplotlib's plot directive.
"""
def run(self):
self.run_command('api_docs')
# We need to be in the doc directory for the plot_directive
# and API generation to work
os.chdir('doc')
try:
BuildDoc.run(self)
finally:
os.chdir('..')
self.zip_docs()
def zip_docs(self):
if not os.path.exists(DOC_BUILD_DIR):
raise OSError('Doc directory does not exist.')
target_file = os.path.join('doc', 'documentation.zip')
# ZIP_DEFLATED actually compresses the archive. However, there
# will be a RuntimeError if zlib is not installed, so we check
# for it. ZIP_STORED produces an uncompressed zip, but does not
# require zlib.
try:
zf = zipfile.ZipFile(target_file, 'w',
compression=zipfile.ZIP_DEFLATED)
except RuntimeError:
warnings.warn('zlib not installed, storing the docs '
'without compression')
zf = zipfile.ZipFile(target_file, 'w',
compression=zipfile.ZIP_STORED)
for root, dirs, files in os.walk(DOC_BUILD_DIR):
relative = relative_path(root)
if not relative.startswith('.doctrees'):
for f in files:
zf.write(os.path.join(root, f),
os.path.join(relative, 'html_docs', f))
zf.close()
def finalize_options(self):
""" Override the default for the documentation build
directory.
"""
self.build_dir = os.path.join(*DOC_BUILD_DIR.split(os.sep)[:-1])
BuildDoc.finalize_options(self)
else: # failed Sphinx import
# Raise an error when trying to build docs
class MyBuildDoc(Command):
user_options = []
def run(self):
raise ImportError(
"Sphinx is not installed, docs cannot be built")
def initialize_options(self):
pass
def finalize_options(self):
pass
# The command classes for distutils, used by setup.py
cmdclass = {'api_docs': APIDocs,
'clean': Clean,
'build_sphinx': MyBuildDoc}
def have_good_cython():
try:
from Cython.Compiler.Version import version
except ImportError:
return False
return LooseVersion(version) >= LooseVersion(CYTHON_MIN_VERSION)
def generate_a_pyrex_source(self, base, ext_name, source, extension):
''' Monkey patch for numpy build_src.build_src method
Uses Cython instead of Pyrex.
'''
good_cython = have_good_cython()
if self.inplace or not good_cython:
target_dir = dirname(base)
else:
target_dir = appendpath(self.build_src, dirname(base))
target_file = pjoin(target_dir, ext_name + '.c')
depends = [source] + extension.depends
sources_changed = newer_group(depends, target_file, 'newer')
if self.force or sources_changed:
if good_cython:
# add distribution (package-wide) include directories, in order
# to pick up needed .pxd files for cython compilation
incl_dirs = extension.include_dirs[:]
dist_incl_dirs = self.distribution.include_dirs
if dist_incl_dirs is not None:
incl_dirs += dist_incl_dirs
import Cython.Compiler.Main
log.info("cythonc:> %s" % (target_file))
self.mkpath(target_dir)
options = Cython.Compiler.Main.CompilationOptions(
defaults=Cython.Compiler.Main.default_options,
include_path=incl_dirs,
output_file=target_file)
cython_result = Cython.Compiler.Main.compile(source,
options=options)
if cython_result.num_errors != 0:
raise DistutilsError("%d errors while compiling "
"%r with Cython"
% (cython_result.num_errors, source))
elif sources_changed and os.path.isfile(target_file):
raise DistutilsError("Cython >=%s required for compiling %r"
" because sources (%s) have changed" %
(CYTHON_MIN_VERSION, source, ','.join(depends)))
else:
raise DistutilsError("Cython >=%s required for compiling %r"
" but not available" %
(CYTHON_MIN_VERSION, source))
return target_file
avg_line_length: 36.515504 | max_line_length: 133 | alphanum_fraction: 0.578389
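As a side note on the generate_a_pyrex_source() helper above: it is written to be monkey-patched onto the build_src command from numpy.distutils, as its docstring says. A minimal sketch of how a setup.py could wire that up follows; the "build_helpers" import name is only an assumption for illustration, not part of the file.

# Illustrative sketch, not part of the original file: route .pyx compilation
# through Cython by replacing the Pyrex hook on numpy.distutils' build_src command.
from numpy.distutils.command import build_src

import build_helpers  # assumed module name for the file shown above

# distutils will call this as a bound method when generating extension sources.
build_src.build_src.generate_a_pyrex_source = build_helpers.generate_a_pyrex_source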
hexsha: 56cb929d13460820a169a8ebd6eb9a774fc3752e | size: 6,043 | ext: py | lang: Python
max_stars:  repo_path=python/qpid_dispatch_internal/management/schema_doc.py, repo_name=kishorkunal-raj/qpid-dispatch, head_hexsha=f629b448dc1ae92d46c31f3c8d7bf317412b9e22, licenses=["Apache-2.0"], count=1, stars_event_min_datetime=2019-03-17T04:08:43.000Z, stars_event_max_datetime=2019-03-17T04:08:43.000Z
max_issues: repo_path=python/qpid_dispatch_internal/management/schema_doc.py, repo_name=kishorkunal-raj/qpid-dispatch, head_hexsha=f629b448dc1ae92d46c31f3c8d7bf317412b9e22, licenses=["Apache-2.0"], count=121, issues_event_min_datetime=2020-09-16T06:03:53.000Z, issues_event_max_datetime=2022-03-30T13:03:23.000Z
max_forks:  repo_path=python/qpid_dispatch_internal/management/schema_doc.py, repo_name=irinabov/debian-qpid-dispatch, head_hexsha=42fb2ffb65f8e8c8d616633c0b4308d6531a281d, licenses=["Apache-2.0"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null
content:
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""Library for generating asciidoc documentation from a L{schema.Schema}"""
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from collections import namedtuple
import sys
from .schema import AttributeType
from qpid_dispatch_internal.compat import PY_STRING_TYPE, dict_itervalues
class SchemaWriter(object):
"""Write the schema as an asciidoc document"""
def __init__(self, output, schema, quiet=True):
self.output, self.schema, self.quiet = output, schema, quiet
self._heading = 0
# Options affecting how output is written
def warn(self, message):
if not self.quiet: print(message, file=sys.stderr)
def write(self, text): self.output.write(text)
def writeln(self, text=""): self.output.write(text+"\n")
def para(self, text): self.write(text+"\n\n")
def heading(self, text=None, sub=0):
self._heading += sub
if text: self.para("\n=%s %s" % ("="*self._heading, text))
class Section(namedtuple("Section", ["writer", "heading"])):
def __enter__(self): self.writer.heading(self.heading, sub=+1)
def __exit__(self, ex, value, trace): self.writer.heading(sub=-1)
def section(self, heading): return self.Section(self, heading)
def attribute_qualifiers(self, attr, show_create=True, show_update=True):
default = attr.default
if isinstance(default, PY_STRING_TYPE) and default.startswith('$'):
default = None # Don't show defaults that are references, confusing.
return ' (%s)' % (', '.join(
filter(None, [str(attr.atype),
default and "default='%s'" % default,
attr.required and "required",
attr.unique and "unique",
show_create and attr.create and "`CREATE`",
show_update and attr.update and "`UPDATE`"
])))
def attribute_type(self, attr, holder=None, show_create=True, show_update=True):
self.writeln("'%s'%s::" % (
attr.name, self.attribute_qualifiers(attr, show_create, show_update)))
if attr.description:
self.writeln(" %s" % attr.description)
else:
self.warn("Warning: No description for %s in %s" % (attr, attr.defined_in.short_name))
self.writeln()
def attribute_types(self, holder):
holder_attributes = holder.my_attributes
for attr in holder_attributes:
if attr.deprecation_name:
deprecated_attr = AttributeType(attr.deprecation_name, type=attr.type, defined_in=attr.defined_in,
default=attr.default, required=attr.required, unique=attr.unique,
hidden=attr.hidden, deprecated=True, value=attr.value,
description="(DEPRECATED) " + attr.description
+ " This attribute has been deprecated. Use " +
attr.name + " instead.",
create=attr.create, update=attr.update,
graph=attr.graph)
holder_attributes.append(deprecated_attr)
for attr in holder_attributes:
self.attribute_type(attr, holder)
def operation_def(self, op, holder):
def request_response(what):
message = getattr(op, what)
if message:
if message.body:
self.para(".%s body%s\n\n%s" % (
what.capitalize(), self.attribute_qualifiers(message.body),
message.body.description))
if message.properties:
self.para(".%s properties" % (what.capitalize()))
for prop in dict_itervalues(message.properties):
self.attribute_type(prop)
with self.section("Operation %s" % op.name):
if op.description: self.para(op.description)
request_response("request")
request_response("response")
def operation_defs(self, entity_type):
for op in dict_itervalues(entity_type.operation_defs):
self.operation_def(op, entity_type)
def entity_type(self, entity_type, operation_defs=True):
with self.section(entity_type.short_name):
if entity_type.description:
self.para('%s' % entity_type.description)
else:
self.warn("Warning no description for %s" % entity_type)
if entity_type.operations:
self.para("Operations allowed: `%s`\n\n" % "`, `".join(entity_type.operations))
self.attribute_types(entity_type)
if entity_type.operation_defs:
self.operation_defs(entity_type)
def entity_types_extending(self, base_name):
base = self.schema.entity_type(base_name)
for entity_type in self.schema.filter(lambda t: t.extends(base)):
self.entity_type(entity_type)
avg_line_length: 43.47482 | max_line_length: 114 | alphanum_fraction: 0.6093
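A rough usage sketch for the SchemaWriter above. It assumes a schema object has already been loaded elsewhere (schema loading is outside this file), and the base type name is only a placeholder.

# Hypothetical helper, not part of the original file: render every entity type
# that extends a given base type as a single asciidoc document on a stream.
import sys

def dump_schema(schema, base_type_name, out=sys.stdout):
    writer = SchemaWriter(out, schema, quiet=False)
    with writer.section("Entity Types"):
        writer.entity_types_extending(base_type_name)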
hexsha: c1ea4ee4ee36cc8a4d0b03d43f9f922b601a89e7 | size: 3,267 | ext: py | lang: Python
max_stars:  repo_path=pdtable/io/excel.py, repo_name=guilhermebs/pdtable, head_hexsha=7821b015f5c92a0be5124f4527717c99b802f45c, licenses=["BSD-3-Clause"], count=null, stars_event_min_datetime=null, stars_event_max_datetime=null
max_issues: repo_path=pdtable/io/excel.py, repo_name=guilhermebs/pdtable, head_hexsha=7821b015f5c92a0be5124f4527717c99b802f45c, licenses=["BSD-3-Clause"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null
max_forks:  repo_path=pdtable/io/excel.py, repo_name=guilhermebs/pdtable, head_hexsha=7821b015f5c92a0be5124f4527717c99b802f45c, licenses=["BSD-3-Clause"], count=null, forks_event_min_datetime=null, forks_event_max_datetime=null
content:
"""Interface to read/write Tables from/to an Excel workbook.
The only Excel I/O engine supported right now is 'openpyxl', but this module can
be extended to support other readers such as 'xlrd' and writers such as 'xlsxwriter'.
openpyxl (and eventually other engines) are not required at install time; only when the functions
requiring them (read_excel() or write_excel()) are called for the first time.
"""
import os
from os import PathLike
from typing import Union, Callable, Iterable, BinaryIO
from .parsers.blocks import parse_blocks
from .parsers.fixer import ParseFixer
from .. import BlockType, Table, TableBundle
from ..store import BlockIterator
def read_excel(
source: Union[str, PathLike, BinaryIO],
origin=None,
fixer: ParseFixer = None,
to: str = "pdtable",
filter: Callable[[BlockType, str], bool] = None,
) -> BlockIterator:
"""Reads StarTable blocks from an Excel workbook.
# TODO copy most of read_csv() docstring over
Reads StarTable blocks from an Excel workbook file at the specified path.
Yields them one at a time as a tuple: (block type, block content)
Args:
source:
Path of workbook to read.
Yields:
Tuples of the form (block type, block content)
"""
kwargs = {"origin": origin, "fixer": fixer, "to": to, "filter": filter}
try:
from ._excel_openpyxl import read_cell_rows_openpyxl as read_cell_rows
except ImportError as err:
raise ImportError(
"Unable to find a usable Excel engine. "
"Tried using: 'openpyxl'.\n"
"Please install openpyxl for Excel I/O support."
) from err
yield from parse_blocks(read_cell_rows(source), **kwargs)
def write_excel(
tables: Union[Table, Iterable[Table], TableBundle],
to: Union[str, os.PathLike, BinaryIO],
na_rep: str = "-",
):
"""Writes one or more tables to an Excel workbook.
Writes table blocks to an Excel workbook file.
Values are formatted to comply with the StarTable standard where necessary and possible.
This is a thin wrapper around the engine-specific writer (currently openpyxl); the actual
workbook writing is delegated to that back-end.
Args:
tables:
Table(s) to write. Can be a single Table or an iterable of Tables.
to:
File path or binary stream to which to write.
If a file path, then this file gets created/overwritten and then closed after writing.
If a stream, then it is left open after writing; the caller is responsible for managing
the stream.
na_rep:
Optional; String representation of missing values (NaN, None, NaT).
If overriding the default '-', it is recommended to use another value compliant with
the StarTable standard.
"""
try:
from ._excel_openpyxl import write_excel_openpyxl as write_excel_func
except ImportError as err:
raise ImportError(
"Unable to find a usable spreadsheet engine. "
"Tried using: 'openpyxl'.\n"
"Please install openpyxl for Excel I/O support."
) from err
write_excel_func(tables, to, na_rep)
avg_line_length: 34.389474 | max_line_length: 99 | alphanum_fraction: 0.677074
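A short usage sketch based on the docstrings above; the file names are placeholders, openpyxl is assumed to be installed, and BlockType.TABLE is assumed to be the enum member for table blocks.

# Illustrative only: read StarTable blocks from one workbook and write the tables back out.
from pdtable import BlockType
from pdtable.io.excel import read_excel, write_excel

tables = []
for block_type, block in read_excel("input.xlsx"):  # yields (block type, block content) tuples
    if block_type == BlockType.TABLE:
        tables.append(block)

write_excel(tables, "output.xlsx", na_rep="-")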
hexsha: 5cedc083de6aeab00b5204a041e6d83206c1f0d8 | size: 73,457 | ext: py | lang: Python
max_stars:  repo_path=src/sage/functions/orthogonal_polys.py, repo_name=fchapoton/sage, head_hexsha=765c5cb3e24dd134708eca97e4c52e0221cd94ba, licenses=["BSL-1.0"], count=1, stars_event_min_datetime=2020-08-30T04:27:27.000Z, stars_event_max_datetime=2020-08-30T04:27:27.000Z
max_issues: repo_path=src/sage/functions/orthogonal_polys.py, repo_name=fchapoton/sage, head_hexsha=765c5cb3e24dd134708eca97e4c52e0221cd94ba, licenses=["BSL-1.0"], count=null, issues_event_min_datetime=null, issues_event_max_datetime=null
max_forks:  repo_path=src/sage/functions/orthogonal_polys.py, repo_name=fchapoton/sage, head_hexsha=765c5cb3e24dd134708eca97e4c52e0221cd94ba, licenses=["BSL-1.0"], count=3, forks_event_min_datetime=2020-03-29T17:13:36.000Z, forks_event_max_datetime=2021-05-03T18:11:28.000Z
content:
r"""
Orthogonal Polynomials
- The Chebyshev polynomial of the first kind arises as a solution
to the differential equation
.. MATH::
(1-x^2)\,y'' - x\,y' + n^2\,y = 0
and those of the second kind as a solution to
.. MATH::
(1-x^2)\,y'' - 3x\,y' + n(n+2)\,y = 0.
The Chebyshev polynomials of the first kind are defined by the
recurrence relation
.. MATH::
T_0(x) = 1 \, T_1(x) = x \, T_{n+1}(x) = 2xT_n(x) - T_{n-1}(x). \,
The Chebyshev polynomials of the second kind are defined by the
recurrence relation
.. MATH::
U_0(x) = 1 \, U_1(x) = 2x \, U_{n+1}(x) = 2xU_n(x) - U_{n-1}(x). \,
For integers `m,n`, they satisfy the orthogonality
relations
.. MATH::
\int_{-1}^1 T_n(x)T_m(x)\,\frac{dx}{\sqrt{1-x^2}} =\left\{ \begin{matrix} 0 &: n\ne m~~~~~\\ \pi &: n=m=0\\ \pi/2 &: n=m\ne 0 \end{matrix} \right.
and
.. MATH::
\int_{-1}^1 U_n(x)U_m(x)\sqrt{1-x^2}\,dx =\frac{\pi}{2}\delta_{m,n}.
They are named after Pafnuty Chebyshev (alternative
transliterations: Tchebyshef or Tschebyscheff).
- The Hermite polynomials are defined either by
.. MATH::
H_n(x)=(-1)^n e^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2}
(the "probabilists' Hermite polynomials"), or by
.. MATH::
H_n(x)=(-1)^n e^{x^2}\frac{d^n}{dx^n}e^{-x^2}
(the "physicists' Hermite polynomials"). Sage (via Maxima)
implements the latter flavor. These satisfy the orthogonality
relation
.. MATH::
\int_{-\infty}^\infty H_n(x)H_m(x)\,e^{-x^2}\,dx ={n!2^n}{\sqrt{\pi}}\delta_{nm}
They are named in honor of Charles Hermite.
- Each *Legendre polynomial* `P_n(x)` is an `n`-th degree polynomial.
It may be expressed using Rodrigues' formula:
.. MATH::
P_n(x) = (2^n n!)^{-1} {\frac{d^n}{dx^n} } \left[ (x^2 -1)^n \right].
These are solutions to Legendre's differential equation:
.. MATH::
{\frac{d}{dx}} \left[ (1-x^2) {\frac{d}{dx}} P(x) \right] + n(n+1)P(x) = 0.
and satisfy the orthogonality relation
.. MATH::
\int_{-1}^{1} P_m(x) P_n(x)\,dx = {\frac{2}{2n + 1}} \delta_{mn}
The *Legendre function of the second kind* `Q_n(x)` is another
(linearly independent) solution to the Legendre differential equation.
It is not an "orthogonal polynomial" however.
The associated Legendre functions of the first kind
`P_\ell^m(x)` can be given in terms of the "usual"
Legendre polynomials by
.. MATH::
\begin{array}{ll} P_\ell^m(x) &= (-1)^m(1-x^2)^{m/2}\frac{d^m}{dx^m}P_\ell(x) \\ &= \frac{(-1)^m}{2^\ell \ell!} (1-x^2)^{m/2}\frac{d^{\ell+m}}{dx^{\ell+m}}(x^2-1)^\ell. \end{array}
Assuming `0 \le m \le \ell`, they satisfy the orthogonality
relation:
.. MATH::
\int_{-1}^{1} P_k ^{(m)} P_\ell ^{(m)} dx = \frac{2 (\ell+m)!}{(2\ell+1)(\ell-m)!}\ \delta _{k,\ell},
where `\delta _{k,\ell}` is the Kronecker delta.
The associated Legendre functions of the second kind
`Q_\ell^m(x)` can be given in terms of the "usual"
Legendre polynomials by
.. MATH::
Q_\ell^m(x) = (-1)^m(1-x^2)^{m/2}\frac{d^m}{dx^m}Q_\ell(x).
They are named after Adrien-Marie Legendre.
- Laguerre polynomials may be defined by the Rodrigues formula
.. MATH::
L_n(x)=\frac{e^x}{n!}\frac{d^n}{dx^n}\left(e^{-x} x^n\right).
They are solutions of Laguerre's equation:
.. MATH::
x\,y'' + (1 - x)\,y' + n\,y = 0\,
and satisfy the orthogonality relation
.. MATH::
\int_0^\infty L_m(x) L_n(x) e^{-x}\,dx = \delta_{mn}.
The generalized Laguerre polynomials may be defined by the
Rodrigues formula:
.. MATH::
L_n^{(\alpha)}(x) = {\frac{x^{-\alpha} e^x}{n!}}{\frac{d^n}{dx^n}} \left(e^{-x} x^{n+\alpha}\right) .
(These are also sometimes called the associated Laguerre
polynomials.) The simple Laguerre polynomials are recovered from
the generalized polynomials by setting `\alpha =0`.
They are named after Edmond Laguerre.
- Jacobi polynomials are a class of orthogonal polynomials. They
are obtained from hypergeometric series in cases where the series
is in fact finite:
.. MATH::
P_n^{(\alpha,\beta)}(z) =\frac{(\alpha+1)_n}{n!} \,_2F_1\left(-n,1+\alpha+\beta+n;\alpha+1;\frac{1-z}{2}\right) ,
where `()_n` is Pochhammer's symbol (for the rising
factorial), (Abramowitz and Stegun p561.) and thus have the
explicit expression
.. MATH::
P_n^{(\alpha,\beta)} (z) = \frac{\Gamma (\alpha+n+1)}{n!\Gamma (\alpha+\beta+n+1)} \sum_{m=0}^n \binom{n}{m} \frac{\Gamma (\alpha + \beta + n + m + 1)}{\Gamma (\alpha + m + 1)} \left(\frac{z-1}{2}\right)^m .
They are named after Carl Jacobi.
- Ultraspherical or Gegenbauer polynomials are given in terms of
the Jacobi polynomials `P_n^{(\alpha,\beta)}(x)` with
`\alpha=\beta=a-1/2` by
.. MATH::
C_n^{(a)}(x)= \frac{\Gamma(a+1/2)}{\Gamma(2a)}\frac{\Gamma(n+2a)}{\Gamma(n+a+1/2)} P_n^{(a-1/2,a-1/2)}(x).
They satisfy the orthogonality relation
.. MATH::
\int_{-1}^1(1-x^2)^{a-1/2}C_m^{(a)}(x)C_n^{(a)}(x)\, dx =\delta_{mn}2^{1-2a}\pi \frac{\Gamma(n+2a)}{(n+a)\Gamma^2(a)\Gamma(n+1)} ,
for `a>-1/2`. They are obtained from hypergeometric series
in cases where the series is in fact finite:
.. MATH::
C_n^{(a)}(z) =\frac{(2a)^{\underline{n}}}{n!} \,_2F_1\left(-n,2a+n;a+\frac{1}{2};\frac{1-z}{2}\right)
where `\underline{n}` is the falling factorial. (See
Abramowitz and Stegun p561)
They are named for Leopold Gegenbauer (1849-1903).
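For concreteness, the lowest nontrivial members of several of these families follow directly from the definitions above (Chebyshev from the recurrences, Hermite in the physicists' convention, Legendre and Laguerre from their Rodrigues formulas):
.. MATH::
T_2(x) = 2x\cdot x - 1 = 2x^2 - 1, \qquad T_3(x) = 2x(2x^2-1) - x = 4x^3 - 3x, \qquad U_2(x) = 2x\cdot 2x - 1 = 4x^2 - 1,
.. MATH::
H_2(x) = 4x^2 - 2, \qquad P_2(x) = \tfrac{1}{8}\,\tfrac{d^2}{dx^2}(x^2-1)^2 = \tfrac{1}{2}(3x^2-1), \qquad L_2(x) = \tfrac{e^x}{2}\,\tfrac{d^2}{dx^2}\!\left(e^{-x}x^2\right) = \tfrac{1}{2}(x^2-4x+2).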
For completeness, the Pochhammer symbol, introduced by Leo August
Pochhammer, `(x)_n`, is used in the theory of special
functions to represent the "rising factorial" or "upper factorial"
.. MATH::
(x)_n=x(x+1)(x+2)\cdots(x+n-1)=\frac{(x+n-1)!}{(x-1)!}.
On the other hand, the "falling factorial" or "lower factorial" is
.. MATH::
x^{\underline{n}}=\frac{x!}{(x-n)!} ,
in the notation of Ronald L. Graham, Donald E. Knuth and Oren
Patashnik in their book Concrete Mathematics.
.. TODO::
Implement Zernike polynomials.
:wikipedia:`Zernike_polynomials`
REFERENCES:
- [AS1964]_
- :wikipedia:`Chebyshev_polynomials`
- :wikipedia:`Legendre_polynomials`
- :wikipedia:`Hermite_polynomials`
- http://mathworld.wolfram.com/GegenbauerPolynomial.html
- :wikipedia:`Jacobi_polynomials`
- :wikipedia:`Laguerre_polynomia`
- :wikipedia:`Associated_Legendre_polynomials`
- [Koe1999]_
AUTHORS:
- David Joyner (2006-06)
- Stefan Reiterer (2010-)
- Ralf Stephan (2015-)
The original module wrapped some of the orthogonal/special functions
in the Maxima package "orthopoly" and was written by Barton
Willis of the University of Nebraska at Kearney.
"""
# ****************************************************************************
# Copyright (C) 2006 William Stein <wstein@gmail.com>
# 2006 David Joyner <wdj@usna.edu>
# 2010 Stefan Reiterer <maldun.finsterschreck@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from six.moves import range
import warnings
from sage.misc.latex import latex
from sage.rings.all import ZZ, QQ, RR, CC
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.real_mpfr import is_RealField
from sage.rings.complex_field import is_ComplexField
from sage.symbolic.function import BuiltinFunction, GinacFunction
from sage.symbolic.expression import Expression
from sage.symbolic.all import SR
from sage.functions.other import factorial, binomial
from sage.structure.all import parent
class OrthogonalFunction(BuiltinFunction):
"""
Base class for orthogonal polynomials.
This class is an abstract base class for all orthogonal polynomials since
they share similar properties. The evaluation as a polynomial
is either done via maxima, or with pynac.
Convention: The first argument is always the order of the polynomial,
the others are other values or parameters where the polynomial is
evaluated.
"""
def __init__(self, name, nargs=2, latex_name=None, conversions={}):
"""
:class:`OrthogonalFunction` class needs the same input parameter as
its parent class.
EXAMPLES::
sage: from sage.functions.orthogonal_polys import OrthogonalFunction
sage: new = OrthogonalFunction('testo_P')
sage: new
testo_P
"""
try:
self._maxima_name = conversions['maxima']
except KeyError:
self._maxima_name = None
super(OrthogonalFunction,self).__init__(name=name, nargs=nargs,
latex_name=latex_name, conversions=conversions)
def eval_formula(self, *args):
"""
Evaluate this polynomial using an explicit formula.
EXAMPLES::
sage: from sage.functions.orthogonal_polys import OrthogonalFunction
sage: P = OrthogonalFunction('testo_P')
sage: P.eval_formula(1,2.0)
Traceback (most recent call last):
...
NotImplementedError: no explicit calculation of values implemented
"""
raise NotImplementedError("no explicit calculation of values implemented")
def _eval_special_values_(self, *args):
"""
Evaluate the polynomial explicitly for special values.
EXAMPLES::
sage: var('n')
n
sage: chebyshev_T(n,-1)
(-1)^n
"""
raise ValueError("no special values known")
def _eval_(self, n, *args):
"""
The :meth:`_eval_()` method decides which evaluation suits best
for the given input, and returns a proper value.
EXAMPLES::
sage: var('n,x')
(n, x)
sage: chebyshev_T(5,x)
16*x^5 - 20*x^3 + 5*x
"""
return None
def __call__(self, *args, **kwds):
"""
This overrides the call method from SageObject to avoid problems with coercions,
since the _eval_ method is able to handle more data types than symbolic functions
would normally allow.
Thus we distinguish between algebraic objects (when n is an integer) and the
symbolic function otherwise.
EXAMPLES::
sage: chebyshev_T(5, x)
16*x^5 - 20*x^3 + 5*x
sage: chebyshev_T(5, x, algorithm='pari')
16*x^5 - 20*x^3 + 5*x
sage: chebyshev_T(5, x, algorithm='maxima')
16*x^5 - 20*x^3 + 5*x
sage: chebyshev_T(5, x, algorithm='recursive')
16*x^5 - 20*x^3 + 5*x
"""
algorithm = kwds.get('algorithm', None)
if algorithm == 'pari':
return self.eval_pari(*args, **kwds)
elif algorithm == 'recursive':
return self.eval_recursive(*args, **kwds)
elif algorithm == 'maxima':
from sage.calculus.calculus import maxima
kwds['hold'] = True
return maxima(self._eval_(*args, **kwds))._sage_()
return super(OrthogonalFunction,self).__call__(*args, **kwds)
class ChebyshevFunction(OrthogonalFunction):
"""
Abstract base class for Chebyshev polynomials of the first and second kind.
EXAMPLES::
sage: chebyshev_T(3,x)
4*x^3 - 3*x
"""
def __call__(self, n, *args, **kwds):
"""
This overrides the call method from SageObject to avoid problems with coercions,
since the _eval_ method is able to handle more data types than symbolic functions
would normally allow.
Thus we distinguish between algebraic objects (when n is an integer) and the
symbolic function otherwise.
EXAMPLES::
sage: K.<a> = NumberField(x^3-x-1)
sage: chebyshev_T(5, a)
16*a^2 + a - 4
sage: chebyshev_T(5,MatrixSpace(ZZ, 2)([1, 2, -4, 7]))
[-40799 44162]
[-88324 91687]
sage: R.<x> = QQ[]
sage: parent(chebyshev_T(5, x))
Univariate Polynomial Ring in x over Rational Field
sage: chebyshev_T(5, 2, hold=True)
chebyshev_T(5, 2)
sage: chebyshev_T(1,2,3)
Traceback (most recent call last):
...
TypeError: Symbolic function chebyshev_T takes exactly 2 arguments (3 given)
"""
# If n is an integer: consider the polynomial as an algebraic (not symbolic) object
if n in ZZ and not kwds.get('hold', False):
try:
return self._eval_(n, *args)
except Exception:
pass
return super(ChebyshevFunction,self).__call__(n, *args, **kwds)
def _eval_(self, n, x):
"""
The :meth:`_eval_()` method decides which evaluation suits best
for the given input, and returns a proper value.
EXAMPLES::
sage: var('n,x')
(n, x)
sage: chebyshev_T(5,x)
16*x^5 - 20*x^3 + 5*x
sage: chebyshev_T(64, x)
2*(2*(2*(2*(2*(2*x^2 - 1)^2 - 1)^2 - 1)^2 - 1)^2 - 1)^2 - 1
sage: chebyshev_T(n,-1)
(-1)^n
sage: chebyshev_T(-7,x)
64*x^7 - 112*x^5 + 56*x^3 - 7*x
sage: chebyshev_T(3/2,x)
chebyshev_T(3/2, x)
sage: R.<t> = QQ[]
sage: chebyshev_T(2,t)
2*t^2 - 1
sage: chebyshev_U(2,t)
4*t^2 - 1
sage: parent(chebyshev_T(4, RIF(5)))
Real Interval Field with 53 bits of precision
sage: RR2 = RealField(5)
sage: chebyshev_T(100000,RR2(2))
8.9e57180
sage: chebyshev_T(5,Qp(3)(2))
2 + 3^2 + 3^3 + 3^4 + 3^5 + O(3^20)
sage: chebyshev_T(100001/2, 2)
doctest:...: RuntimeWarning: mpmath failed, keeping expression unevaluated
chebyshev_T(100001/2, 2)
sage: chebyshev_U._eval_(1.5, Mod(8,9)) is None
True
"""
# n is an integer => evaluate algebraically (as polynomial)
if n in ZZ:
n = ZZ(n)
# Expanded symbolic expression only for small values of n
if isinstance(x, Expression) and n.abs() < 32:
return self.eval_formula(n, x)
return self.eval_algebraic(n, x)
if isinstance(x, Expression) or isinstance(n, Expression):
# Check for known identities
try:
return self._eval_special_values_(n, x)
except ValueError:
# Don't evaluate => keep symbolic
return None
# n is not an integer and neither n nor x is symbolic.
# We assume n and x are real/complex and evaluate numerically
try:
import sage.libs.mpmath.all as mpmath
return self._evalf_(n, x)
except mpmath.NoConvergence:
warnings.warn("mpmath failed, keeping expression unevaluated",
RuntimeWarning)
return None
except Exception:
# Numerical evaluation failed => keep symbolic
return None
class Func_chebyshev_T(ChebyshevFunction):
"""
Chebyshev polynomials of the first kind.
REFERENCE:
- [AS1964]_ 22.5.31 page 778 and 6.1.22 page 256.
EXAMPLES::
sage: chebyshev_T(5,x)
16*x^5 - 20*x^3 + 5*x
sage: var('k')
k
sage: test = chebyshev_T(k,x)
sage: test
chebyshev_T(k, x)
"""
def __init__(self):
"""
Init method for the chebyshev polynomials of the first kind.
EXAMPLES::
sage: var('n, x')
(n, x)
sage: from sage.functions.orthogonal_polys import Func_chebyshev_T
sage: chebyshev_T2 = Func_chebyshev_T()
sage: chebyshev_T2(1,x)
x
sage: chebyshev_T(x, x)._sympy_()
chebyshevt(x, x)
sage: maxima(chebyshev_T(1,x, hold=True))
_SAGE_VAR_x
sage: maxima(chebyshev_T(n, chebyshev_T(n, x)))
chebyshev_t(_SAGE_VAR_n,chebyshev_t(_SAGE_VAR_n,_SAGE_VAR_x))
"""
ChebyshevFunction.__init__(self, 'chebyshev_T', nargs=2,
conversions=dict(maxima='chebyshev_t',
mathematica='ChebyshevT',
sympy='chebyshevt',
giac='tchebyshev1'))
def _latex_(self):
r"""
TESTS::
sage: latex(chebyshev_T)
T_n
"""
return r"T_n"
def _print_latex_(self, n, z):
r"""
TESTS::
sage: latex(chebyshev_T(3, x, hold=True))
T_{3}\left(x\right)
"""
return r"T_{{{}}}\left({}\right)".format(latex(n), latex(z))
def _eval_special_values_(self, n, x):
"""
Values known for special values of x.
For details see [AS1964]_ 22.4 (p. 777)
EXAMPLES::
sage: var('n')
n
sage: chebyshev_T(n,1)
1
sage: chebyshev_T(n,0)
1/2*(-1)^(1/2*n)*((-1)^n + 1)
sage: chebyshev_T(n,-1)
(-1)^n
sage: chebyshev_T._eval_special_values_(3/2,x)
Traceback (most recent call last):
...
ValueError: no special value found
sage: chebyshev_T._eval_special_values_(n, 0.1)
Traceback (most recent call last):
...
ValueError: no special value found
"""
if x == 1:
return x
if x == -1:
return x**n
if x == 0:
return (1+(-1)**n)*(-1)**(n/2)/2
raise ValueError("no special value found")
def _evalf_(self, n, x, **kwds):
"""
Evaluates :class:`chebyshev_T` numerically with mpmath.
EXAMPLES::
sage: chebyshev_T._evalf_(10,3)
2.26195370000000e7
sage: chebyshev_T._evalf_(10,3,parent=RealField(75))
2.261953700000000000000e7
sage: chebyshev_T._evalf_(10,I)
-3363.00000000000
sage: chebyshev_T._evalf_(5,0.3)
0.998880000000000
sage: chebyshev_T(1/2, 0)
0.707106781186548
sage: chebyshev_T(1/2, 3/2)
1.11803398874989
sage: chebyshev_T._evalf_(1.5, Mod(8,9))
Traceback (most recent call last):
...
TypeError: cannot evaluate chebyshev_T with parent Ring of integers modulo 9
This simply evaluates using :class:`RealField` or :class:`ComplexField`::
sage: chebyshev_T(1234.5, RDF(2.1))
5.48174256255782e735
sage: chebyshev_T(1234.5, I)
-1.21629397684152e472 - 1.21629397684152e472*I
For large values of ``n``, mpmath fails (but the algebraic formula
still works)::
sage: chebyshev_T._evalf_(10^6, 0.1)
Traceback (most recent call last):
...
NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
sage: chebyshev_T(10^6, 0.1)
0.636384327171504
"""
try:
real_parent = kwds['parent']
except KeyError:
real_parent = parent(x)
if not is_RealField(real_parent) and not is_ComplexField(real_parent):
# parent is not a real or complex field: figure out a good parent
if x in RR:
x = RR(x)
real_parent = RR
elif x in CC:
x = CC(x)
real_parent = CC
if not is_RealField(real_parent) and not is_ComplexField(real_parent):
raise TypeError("cannot evaluate chebyshev_T with parent {}".format(real_parent))
from sage.libs.mpmath.all import call as mpcall
from sage.libs.mpmath.all import chebyt as mpchebyt
return mpcall(mpchebyt, n, x, parent=real_parent)
def eval_formula(self, n, x):
"""
Evaluate ``chebyshev_T`` using an explicit formula.
See [AS1964]_ 227 (p. 782) for details on the recursions.
See also [Koe1999]_ for fast evaluation techniques.
INPUT:
- ``n`` -- an integer
- ``x`` -- a value to evaluate the polynomial at (this can be
any ring element)
EXAMPLES::
sage: chebyshev_T.eval_formula(-1,x)
x
sage: chebyshev_T.eval_formula(0,x)
1
sage: chebyshev_T.eval_formula(1,x)
x
sage: chebyshev_T.eval_formula(2,0.1) == chebyshev_T._evalf_(2,0.1)
True
sage: chebyshev_T.eval_formula(10,x)
512*x^10 - 1280*x^8 + 1120*x^6 - 400*x^4 + 50*x^2 - 1
sage: chebyshev_T.eval_algebraic(10,x).expand()
512*x^10 - 1280*x^8 + 1120*x^6 - 400*x^4 + 50*x^2 - 1
"""
if n < 0:
return self.eval_formula(-n, x)
elif n == 0:
return parent(x).one()
res = parent(x).zero()
for j in range(n // 2 + 1):
f = factorial(n-1-j) / factorial(j) / factorial(n-2*j)
res += (-1)**j * (2*x)**(n-2*j) * f
res *= n/2
return res
def eval_algebraic(self, n, x):
"""
Evaluate :class:`chebyshev_T` as polynomial, using a recursive
formula.
INPUT:
- ``n`` -- an integer
- ``x`` -- a value to evaluate the polynomial at (this can be
any ring element)
EXAMPLES::
sage: chebyshev_T.eval_algebraic(5, x)
2*(2*(2*x^2 - 1)*x - x)*(2*x^2 - 1) - x
sage: chebyshev_T(-7, x) - chebyshev_T(7,x)
0
sage: R.<t> = ZZ[]
sage: chebyshev_T.eval_algebraic(-1, t)
t
sage: chebyshev_T.eval_algebraic(0, t)
1
sage: chebyshev_T.eval_algebraic(1, t)
t
sage: chebyshev_T(7^100, 1/2)
1/2
sage: chebyshev_T(7^100, Mod(2,3))
2
sage: n = 97; x = RIF(pi/2/n)
sage: chebyshev_T(n, cos(x)).contains_zero()
True
sage: R.<t> = Zp(2, 8, 'capped-abs')[]
sage: chebyshev_T(10^6+1, t)
(2^7 + O(2^8))*t^5 + O(2^8)*t^4 + (2^6 + O(2^8))*t^3 + O(2^8)*t^2 + (1 + 2^6 + O(2^8))*t + O(2^8)
"""
if n == 0:
return parent(x).one()
if n < 0:
return self._eval_recursive_(-n, x)[0]
return self._eval_recursive_(n, x)[0]
def _eval_recursive_(self, n, x, both=False):
"""
If ``both=True``, compute ``(T(n,x), T(n-1,x))`` using a
recursive formula.
If ``both=False``, return instead a tuple ``(T(n,x), False)``.
EXAMPLES::
sage: chebyshev_T._eval_recursive_(5, x)
(2*(2*(2*x^2 - 1)*x - x)*(2*x^2 - 1) - x, False)
sage: chebyshev_T._eval_recursive_(5, x, True)
(2*(2*(2*x^2 - 1)*x - x)*(2*x^2 - 1) - x, 2*(2*x^2 - 1)^2 - 1)
"""
if n == 1:
return x, parent(x).one()
assert n >= 2
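# Split-and-double step: with m = (n+1)//2, the identities
#     T_{2m}(x)   = 2*T_m(x)^2 - 1
#     T_{2m-1}(x) = 2*T_m(x)*T_{m-1}(x) - x
# reduce the evaluation to O(log n) polynomial multiplications.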
a, b = self._eval_recursive_((n+1)//2, x, both or n % 2)
if n % 2 == 0:
return 2*a*a - 1, both and 2*a*b - x
else:
return 2*a*b - x, both and 2*b*b - 1
def _eval_numpy_(self, n, x):
"""
Evaluate ``self`` using numpy.
EXAMPLES::
sage: import numpy
sage: z = numpy.array([1,2])
sage: z2 = numpy.array([[1,2],[1,2]])
sage: z3 = numpy.array([1,2,3.])
sage: chebyshev_T(1,z)
array([1., 2.])
sage: chebyshev_T(1,z2)
array([[1., 2.],
[1., 2.]])
sage: chebyshev_T(1,z3)
array([1., 2., 3.])
sage: chebyshev_T(z,0.1)
array([ 0.1 , -0.98])
"""
from scipy.special import eval_chebyt
return eval_chebyt(n, x)
def _derivative_(self, n, x, diff_param):
"""
Return the derivative of :class:`chebyshev_T` in form of the Chebyshev
polynomial of the second kind :class:`chebyshev_U`.
EXAMPLES::
sage: var('k')
k
sage: derivative(chebyshev_T(k,x),x)
k*chebyshev_U(k - 1, x)
sage: derivative(chebyshev_T(3,x),x)
12*x^2 - 3
sage: derivative(chebyshev_T(k,x),k)
Traceback (most recent call last):
...
NotImplementedError: derivative w.r.t. to the index is not supported yet
"""
if diff_param == 0:
raise NotImplementedError("derivative w.r.t. to the index is not supported yet")
elif diff_param == 1:
return n*chebyshev_U(n-1, x)
raise ValueError("illegal differentiation parameter {}".format(diff_param))
chebyshev_T = Func_chebyshev_T()
class Func_chebyshev_U(ChebyshevFunction):
"""
Class for the Chebyshev polynomial of the second kind.
REFERENCE:
- [AS1964]_ 22.8.3 page 783 and 6.1.22 page 256.
EXAMPLES::
sage: R.<t> = QQ[]
sage: chebyshev_U(2,t)
4*t^2 - 1
sage: chebyshev_U(3,t)
8*t^3 - 4*t
"""
def __init__(self):
"""
Init method for the chebyshev polynomials of the second kind.
EXAMPLES::
sage: var('n, x')
(n, x)
sage: from sage.functions.orthogonal_polys import Func_chebyshev_U
sage: chebyshev_U2 = Func_chebyshev_U()
sage: chebyshev_U2(1,x)
2*x
sage: chebyshev_U(x, x)._sympy_()
chebyshevu(x, x)
sage: maxima(chebyshev_U(2,x, hold=True))
3*((-(8*(1-_SAGE_VAR_x))/3)+(4*(1-_SAGE_VAR_x)^2)/3+1)
sage: maxima(chebyshev_U(n,x, hold=True))
chebyshev_u(_SAGE_VAR_n,_SAGE_VAR_x)
"""
ChebyshevFunction.__init__(self, 'chebyshev_U', nargs=2,
conversions=dict(maxima='chebyshev_u',
mathematica='ChebyshevU',
sympy='chebyshevu',
giac='tchebyshev2'))
def _latex_(self):
r"""
TESTS::
sage: latex(chebyshev_U)
U_n
"""
return r"U_n"
def _print_latex_(self, n, z):
r"""
TESTS::
sage: latex(chebyshev_U(3, x, hold=True))
U_{3}\left(x\right)
"""
return r"U_{{{}}}\left({}\right)".format(latex(n), latex(z))
def eval_formula(self, n, x):
"""
Evaluate ``chebyshev_U`` using an explicit formula.
See [AS1964]_ 227 (p. 782) for details on the recursions.
See also [Koe1999]_ for the recursion formulas.
INPUT:
- ``n`` -- an integer
- ``x`` -- a value to evaluate the polynomial at (this can be
any ring element)
EXAMPLES::
sage: chebyshev_U.eval_formula(10, x)
1024*x^10 - 2304*x^8 + 1792*x^6 - 560*x^4 + 60*x^2 - 1
sage: chebyshev_U.eval_formula(-2, x)
-1
sage: chebyshev_U.eval_formula(-1, x)
0
sage: chebyshev_U.eval_formula(0, x)
1
sage: chebyshev_U.eval_formula(1, x)
2*x
sage: chebyshev_U.eval_formula(2,0.1) == chebyshev_U._evalf_(2,0.1)
True
"""
if n < -1:
return -self.eval_formula(-n-2, x)
res = parent(x).zero()
for j in range(n // 2 + 1):
f = binomial(n-j, j)
res += (-1)**j * (2*x)**(n-2*j) * f
return res
def eval_algebraic(self, n, x):
"""
Evaluate :class:`chebyshev_U` as polynomial, using a recursive
formula.
INPUT:
- ``n`` -- an integer
- ``x`` -- a value to evaluate the polynomial at (this can be
any ring element)
EXAMPLES::
sage: chebyshev_U.eval_algebraic(5,x)
-2*((2*x + 1)*(2*x - 1)*x - 4*(2*x^2 - 1)*x)*(2*x + 1)*(2*x - 1)
sage: parent(chebyshev_U(3, Mod(8,9)))
Ring of integers modulo 9
sage: parent(chebyshev_U(3, Mod(1,9)))
Ring of integers modulo 9
sage: chebyshev_U(-3,x) + chebyshev_U(1,x)
0
sage: chebyshev_U(-1,Mod(5,8))
0
sage: parent(chebyshev_U(-1,Mod(5,8)))
Ring of integers modulo 8
sage: R.<t> = ZZ[]
sage: chebyshev_U.eval_algebraic(-2, t)
-1
sage: chebyshev_U.eval_algebraic(-1, t)
0
sage: chebyshev_U.eval_algebraic(0, t)
1
sage: chebyshev_U.eval_algebraic(1, t)
2*t
sage: n = 97; x = RIF(pi/n)
sage: chebyshev_U(n-1, cos(x)).contains_zero()
True
sage: R.<t> = Zp(2, 6, 'capped-abs')[]
sage: chebyshev_U(10^6+1, t)
(2 + O(2^6))*t + O(2^6)
"""
if n == -1:
return parent(x).zero()
if n < 0:
return -self._eval_recursive_(-n-2, x)[0]
return self._eval_recursive_(n, x)[0]
def _eval_recursive_(self, n, x, both=False):
"""
If ``both=True``, compute ``(U(n,x), U(n+1,x))`` using a
recursive formula.
If ``both=False``, return instead a tuple ``(U(n,x), False)``.
EXAMPLES::
sage: chebyshev_U._eval_recursive_(3, x)
(4*((2*x + 1)*(2*x - 1) - 2*x^2)*x, False)
sage: chebyshev_U._eval_recursive_(3, x, True)
(4*((2*x + 1)*(2*x - 1) - 2*x^2)*x, ((2*x + 1)*(2*x - 1) + 2*x)*((2*x + 1)*(2*x - 1) - 2*x))
"""
if n == 0:
return parent(x).one(), 2*x
assert n >= 1
a, b = self._eval_recursive_((n-1)//2, x, True)
if n % 2 == 0:
return (b+a)*(b-a), both and 2*b*(x*b-a)
else:
return 2*a*(b-x*a), both and (b+a)*(b-a)
def _evalf_(self, n, x, **kwds):
"""
Evaluate :class:`chebyshev_U` numerically with mpmath.
EXAMPLES::
sage: chebyshev_U(5,-4+3.*I)
98280.0000000000 - 11310.0000000000*I
sage: chebyshev_U(10,3).n(75)
4.661117900000000000000e7
sage: chebyshev_U._evalf_(1.5, Mod(8,9))
Traceback (most recent call last):
...
TypeError: cannot evaluate chebyshev_U with parent Ring of integers modulo 9
"""
try:
real_parent = kwds['parent']
except KeyError:
real_parent = parent(x)
if not is_RealField(real_parent) and not is_ComplexField(real_parent):
# parent is not a real or complex field: figure out a good parent
if x in RR:
x = RR(x)
real_parent = RR
elif x in CC:
x = CC(x)
real_parent = CC
if not is_RealField(real_parent) and not is_ComplexField(real_parent):
raise TypeError("cannot evaluate chebyshev_U with parent {}".format(real_parent))
from sage.libs.mpmath.all import call as mpcall
from sage.libs.mpmath.all import chebyu as mpchebyu
return mpcall(mpchebyu, n, x, parent=real_parent)
def _eval_special_values_(self, n, x):
"""
Values known for special values of x.
See [AS1964]_ 22.4 (p.777).
EXAMPLES::
sage: var('n')
n
sage: chebyshev_U(n,1)
n + 1
sage: chebyshev_U(n,0)
1/2*(-1)^(1/2*n)*((-1)^n + 1)
sage: chebyshev_U(n,-1)
(-1)^n*(n + 1)
sage: chebyshev_U._eval_special_values_(n, 2)
Traceback (most recent call last):
...
ValueError: no special value found
"""
if x == 1:
return x*(n+1)
if x == -1:
return x**n*(n+1)
if x == 0:
return (1+(-1)**n)*(-1)**(n/2)/2
raise ValueError("no special value found")
def _eval_numpy_(self, n, x):
"""
Evaluate ``self`` using numpy.
EXAMPLES::
sage: import numpy
sage: z = numpy.array([1,2])
sage: z2 = numpy.array([[1,2],[1,2]])
sage: z3 = numpy.array([1,2,3.])
sage: chebyshev_U(1,z)
array([2., 4.])
sage: chebyshev_U(1,z2)
array([[2., 4.],
[2., 4.]])
sage: chebyshev_U(1,z3)
array([2., 4., 6.])
sage: chebyshev_U(z,0.1)
array([ 0.2 , -0.96])
"""
from scipy.special import eval_chebyu
return eval_chebyu(n, x)
def _derivative_(self, n, x, diff_param):
"""
Return the derivative of :class:`chebyshev_U` in form of the Chebyshev
polynomials of the first and second kind.
EXAMPLES::
sage: var('k')
k
sage: derivative(chebyshev_U(k,x),x)
((k + 1)*chebyshev_T(k + 1, x) - x*chebyshev_U(k, x))/(x^2 - 1)
sage: derivative(chebyshev_U(3,x),x)
24*x^2 - 4
sage: derivative(chebyshev_U(k,x),k)
Traceback (most recent call last):
...
NotImplementedError: derivative w.r.t. to the index is not supported yet
"""
if diff_param == 0:
raise NotImplementedError("derivative w.r.t. to the index is not supported yet")
elif diff_param == 1:
return ((n+1)*chebyshev_T(n+1, x) - x*chebyshev_U(n,x)) / (x*x-1)
raise ValueError("illegal differentiation parameter {}".format(diff_param))
chebyshev_U = Func_chebyshev_U()
class Func_legendre_P(GinacFunction):
r"""
EXAMPLES::
sage: legendre_P(4, 2.0)
55.3750000000000
sage: legendre_P(1, x)
x
sage: legendre_P(4, x+1)
35/8*(x + 1)^4 - 15/4*(x + 1)^2 + 3/8
sage: legendre_P(1/2, I+1.)
1.05338240025858 + 0.359890322109665*I
sage: legendre_P(0, SR(1)).parent()
Symbolic Ring
sage: legendre_P(0, 0)
1
sage: legendre_P(1, x)
x
sage: legendre_P(4, 2.)
55.3750000000000
sage: legendre_P(5.5,1.00001)
1.00017875754114
sage: legendre_P(1/2, I+1).n()
1.05338240025858 + 0.359890322109665*I
sage: legendre_P(1/2, I+1).n(59)
1.0533824002585801 + 0.35989032210966539*I
sage: legendre_P(42, RR(12345678))
2.66314881466753e309
sage: legendre_P(42, Reals(20)(12345678))
2.6632e309
sage: legendre_P(201/2, 0).n()
0.0561386178630179
sage: legendre_P(201/2, 0).n(100)
0.056138617863017877699963095883
sage: R.<x> = QQ[]
sage: legendre_P(4,x)
35/8*x^4 - 15/4*x^2 + 3/8
sage: legendre_P(10000,x).coefficient(x,1)
0
sage: var('t,x')
(t, x)
sage: legendre_P(-5,t)
35/8*t^4 - 15/4*t^2 + 3/8
sage: legendre_P(4, x+1)
35/8*(x + 1)^4 - 15/4*(x + 1)^2 + 3/8
sage: legendre_P(4, sqrt(2))
83/8
sage: legendre_P(4, I*e)
35/8*e^4 + 15/4*e^2 + 3/8
sage: n = var('n')
sage: derivative(legendre_P(n,x), x)
(n*x*legendre_P(n, x) - n*legendre_P(n - 1, x))/(x^2 - 1)
sage: derivative(legendre_P(3,x), x)
15/2*x^2 - 3/2
sage: derivative(legendre_P(n,x), n)
Traceback (most recent call last):
...
RuntimeError: derivative w.r.t. to the index is not supported yet
"""
def __init__(self):
r"""
Init method for the Legendre polynomials of the first kind.
EXAMPLES::
sage: loads(dumps(legendre_P))
legendre_P
"""
BuiltinFunction.__init__(self, 'legendre_P', nargs=2, latex_name=r"P",
conversions={'maxima':'legendre_p',
'mathematica':'LegendreP',
'maple':'LegendreP',
'giac':'legendre'})
legendre_P = Func_legendre_P()
class Func_legendre_Q(BuiltinFunction):
def __init__(self):
r"""
EXAMPLES::
sage: loads(dumps(legendre_Q))
legendre_Q
sage: maxima(legendre_Q(20,x, hold=True))._sage_().coefficient(x,10)
-29113619535/131072*log(-(x + 1)/(x - 1))
"""
BuiltinFunction.__init__(self, "legendre_Q", nargs=2, latex_name=r"Q",
conversions={'maxima':'legendre_q', 'mathematica':'LegendreQ',
'maple':'LegendreQ'})
def _eval_(self, n, x, *args, **kwds):
r"""
Return an evaluation of this Legendre Q expression.
EXAMPLES::
sage: legendre_Q(2,x)
1/4*(3*x^2 - 1)*(log(x + 1) - log(-x + 1)) - 3/2*x
sage: legendre_Q(5,0)
-8/15
sage: legendre_Q(2,2*x)
1/4*(12*x^2 - 1)*(log(2*x + 1) - log(-2*x + 1)) - 3*x
sage: legendre_Q(1/2, I+1.)
-0.511424110789061 + 1.34356195297194*I
sage: legendre_Q(-1,x)
Infinity
"""
ret = self._eval_special_values_(n, x)
if ret is not None:
return ret
if n in ZZ:
if n < 0:
from sage.rings.infinity import unsigned_infinity
return SR(unsigned_infinity)
return self.eval_formula(n, x)
def _eval_special_values_(self, n, x):
"""
Special values known.
EXAMPLES::
sage: var('n')
n
sage: legendre_Q(n, 0)
-1/2*sqrt(pi)*gamma(1/2*n + 1/2)*sin(1/2*pi*n)/gamma(1/2*n + 1)
sage: legendre_Q(-1., 0.)
+infinity
sage: legendre_Q(-1/2, 2)
elliptic_kc(3/2)
"""
if n == QQ(-1)/2:
from sage.functions.special import elliptic_kc
return elliptic_kc((x+1)/2)
if x == 1:
from sage.rings.infinity import unsigned_infinity
return SR(unsigned_infinity)
if x == -1:
from sage.rings.infinity import unsigned_infinity
return SR(unsigned_infinity)
if x == 0:
from .gamma import gamma
from .other import sqrt
from .trig import sin
try:
gam = gamma((n+1)/2)/gamma(n/2 + 1)
if gam.is_infinity():
return gam
return -(sqrt(SR.pi()))/2 * sin(SR.pi()/2*n) * gam
except TypeError:
pass
def _evalf_(self, n, x, parent=None, **kwds):
"""
Float evaluation of Legendre Q(n, x) function.
EXAMPLES::
sage: legendre_Q(4, 2.)
0.00116107583162041 - 86.9828465962674*I
sage: legendre_Q(1/2, I+1.)
-0.511424110789061 + 1.34356195297194*I
sage: legendre_Q(1/2, I+1).n(59)
-0.51142411078906080 + 1.3435619529719394*I
"""
ret = self._eval_special_values_(n, x)
if ret is not None:
return ret
import mpmath
from sage.libs.mpmath.all import call as mpcall
return mpcall(mpmath.legenq, n, 0, x, parent=parent)
def eval_recursive(self, n, arg, **kwds):
"""
Return expanded Legendre Q(n, arg) function expression.
EXAMPLES::
sage: legendre_Q.eval_recursive(2,x)
3/4*x^2*(log(x + 1) - log(-x + 1)) - 3/2*x - 1/4*log(x + 1) + 1/4*log(-x + 1)
sage: legendre_Q.eval_recursive(20,x).expand().coefficient(x,10)
-29113619535/131072*log(x + 1) + 29113619535/131072*log(-x + 1)
"""
from sage.functions.log import ln
if n == 0:
return (ln(1+arg)-ln(1-arg))/2
elif n == 1:
return arg/2*(ln(1+arg)-ln(1-arg))-1
x, l = PolynomialRing(QQ, 'x,l').gens()
help1 = l / 2
help2 = x / 2 * l - 1
for j in range(1, n):
help3 = (2 * j + 1) * x * help2 - j * help1
help3 = help3 / (j + 1)
help1 = help2
help2 = help3
sum1 = sum(help3.monomial_coefficient(mon)*arg**(mon.exponents()[0][0])
for mon in help3.monomials() if not l.divides(mon))
sum2 = sum(help3.monomial_coefficient(mon)*arg**(mon.exponents()[0][0])*(ln(1+arg)-ln(1-arg))
for mon in help3.monomials() if l.divides(mon))
return sum1 + sum2
def eval_formula(self, n, arg, **kwds):
"""
Return expanded Legendre ``Q(n, arg)`` function expression.
REFERENCE:
- T. M. Dunster, Legendre and Related Functions, https://dlmf.nist.gov/14.7#E2
EXAMPLES::
sage: legendre_Q.eval_formula(1, x)
1/2*x*(log(x + 1) - log(-x + 1)) - 1
sage: legendre_Q.eval_formula(2,x).expand().collect(log(1+x)).collect(log(1-x))
1/4*(3*x^2 - 1)*log(x + 1) - 1/4*(3*x^2 - 1)*log(-x + 1) - 3/2*x
sage: legendre_Q.eval_formula(20,x).coefficient(x,10)
-29113619535/131072*log(x + 1) + 29113619535/131072*log(-x + 1)
sage: legendre_Q(0, 2)
-1/2*I*pi + 1/2*log(3)
sage: legendre_Q(0, 2.)
0.549306144334055 - 1.57079632679490*I
"""
from sage.functions.log import ln
if n == 0:
return (ln(1+arg)-ln(1-arg))/2
elif n == 1:
return arg/2*(ln(1+arg)-ln(1-arg))-1
arg = SR(arg)
return legendre_P(n, arg)*(ln(1+arg)-ln(1-arg))/2 - self._Wfunc(n, arg)
def _Wfunc(self, n, arg):
"""
Helper function for ``eval_formula()``.
EXAMPLES::
sage: legendre_Q._Wfunc(2, x)
3/2*x
sage: legendre_Q._Wfunc(7, x)
429/16*x^6 - 275/8*x^4 + 849/80*x^2 - 16/35
"""
if n == 0:
return 0
if n == 1:
return 1
x = PolynomialRing(QQ, 'x').gen()
help1 = 0
help2 = 1
for j in range(2, n + 1):
help3 = (2 * j - 1) * x * help2 - (j - 1) * help1
help3 = help3 / j
help1 = help2
help2 = help3
return sum(b * arg**a for a, b in enumerate(help3))
def _derivative_(self, n, x, *args,**kwds):
"""
Return the derivative of legendre_Q.
EXAMPLES::
sage: n = var('n')
sage: derivative(legendre_Q(n,x), x)
(n*x*legendre_Q(n, x) - n*legendre_Q(n - 1, x))/(x^2 - 1)
sage: ex1 = legendre_Q(5,x,hold=True).diff(x).expand().simplify_full()
sage: ex2 = legendre_Q(5,x).diff(x).expand().simplify_full()
sage: ex1.subs(x=7).n() == ex2.subs(x=7).n()
True
sage: derivative(legendre_Q(n,x), n)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
else:
return (n*x*legendre_Q(n, x) - n*legendre_Q(n-1, x))/(x**2 - 1)
legendre_Q = Func_legendre_Q()
class Func_assoc_legendre_P(BuiltinFunction):
def __init__(self):
r"""
EXAMPLES::
sage: loads(dumps(gen_legendre_P))
gen_legendre_P
sage: maxima(gen_legendre_P(20,6,x, hold=True))._sage_().expand().coefficient(x,10)
2508866163428625/128
"""
BuiltinFunction.__init__(self, "gen_legendre_P", nargs=3, latex_name=r"P",
conversions={'maxima':'assoc_legendre_p', 'mathematica':'LegendreP',
'maple':'LegendreP'})
def _eval_(self, n, m, x, *args, **kwds):
r"""
Return an evaluation of this Legendre P(n, m, x) expression.
EXAMPLES::
sage: gen_legendre_P(3,2,2)
-90
sage: gen_legendre_P(13/2,2,0)
2*sqrt(2)*gamma(19/4)/(sqrt(pi)*gamma(13/4))
sage: gen_legendre_P(3,2,x)
-15*(x^2 - 1)*x
sage: gen_legendre_P(3,2,2).n() # abs tol 1e-14
-90.0000000000000
"""
ret = self._eval_special_values_(n, m, x)
if ret is not None:
return ret
if (n in ZZ and m in ZZ
and n >= 0 and m >= 0
and (x in ZZ or not SR(x).is_numeric())):
return self.eval_poly(n, m, x)
def _eval_special_values_(self, n, m, x):
"""
Special values known.
EXAMPLES::
sage: gen_legendre_P(2,3,4)
0
sage: gen_legendre_P(2,0,4)==legendre_P(2,4)
True
sage: gen_legendre_P(2,2,4)
45
sage: gen_legendre_P(2,2,x)
3*x^2 - 3
sage: gen_legendre_P(13/2,2,0)
2*sqrt(2)*gamma(19/4)/(sqrt(pi)*gamma(13/4))
sage: (m,n) = var('m,n')
sage: gen_legendre_P(n,m,0)
2^m*cos(1/2*pi*m + 1/2*pi*n)*gamma(1/2*m + 1/2*n + 1/2)/(sqrt(pi)*gamma(-1/2*m + 1/2*n + 1))
sage: gen_legendre_P(n,3,0)
8*cos(3/2*pi + 1/2*pi*n)*gamma(1/2*n + 2)/(sqrt(pi)*gamma(1/2*n - 1/2))
sage: gen_legendre_P(3,m,0)
2^m*cos(3/2*pi + 1/2*pi*m)*gamma(1/2*m + 2)/(sqrt(pi)*gamma(-1/2*m + 5/2))
"""
if m > n:
return ZZ(0)
if m == 0:
return legendre_P(n, x)
if n == m:
return factorial(2*m)/2**m/factorial(m) * (x**2-1)**(m/2)
if x == 0:
from .gamma import gamma
from .other import sqrt
from .trig import cos
if m in QQ and n in QQ:
return 2**m/sqrt(SR.pi())*cos((n+m)/2*SR.pi())*(gamma(QQ(n+m+1)/2)/gamma(QQ(n-m)/2+1))
elif isinstance(n, Expression) or isinstance(m, Expression):
return 2**m/sqrt(SR.pi())*cos((n+m)/2*SR.pi())*(gamma((n+m+1)/2)/gamma((n-m)/2+1))
def _evalf_(self, n, m, x, parent=None, **kwds):
"""
Float evaluation of Legendre P(n, m, x) function.
EXAMPLES::
sage: gen_legendre_P(10,2,3).n() # abs tol 1e-14
-7.19496360000000e8
sage: gen_legendre_P(5/2,2,1.+I)
14.3165258449040 - 12.7850496155152*I
sage: gen_legendre_P(5/2,2,ComplexField(70)(1+I))
14.316525844904028532 - 12.785049615515157033*I
"""
ret = self._eval_special_values_(n, m, x)
if ret is not None:
return ret
import mpmath
from sage.libs.mpmath.all import call as mpcall
return mpcall(mpmath.legenp, n, m, x, parent=parent)
def eval_poly(self, n, m, arg, **kwds):
"""
Return the associated Legendre P(n, m, arg) polynomial for integers `n > -1, m > -1`.
EXAMPLES::
sage: gen_legendre_P(7,4,x)
3465/2*(13*x^3 - 3*x)*(x^2 - 1)^2
sage: gen_legendre_P(3,1,sqrt(x))
-3/2*(5*x - 1)*sqrt(-x + 1)
REFERENCE:
- T. M. Dunster, Legendre and Related Functions, https://dlmf.nist.gov/14.7#E10
"""
from sage.functions.other import factorial
if n < 0 or m < 0:
return
R = PolynomialRing(QQ, 'x')
x = R.gen()
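        # Rodrigues-type construction (see the DLMF reference in the docstring): differentiate
        # (1 - x^2)^n a total of (m + n) times, then attach the (1 - arg^2)^(m/2) prefactor
        # and the normalisation below.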
p = (1-x**2)**ZZ(n)
for i in range(m + n):
p = p.diff(x)
ex1 = (1-arg**2)**(QQ(m)/2)/2**n/factorial(ZZ(n))
ex2 = sum(b * arg**a for a, b in enumerate(p))
return (-1)**(m+n)*ex1*ex2
def _derivative_(self, n, m, x, *args,**kwds):
"""
Return the derivative of ``gen_legendre_P(n,m,x)``.
EXAMPLES::
sage: (m,n) = var('m,n')
sage: derivative(gen_legendre_P(n,m,x), x)
-((n + 1)*x*gen_legendre_P(n, m, x) + (m - n - 1)*gen_legendre_P(n + 1, m, x))/(x^2 - 1)
sage: gen_legendre_P(3,2,x,hold=True).diff(x).expand().simplify_full()
-45*x^2 + 15
sage: derivative(gen_legendre_P(n,m,x), n)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
else:
return ((n-m+1)*gen_legendre_P(n+1, m, x) - (n+1)*x*gen_legendre_P(n, m, x))/(x**2 - 1)
gen_legendre_P = Func_assoc_legendre_P()
class Func_assoc_legendre_Q(BuiltinFunction):
def __init__(self):
r"""
EXAMPLES::
sage: loads(dumps(gen_legendre_Q))
gen_legendre_Q
sage: maxima(gen_legendre_Q(2,1,3, hold=True))._sage_().simplify_full()
1/4*sqrt(2)*(36*pi - 36*I*log(2) + 25*I)
"""
BuiltinFunction.__init__(self, "gen_legendre_Q", nargs=3, latex_name=r"Q",
conversions={'maxima':'assoc_legendre_q', 'mathematica':'LegendreQ',
'maple':'LegendreQ'})
def _eval_(self, n, m, x, *args, **kwds):
r"""
Return an evaluation of this Legendre Q(n, m, x) expression.
EXAMPLES::
sage: gen_legendre_Q(2,1,3)
-1/4*sqrt(-2)*(-36*I*pi + 36*log(2) - 25)
"""
ret = self._eval_special_values_(n, m, x)
if ret is not None:
return ret
if (n in ZZ and m in ZZ
and n >= 0 and m >= 0
and (x in ZZ or not SR(x).is_numeric())):
return self.eval_recursive(n, m, x)
def _eval_special_values_(self, n, m, x):
"""
Special values known.
EXAMPLES::
sage: n, m = var('n m')
sage: gen_legendre_Q(n,m,0)
-sqrt(pi)*2^(m - 1)*gamma(1/2*m + 1/2*n + 1/2)*sin(1/2*pi*m + 1/2*pi*n)/gamma(-1/2*m + 1/2*n + 1)
"""
if m == 0:
return legendre_Q(n, x)
if x.is_zero():
from .gamma import gamma
from .other import sqrt
from .trig import sin
if m in QQ and n in QQ:
return -(sqrt(SR.pi()))*sin(SR.pi()/2*(m+n))*gamma(QQ(m+n+1)/2)/gamma(QQ(n-m)/2 + 1)*2**(m-1)
elif isinstance(n, Expression) or isinstance(m, Expression):
return -(sqrt(SR.pi()))*sin(SR.pi()/2*(m+n))*gamma((m+n+1)/2)/gamma((n-m)/2 + 1)*2**(m-1)
def _evalf_(self, n, m, x, parent=None, **kwds):
"""
Float evaluation of Legendre Q(n, m, x) function.
EXAMPLES::
sage: gen_legendre_Q(2,1,3.)
-39.9859464434253 + 0.0165114736149193*I
sage: gen_legendre_Q(2,1,ComplexField(70)(3))
-39.985946443425296223 + 0.016511473614919329585*I
"""
ret = self._eval_special_values_(n, m, x)
if ret is not None:
return ret
import mpmath
from sage.libs.mpmath.all import call as mpcall
return mpcall(mpmath.legenq, n, m, x, parent=parent)
def eval_recursive(self, n, m, x, **kwds):
"""
Return the associated Legendre Q(n, m, arg) function for integers `n > -1, m > -1`.
EXAMPLES::
sage: gen_legendre_Q(3,4,x)
48/(x^2 - 1)^2
sage: gen_legendre_Q(4,5,x)
-384/((x^2 - 1)^2*sqrt(-x^2 + 1))
sage: gen_legendre_Q(0,1,x)
-1/sqrt(-x^2 + 1)
sage: gen_legendre_Q(0,2,x)
-1/2*((x + 1)^2 - (x - 1)^2)/(x^2 - 1)
sage: gen_legendre_Q(2,2,x).subs(x=2).expand()
9/2*I*pi - 9/2*log(3) + 14/3
"""
from sage.functions.all import sqrt
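        # Closed forms are used for the m == n + 1 and n == 0 cases below; every other
        # order is reduced in m via the mixed-order recurrence in the final branch.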
if m == n + 1 or n == 0:
if m.mod(2).is_zero():
denom = (1 - x**2)**(m/2)
else:
denom = sqrt(1 - x**2)*(1 - x**2)**((m-1)/2)
if m == n + 1:
return (-1)**m*(m-1).factorial()*2**n/denom
else:
return (-1)**m*(m-1).factorial()*((x+1)**m - (x-1)**m)/(2*denom)
else:
return ((n-m+1)*x*gen_legendre_Q(n,m-1,x)-(n+m-1)*gen_legendre_Q(n-1,m-1,x))/sqrt(1-x**2)
def _derivative_(self, n, m, x, *args,**kwds):
"""
Return the derivative of ``gen_legendre_Q(n,m,x)``.
EXAMPLES::
sage: (m,n) = var('m,n')
sage: derivative(gen_legendre_Q(n,m,x), x)
-((n + 1)*x*gen_legendre_Q(n, m, x) + (m - n - 1)*gen_legendre_Q(n + 1, m, x))/(x^2 - 1)
sage: ex1=gen_legendre_Q(3,2,x,hold=True).diff(x).expand().simplify_full()
sage: ex2=gen_legendre_Q(3,2,x).diff(x).expand().simplify_full()
sage: ex1.subs(x=5).n() == ex2.subs(x=5).n()
True
sage: derivative(gen_legendre_Q(n,m,x), n)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
else:
return ((n-m+1)*gen_legendre_Q(n+1, m, x) - (n+1)*x*gen_legendre_Q(n, m, x))/(x**2 - 1)
gen_legendre_Q = Func_assoc_legendre_Q()
class Func_hermite(GinacFunction):
"""
Returns the Hermite polynomial for integers `n > -1`.
REFERENCE:
- [AS1964]_ 22.5.40 and 22.5.41, page 779.
EXAMPLES::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: hermite(2,x)
4*x^2 - 2
sage: hermite(3,x)
8*x^3 - 12*x
sage: hermite(3,2)
40
sage: S.<y> = PolynomialRing(RR)
sage: hermite(3,y)
8.00000000000000*y^3 - 12.0000000000000*y
sage: R.<x,y> = QQ[]
sage: hermite(3,y^2)
8*y^6 - 12*y^2
sage: w = var('w')
sage: hermite(3,2*w)
64*w^3 - 24*w
sage: hermite(5,3.1416)
5208.69733891963
sage: hermite(5,RealField(100)(pi))
5208.6167627118104649470287166
Check that :trac:`17192` is fixed::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: hermite(0,x)
1
sage: hermite(-1,x)
Traceback (most recent call last):
...
RuntimeError: hermite_eval: The index n must be a nonnegative integer
sage: hermite(-7,x)
Traceback (most recent call last):
...
RuntimeError: hermite_eval: The index n must be a nonnegative integer
sage: _ = var('m x')
sage: hermite(m, x).diff(m)
Traceback (most recent call last):
...
RuntimeError: derivative w.r.t. to the index is not supported yet
"""
def __init__(self):
r"""
Init method for the Hermite polynomials.
EXAMPLES::
sage: loads(dumps(hermite))
hermite
sage: hermite(x, x)._sympy_()
hermite(x, x)
"""
GinacFunction.__init__(self, "hermite", nargs=2, latex_name=r"H",
conversions={'maxima':'hermite', 'mathematica':'HermiteH',
'maple':'HermiteH', 'sympy':'hermite'}, preserved_arg=2)
hermite = Func_hermite()
class Func_jacobi_P(OrthogonalFunction):
r"""
Return the Jacobi polynomial `P_n^{(a,b)}(x)` for
    integers `n > -1`, with `a` and `b` symbolic or `a > -1`
and `b > -1`. The Jacobi polynomials are actually defined
for all a and b. However, the Jacobi polynomial weight
`(1-x)^a(1+x)^b` isn't integrable for `a \leq -1`
or `b \leq -1`.
REFERENCE:
- Table on page 789 in [AS1964]_.
EXAMPLES::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: jacobi_P(2,0,0,x)
3/2*x^2 - 1/2
sage: jacobi_P(2,1,2,1.2)
5.01000000000000
"""
def __init__(self):
r"""
Init method for the Jacobi polynomials.
EXAMPLES::
sage: _ = var('n a b x')
sage: loads(dumps(jacobi_P))
jacobi_P
sage: jacobi_P(n, a, b, x, hold=True)._sympy_()
jacobi(n, a, b, x)
"""
OrthogonalFunction.__init__(self, "jacobi_P", nargs=4, latex_name=r"P",
conversions={'maxima':'jacobi_p', 'mathematica':'JacobiP',
'maple':'JacobiP', 'sympy':'jacobi'})
def _eval_(self, n, a, b, x):
"""
EXAMPLES::
sage: _ = var('n a b x')
sage: jacobi_P(1,n,n,n)
(n + 1)*n
sage: jacobi_P(2,n,n,n)
1/4*(2*n - 1)*(n + 2)*(n + 1)^2
sage: jacobi_P(1,n,n,x)
(n + 1)*x
sage: jacobi_P(3,2,1,x)
21/2*x^3 + 7/2*x^2 - 7/2*x - 1/2
sage: jacobi_P(1,a,b,x)
1/2*a*x + 1/2*b*x + 1/2*a - 1/2*b + x
TESTS:
Check that :trac:`17192` is fixed::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: jacobi_P(0,0,0,x)
1
sage: jacobi_P(-1,0,0,x)
1
sage: jacobi_P(-1,1,1,x)
Traceback (most recent call last):
...
ValueError: n must be greater than -1, got n = -1
sage: jacobi_P(-7,0,0,x)
231/16*x^6 - 315/16*x^4 + 105/16*x^2 - 5/16
sage: jacobi_P(-7,0,2,x)
Traceback (most recent call last):
...
ValueError: n must be greater than -1, got n = -7
"""
if SR(a).is_trivial_zero() and SR(b).is_trivial_zero():
return legendre_P(n, x)
if SR(n).is_numeric() and not (n > -1):
raise ValueError("n must be greater than -1, got n = {0}".format(n))
if not n in ZZ:
return
from .gamma import gamma
s = sum(binomial(n,m) * gamma(a+b+n+m+1) / gamma(a+m+1) * ((x-1)/2)**m for m in range(n+1))
r = gamma(a+n+1) / factorial(n) / gamma(n+a+b+1) * s
return r.to_gamma().gamma_normalize().normalize()
def _evalf_(self, n, a, b, x, **kwds):
"""
EXAMPLES::
sage: jacobi_P(2, 1, 2, 1.2)
5.01000000000000
sage: jacobi_P(2, 1, 2, 1.2, hold=True).n(20)
5.0100
sage: jacobi_P(2, 1, 2, pi+I, hold=True).n(100)
41.103034125334442891187112674 + 31.486722862692829003857755524*I
"""
from sage.rings.complex_arb import ComplexBallField as CBF
the_parent = kwds.get('parent', None)
if the_parent is None:
the_parent = parent(x)
prec = the_parent.precision()
BF = CBF(prec+5)
ret = BF(x).jacobi_P(BF(n), BF(a), BF(b))
return SR(ret)._eval_self(the_parent)
jacobi_P = Func_jacobi_P()
class Func_ultraspherical(GinacFunction):
r"""
Return the ultraspherical (or Gegenbauer) polynomial gegenbauer(n,a,x),
.. MATH::
C_n^{a}(x)=\sum_{k=0}^{\lfloor n/2\rfloor} (-1)^k\frac{\Gamma(n-k+a)}
{\Gamma(a)k!(n-2k)!}(2x)^{n-2k}.
When `n` is a nonnegative integer, this formula gives a
polynomial in `z` of degree `n`, but all parameters are
permitted to be complex numbers. When `a = 1/2`, the
Gegenbauer polynomial reduces to a Legendre polynomial.
Computed using Pynac.
    For numerical evaluation, consider using the `mpmath library
    <http://mpmath.org/doc/current/functions/orthogonal.html#gegenbauer-polynomials>`_,
as it also allows complex numbers (and negative `n` as well);
see the examples below.
REFERENCE:
- [AS1964]_ 22.5.27
EXAMPLES::
sage: ultraspherical(8, 101/11, x)
795972057547264/214358881*x^8 - 62604543852032/19487171*x^6...
sage: x = PolynomialRing(QQ, 'x').gen()
sage: ultraspherical(2,3/2,x)
15/2*x^2 - 3/2
sage: ultraspherical(1,1,x)
2*x
sage: t = PolynomialRing(RationalField(),"t").gen()
sage: gegenbauer(3,2,t)
32*t^3 - 12*t
sage: _ = var('x')
sage: for N in range(100):
....: n = ZZ.random_element().abs() + 5
....: a = QQ.random_element().abs() + 5
....: assert ((n+1)*ultraspherical(n+1,a,x) - 2*x*(n+a)*ultraspherical(n,a,x) + (n+2*a-1)*ultraspherical(n-1,a,x)).expand().is_zero()
sage: ultraspherical(5,9/10,3.1416)
6949.55439044240
sage: ultraspherical(5,9/10,RealField(100)(pi))
6949.4695419382702451843080687
sage: _ = var('a n')
sage: gegenbauer(2,a,x)
2*(a + 1)*a*x^2 - a
sage: gegenbauer(3,a,x)
4/3*(a + 2)*(a + 1)*a*x^3 - 2*(a + 1)*a*x
sage: gegenbauer(3,a,x).expand()
4/3*a^3*x^3 + 4*a^2*x^3 + 8/3*a*x^3 - 2*a^2*x - 2*a*x
sage: gegenbauer(10,a,x).expand().coefficient(x,2)
1/12*a^6 + 5/4*a^5 + 85/12*a^4 + 75/4*a^3 + 137/6*a^2 + 10*a
sage: ex = gegenbauer(100,a,x)
sage: (ex.subs(a==55/98) - gegenbauer(100,55/98,x)).is_trivial_zero()
True
sage: gegenbauer(2,-3,x)
12*x^2 + 3
sage: gegenbauer(120,-99/2,3)
1654502372608570682112687530178328494861923493372493824
sage: gegenbauer(5,9/2,x)
21879/8*x^5 - 6435/4*x^3 + 1287/8*x
sage: gegenbauer(15,3/2,5)
3903412392243800
sage: derivative(gegenbauer(n,a,x),x)
2*a*gegenbauer(n - 1, a + 1, x)
sage: derivative(gegenbauer(3,a,x),x)
4*(a + 2)*(a + 1)*a*x^2 - 2*(a + 1)*a
sage: derivative(gegenbauer(n,a,x),a)
Traceback (most recent call last):
...
RuntimeError: derivative w.r.t. to the second index is not supported yet
Numerical evaluation with the mpmath library::
sage: from mpmath import gegenbauer as gegenbauer_mp
sage: from mpmath import mp
sage: mp.pretty = True; mp.dps=25
sage: gegenbauer_mp(-7,0.5,0.3)
0.1291811875
sage: gegenbauer_mp(2+3j, -0.75, -1000j)
(-5038991.358609026523401901 + 9414549.285447104177860806j)
TESTS:
Check that :trac:`17192` is fixed::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: ultraspherical(0,1,x)
1
sage: ultraspherical(-1,1,x)
Traceback (most recent call last):
...
RuntimeError: gegenb_eval: The index n must be a nonnegative integer
sage: ultraspherical(-7,1,x)
Traceback (most recent call last):
...
RuntimeError: gegenb_eval: The index n must be a nonnegative integer
"""
def __init__(self):
r"""
Init method for the ultraspherical polynomials.
EXAMPLES::
sage: loads(dumps(ultraspherical))
gegenbauer
sage: ultraspherical(x, x, x)._sympy_()
gegenbauer(x, x, x)
"""
GinacFunction.__init__(self, "gegenbauer", nargs=3, latex_name=r"C",
conversions={'maxima':'ultraspherical', 'mathematica':'GegenbauerC',
'maple':'GegenbauerC', 'sympy':'gegenbauer'})
ultraspherical = Func_ultraspherical()
gegenbauer = Func_ultraspherical()
class Func_laguerre(OrthogonalFunction):
"""
REFERENCE:
- [AS1964]_ 22.5.16, page 778 and page 789.
"""
def __init__(self):
r"""
Init method for the Laguerre polynomials.
EXAMPLES::
sage: n,x = var('n,x')
sage: loads(dumps(laguerre))
laguerre
sage: laguerre(x, x)._sympy_()
laguerre(x, x)
sage: maxima(laguerre(1, x, hold=True))
1-_SAGE_VAR_x
sage: maxima(laguerre(n, laguerre(n, x)))
laguerre(_SAGE_VAR_n,laguerre(_SAGE_VAR_n,_SAGE_VAR_x))
"""
OrthogonalFunction.__init__(self, "laguerre", nargs=2, latex_name=r"L",
conversions={'maxima':'laguerre', 'mathematica':'LaguerreL',
'maple':'LaguerreL', 'sympy':'laguerre'})
def _eval_(self, n, x, *args, **kwds):
r"""
Return an evaluation of this Laguerre polynomial expression.
EXAMPLES::
sage: x = PolynomialRing(QQ, 'x').gen()
sage: laguerre(2,x)
1/2*x^2 - 2*x + 1
sage: laguerre(3,x)
-1/6*x^3 + 3/2*x^2 - 3*x + 1
sage: laguerre(2,2)
-1
sage: laguerre(-1, x)
e^x
sage: laguerre(-6, x)
1/120*(x^5 + 25*x^4 + 200*x^3 + 600*x^2 + 600*x + 120)*e^x
sage: laguerre(-9,2)
66769/315*e^2
"""
from sage.rings.integer import Integer
from sage.functions.log import exp
ret = self._eval_special_values_(n, x)
if ret is not None:
return ret
if isinstance(n, (Integer, int)):
if n >= 0 and not hasattr(x, 'prec'):
return self._pol_laguerre(n, x)
elif n < 0:
return exp(x)*laguerre(-n-1, -x)
def _eval_special_values_(self, n, x):
"""
Special values known.
EXAMPLES::
sage: laguerre(0, 0)
1
sage: laguerre(1, x)
-x + 1
"""
if n == 0 or x == 0:
return ZZ(1)
if n == 1:
return ZZ(1) - x
def _pol_laguerre(self, n, x):
"""
Fast creation of Laguerre polynomial.
EXAMPLES::
sage: laguerre(3,sin(x))
-1/6*sin(x)^3 + 3/2*sin(x)^2 - 3*sin(x) + 1
sage: R.<x> = PolynomialRing(QQ, 'x')
sage: laguerre(4,x)
1/24*x^4 - 2/3*x^3 + 3*x^2 - 4*x + 1
sage: laguerre(4,x+1)
1/24*(x + 1)^4 - 2/3*(x + 1)^3 + 3*(x + 1)^2 - 4*x - 3
sage: laguerre(10,1+I)
142511/113400*I + 95867/22680
"""
if hasattr(x, 'pyobject'):
try:
x = x.pyobject()
except TypeError:
pass
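        # Explicit series L_n(x) = sum_{k=0}^{n} binomial(n, k) (-x)^k / k!,
        # evaluated directly in the symbolic ring.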
return SR(sum(binomial(n, k) * (-1)**k / factorial(k) * x**k
for k in range(n + 1)))
def _evalf_(self, n, x, **kwds):
"""
Return the evaluation of `laguerre(n,x)` with floating point `x`.
EXAMPLES::
sage: laguerre(100,RealField(300)(pi))
-0.638322077840648311606324...
sage: laguerre(10,1.+I)
4.22694003527337 + 1.25671075837743*I
sage: laguerre(-9, 2.)
1566.22186244286
"""
the_parent = kwds.get('parent', None)
if the_parent is None:
the_parent = parent(x)
import mpmath
from sage.libs.mpmath.all import call as mpcall
if n<0:
# work around mpmath issue 307
from sage.functions.log import exp
return exp(x) * mpcall(mpmath.laguerre, -n-1, 0, -x, parent=the_parent)
else:
return mpcall(mpmath.laguerre, n, 0, x, parent=the_parent)
def _derivative_(self, n, x, *args,**kwds):
"""
Return the derivative of `laguerre(n,x)`.
EXAMPLES::
sage: n=var('n')
sage: diff(laguerre(n,x), x)
-gen_laguerre(n - 1, 1, x)
TESTS::
sage: diff(laguerre(x,x))
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
diff_param = kwds['diff_param']
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
if diff_param == 1:
return -gen_laguerre(n-1,1,x)
else:
raise ValueError("illegal differentiation parameter {}".format(diff_param))
laguerre = Func_laguerre()
class Func_gen_laguerre(OrthogonalFunction):
"""
REFERENCE:
- [AS1964]_ 22.5.16, page 778 and page 789.
"""
def __init__(self):
r"""
Init method for the Laguerre polynomials.
EXAMPLES::
sage: a,n,x = var('a, n, x')
sage: loads(dumps(gen_laguerre))
gen_laguerre
sage: gen_laguerre(x, x, x)._sympy_()
assoc_laguerre(x, x, x)
sage: maxima(gen_laguerre(1,2,x, hold=True))
3*(1-_SAGE_VAR_x/3)
sage: maxima(gen_laguerre(n, a, gen_laguerre(n, a, x)))
gen_laguerre(_SAGE_VAR_n,_SAGE_VAR_a,gen_laguerre(_SAGE_VAR_n,_SAGE_VAR_a,_SAGE_VAR_x))
"""
OrthogonalFunction.__init__(self, "gen_laguerre", nargs=3, latex_name=r"L",
conversions={'maxima':'gen_laguerre', 'mathematica':'LaguerreL',
'maple':'LaguerreL', 'sympy':'assoc_laguerre'})
def _eval_(self, n, a, x, *args, **kwds):
r"""
Return an evaluation of this Laguerre polynomial expression.
EXAMPLES::
sage: gen_laguerre(2, 1, x)
1/2*x^2 - 3*x + 3
sage: gen_laguerre(2, 1/2, x)
1/2*x^2 - 5/2*x + 15/8
sage: gen_laguerre(2, -1/2, x)
1/2*x^2 - 3/2*x + 3/8
sage: gen_laguerre(2, 0, x)
1/2*x^2 - 2*x + 1
sage: gen_laguerre(3, 0, x)
-1/6*x^3 + 3/2*x^2 - 3*x + 1
"""
from sage.rings.integer import Integer
ret = self._eval_special_values_(n, a, x)
if ret is not None:
return ret
if isinstance(n, Integer):
if n >= 0 and not hasattr(x, 'prec'):
return self._pol_gen_laguerre(n, a, x)
def _eval_special_values_(self, n, a, x):
"""
Special values known.
EXAMPLES::
sage: gen_laguerre(0, 1, pi)
1
sage: gen_laguerre(1, 2, x)
-x + 3
sage: gen_laguerre(3, 4, 0)
35
"""
if n == 0:
return ZZ(1)
if n == 1:
return ZZ(1) + a - x
if a == 0:
return laguerre(n, x)
if x == 0:
from sage.arith.all import binomial
return binomial(n+a, n)
def _pol_gen_laguerre(self, n, a, x):
"""
EXAMPLES::
sage: gen_laguerre(3, 1/2, sin(x))
-1/6*sin(x)^3 + 7/4*sin(x)^2 - 35/8*sin(x) + 35/16
sage: R.<x> = PolynomialRing(QQ, 'x')
sage: gen_laguerre(4, -1/2, x)
1/24*x^4 - 7/12*x^3 + 35/16*x^2 - 35/16*x + 35/128
sage: gen_laguerre(4, -1/2, x+1)
1/24*(x + 1)^4 - 7/12*(x + 1)^3 + 35/16*(x + 1)^2 - 35/16*x - 245/128
sage: gen_laguerre(10, 1, 1+I)
25189/2100*I + 11792/2835
"""
return sum(binomial(n + a, n - k) * (-1)**k / factorial(k) * x**k
for k in range(n + 1))
def _evalf_(self, n, a, x, **kwds):
"""
EXAMPLES::
sage: gen_laguerre(100,1,RealField(300)(pi))
-0.89430788373354541911...
sage: gen_laguerre(10,1/2,1.+I)
5.34469635574906 + 5.23754057922902*I
"""
the_parent = kwds.get('parent', None)
if the_parent is None:
the_parent = parent(x)
import mpmath
from sage.libs.mpmath.all import call as mpcall
return mpcall(mpmath.laguerre, n, a, x, parent=the_parent)
def _derivative_(self, n, a, x, diff_param):
"""
Return the derivative of `gen_laguerre(n,a,x)`.
EXAMPLES::
sage: (a,n)=var('a,n')
sage: diff(gen_laguerre(n,a,x), x)
-gen_laguerre(n - 1, a + 1, x)
sage: gen_laguerre(n,a,x).diff(a)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the second index is not supported.
TESTS::
sage: diff(gen_laguerre(n,a,x), n)
Traceback (most recent call last):
...
NotImplementedError: Derivative w.r.t. to the index is not supported.
"""
if diff_param == 0:
raise NotImplementedError("Derivative w.r.t. to the index is not supported.")
elif diff_param == 1:
raise NotImplementedError("Derivative w.r.t. to the second index is not supported.")
elif diff_param == 2:
return -gen_laguerre(n - 1, a + 1, x)
else:
raise ValueError("illegal differentiation parameter {}".format(diff_param))
gen_laguerre = Func_gen_laguerre()
| 32.007407
| 212
| 0.523449
|
7ca863067a8fd90e6f4b817b64937c2e01094dca
| 15,179
|
py
|
Python
|
Segmentation/train_DenseNet_one_Concrete_CamVid11.py
|
ardywibowo/LBD
|
1c56917fa0797c98ef1233879849c9ec536fe896
|
[
"MIT"
] | null | null | null |
Segmentation/train_DenseNet_one_Concrete_CamVid11.py
|
ardywibowo/LBD
|
1c56917fa0797c98ef1233879849c9ec536fe896
|
[
"MIT"
] | null | null | null |
Segmentation/train_DenseNet_one_Concrete_CamVid11.py
|
ardywibowo/LBD
|
1c56917fa0797c98ef1233879849c9ec536fe896
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os,time,cv2, sys, math
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import time, datetime
import argparse
import random
import os, sys
import subprocess
import matplotlib
matplotlib.use('Agg')
from utils import utils, helpers
from builders import model_builder
import matplotlib.pyplot as plt
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser()
parser.add_argument('--num_epochs', type=int, default=700, help='Number of epochs to train for')
parser.add_argument('--epoch_start_i', type=int, default=0, help='Start counting epochs from this number')
parser.add_argument('--checkpoint_step', type=int, default=500, help='How often to save checkpoints (epochs)')
parser.add_argument('--validation_step', type=int, default=500, help='How often to perform validation (epochs)')
parser.add_argument('--image', type=str, default=None, help='The image you want to predict on. Only valid in "predict" mode.')
parser.add_argument('--continue_training', type=str2bool, default=False, help='Whether to continue training from a checkpoint')
parser.add_argument('--dataset', type=str, default="CamVid11", help='Dataset you are using.')
parser.add_argument('--crop_height', type=int, default=352, help='Height of cropped input image to network')
parser.add_argument('--crop_width', type=int, default=480, help='Width of cropped input image to network')
parser.add_argument('--batch_size', type=int, default=1, help='Number of images in each batch')
parser.add_argument('--num_val_images', type=int, default=20, help='The number of images to use for validation')
parser.add_argument('--h_flip', type=str2bool, default=True, help='Whether to randomly flip the image horizontally for data augmentation')
parser.add_argument('--v_flip', type=str2bool, default=False, help='Whether to randomly flip the image vertically for data augmentation')
parser.add_argument('--brightness', type=float, default=None, help='Whether to randomly change the image brightness for data augmentation. Specifies the max brightness change as a factor between 0.0 and 1.0. For example, 0.1 represents a max brightness change of 10%% (+-).')
parser.add_argument('--rotation', type=float, default=None, help='Whether to randomly rotate the image for data augmentation. Specifies the max rotation angle in degrees.')
parser.add_argument('--model', type=str, default="DenseNet_concrete", help='The model you are using. See model_builder.py for supported models')
parser.add_argument('--frontend', type=str, default="Xception", help='The frontend you are using. See frontend_builder.py for supported models')
args = parser.parse_args()
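# Example invocation (illustrative only; the flag values shown are assumptions, not
# requirements of this script):
#   python train_DenseNet_one_Concrete_CamVid11.py --dataset CamVid11 \
#       --crop_height 352 --crop_width 480 --batch_size 1 --num_epochs 700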
def data_augmentation(input_image, output_image):
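    """Randomly crop, flip, brighten and rotate an (image, label) pair according to the
    command-line augmentation flags. Both arrays receive the same transformation so that
    pixels stay aligned with their labels."""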
input_image, output_image = utils.random_crop(input_image, output_image, args.crop_height, args.crop_width)
if args.h_flip and random.randint(0,1):
input_image = cv2.flip(input_image, 1)
output_image = cv2.flip(output_image, 1)
if args.v_flip and random.randint(0,1):
input_image = cv2.flip(input_image, 0)
output_image = cv2.flip(output_image, 0)
if args.brightness:
factor = 1.0 + random.uniform(-1.0*args.brightness, args.brightness)
table = np.array([((i / 255.0) * factor) * 255 for i in np.arange(0, 256)]).astype(np.uint8)
input_image = cv2.LUT(input_image, table)
    if args.rotation:
        angle = random.uniform(-1*args.rotation, args.rotation)
        M = cv2.getRotationMatrix2D((input_image.shape[1]//2, input_image.shape[0]//2), angle, 1.0)
        input_image = cv2.warpAffine(input_image, M, (input_image.shape[1], input_image.shape[0]), flags=cv2.INTER_NEAREST)
        output_image = cv2.warpAffine(output_image, M, (output_image.shape[1], output_image.shape[0]), flags=cv2.INTER_NEAREST)
return input_image, output_image
###### HYPERPARAMETERS
l = 0.01
weight_regularizer = 1e-8
drop_regularizer = 1.6e-8 #(1/(nwh))
ignore_label = 11
# Get the names of the classes so we can record the evaluation results
class_names_list, label_values_3 = helpers.get_label_info(os.path.join(args.dataset, "class_dict.csv"))
label_values=np.array(label_values_3)[:,0]
class_names_string = ""
for class_name in class_names_list:
if not class_name == class_names_list[-1]:
class_names_string = class_names_string + class_name + ", "
else:
class_names_string = class_names_string + class_name
num_classes = len(label_values)
#config = tf.ConfigProto()
#config.gpu_options.allow_growth = True
#sess=tf.Session(config=config)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
# Compute loss
net_input = tf.placeholder(tf.float32,shape=[None,None,None,3])
net_output = tf.placeholder(tf.float32,shape=[None,None,None,num_classes])
scalar_output = tf.placeholder(tf.float32,shape=[None,None,None])
network, _, _ = model_builder.build_model_concrete(model_name=args.model, net_input=net_input, num_classes=num_classes, \
crop_width=args.crop_width, crop_height=args.crop_height, one_parameter=True)
kl_weight = tf.placeholder_with_default(0.001, shape=())
kl_term = tf.Variable(0.0)
not_ignore = tf.to_float(tf.not_equal(scalar_output, ignore_label))
loss = tf.reduce_mean(tf.losses.softmax_cross_entropy(tf.reshape(net_output, shape=[-1, num_classes]), tf.reshape(network, shape=[-1, num_classes]), \
weights = tf.reshape(not_ignore, shape=[-1])) )
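# Regularisation over the concrete-dropout layers of denseblock6 (descriptive sketch,
# inferred from the code below): each layer exposes a learned logit `p`; the first summand
# scales the squared conv weights by weight_regularizer / sigmoid(p), and the second is a
# binary-entropy-style term in sigmoid(p) scaled by drop_regularizer. Both are accumulated
# into kl_term and added to the loss with weight kl_weight.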
for i in range(15):
densenet_scope = 'FC-DenseNet103' + "/denseblock6" + "/layer%d" % (i)
with tf.variable_scope(densenet_scope, reuse=tf.AUTO_REUSE):
p_current = tf.get_variable('p')
with tf.variable_scope(densenet_scope, reuse=tf.AUTO_REUSE):
w_current = tf.get_variable('Conv/weights')
sig = tf.sigmoid(p_current)
log_sig = tf.log_sigmoid(p_current)
log_sig_ = tf.log_sigmoid(-p_current)
kl_term += tf.reduce_sum( ( weight_regularizer / (sig) * (tf.reduce_sum(tf.square(w_current),axis=[0,1,2]))) + \
drop_regularizer*(sig*log_sig + (1.0-sig)*log_sig_))
loss = loss + kl_weight * kl_term
opt = tf.train.RMSPropOptimizer(learning_rate=0.001, decay=0.995).minimize(loss, var_list=[var for var in tf.trainable_variables()])
saver=tf.train.Saver(max_to_keep=1000)
sess.run(tf.global_variables_initializer())
utils.count_params()
# Load a previous checkpoint if desired
model_checkpoint_name = "checkpoints/latest_model_" + args.model + "_" + args.dataset + "_one.ckpt"
if args.continue_training:
print('Loaded latest model checkpoint')
saver.restore(sess, model_checkpoint_name)
# Load the data
print("Loading the data ...")
train_input_names,train_output_names, val_input_names, val_output_names, test_input_names, test_output_names = utils.prepare_data(dataset_dir=args.dataset)
print("\n***** Begin training *****")
print("Dataset -->", args.dataset)
print("Model -->", args.model)
print("Crop Height -->", args.crop_height)
print("Crop Width -->", args.crop_width)
print("Num Epochs -->", args.num_epochs)
print("Batch Size -->", args.batch_size)
print("Num Classes -->", num_classes)
print("Data Augmentation:")
print("\tVertical Flip -->", args.v_flip)
print("\tHorizontal Flip -->", args.h_flip)
print("\tBrightness Alteration -->", args.brightness)
print("\tRotation -->", args.rotation)
print("")
avg_loss_per_epoch = []
avg_scores_per_epoch = []
avg_iou_per_epoch = []
# Which validation images do we want
val_indices = []
num_vals = min(args.num_val_images, len(val_input_names))
# Set random seed to make sure models are validated on the same validation images.
# So you can compare the results of different models more intuitively.
random.seed(16)
val_indices=random.sample(range(0,len(val_input_names)),num_vals)
#Train
for epoch in range(args.epoch_start_i, args.num_epochs):
current_losses = []
cnt=0
id_list = np.random.permutation(len(train_input_names))
num_iters = int(np.floor(len(id_list) / args.batch_size))
st = time.time()
epoch_st=time.time()
for i in range(num_iters):
input_image_batch = []
output_image_batch = []
one_hot_out_batch = []
for j in range(args.batch_size):
index = i*args.batch_size + j
id = id_list[index]
input_image = utils.load_image(train_input_names[id])
output_image = utils.load_gt(train_output_names[id])
with tf.device('/cpu:0'):
input_image, output_image = data_augmentation(input_image, output_image)
input_image = np.float32(input_image) / 255.0
output_image = np.float32(output_image)
one_hot_out = np.float32(helpers.one_hot_it_gt(label=output_image, label_values=label_values))
input_image_batch.append(np.expand_dims(input_image, axis=0))
output_image_batch.append(np.expand_dims(output_image, axis=0))
one_hot_out_batch.append(np.expand_dims(one_hot_out, axis=0))
if args.batch_size == 1:
input_image_batch = input_image_batch[0]
output_image_batch = output_image_batch[0]
one_hot_out_batch = one_hot_out_batch[0]
else:
input_image_batch = np.squeeze(np.stack(input_image_batch, axis=1))
output_image_batch = np.squeeze(np.stack(output_image_batch, axis=1))
one_hot_out_batch = np.squeeze(np.stack(one_hot_out_batch, axis=0))
_,current=sess.run([opt,loss],feed_dict={net_input:input_image_batch,net_output:one_hot_out_batch,scalar_output:output_image_batch})
current_losses.append(current)
cnt = cnt + args.batch_size
if cnt % 20 == 0:
string_print = "Epoch = %d Count = %d Current_Loss = %.4f Time = %.2f"%(epoch,cnt,current,time.time()-st)
utils.LOG(string_print)
st = time.time()
mean_loss = np.mean(current_losses)
avg_loss_per_epoch.append(mean_loss)
# Save latest checkpoint to same file name
print("Saving latest checkpoint")
saver.save(sess,model_checkpoint_name)
if val_indices != 0 and epoch % args.checkpoint_step == 0:
if not os.path.isdir("%s/%s/%04d"%("checkpoints", args.model + args.dataset + "_one", epoch)):
os.makedirs("%s/%s/%04d"%("checkpoints", args.model + args.dataset + "_one", epoch))
print("Saving checkpoint for this epoch")
saver.save(sess,"%s/%s/%04d"%("checkpoints", args.model + args.dataset + "_one", epoch))
if epoch % args.validation_step == 0:
if not os.path.isdir("%s/%s/%04d"%("checkpoints", args.model + args.dataset + "_one", epoch)):
os.makedirs("%s/%s/%04d"%("checkpoints", args.model + args.dataset + "_one", epoch))
print("Performing validation")
target=open("%s/%s/%04d/val_scores.csv"%("checkpoints", args.model + args.dataset + "_one", epoch),'w')
target.write("val_name, avg_accuracy, precision, recall, f1 score, mean iou, %s\n" % (class_names_string))
scores_list = []
class_scores_list = []
precision_list = []
recall_list = []
f1_list = []
iou_list = []
# Do the validation on a small set of validation images
for ind in val_indices:
input_image = np.expand_dims(np.float32(utils.load_image(val_input_names[ind])[:args.crop_height, :args.crop_width]),axis=0)/255.0
gt = utils.load_gt(val_output_names[ind])[:args.crop_height, :args.crop_width]
gt = helpers.reverse_one_hot(helpers.one_hot_it_gt(gt, label_values))
output_image = sess.run(network,feed_dict={net_input:input_image})
output_image = np.array(output_image[0,:,:,:])
output_image = helpers.reverse_one_hot(output_image)
out_vis_image = helpers.colour_code_segmentation(output_image, label_values)
accuracy, class_accuracies, prec, rec, f1, iou = utils.evaluate_segmentation(pred=output_image, label=gt, num_classes=num_classes)
file_name = utils.filepath_to_name(val_input_names[ind])
target.write("%s, %f, %f, %f, %f, %f"%(file_name, accuracy, prec, rec, f1, iou))
for item in class_accuracies:
target.write(", %f"%(item))
target.write("\n")
scores_list.append(accuracy)
class_scores_list.append(class_accuracies)
precision_list.append(prec)
recall_list.append(rec)
f1_list.append(f1)
iou_list.append(iou)
target.close()
avg_score = np.mean(scores_list)
class_avg_scores = np.mean(class_scores_list, axis=0)
avg_scores_per_epoch.append(avg_score)
avg_precision = np.mean(precision_list)
avg_recall = np.mean(recall_list)
avg_f1 = np.mean(f1_list)
avg_iou = np.mean(iou_list)
avg_iou_per_epoch.append(avg_iou)
print("\nAverage validation accuracy for epoch # %04d = %f"% (epoch, avg_score))
print("Average per class validation accuracies for epoch # %04d:"% (epoch))
for index, item in enumerate(class_avg_scores):
print("%s = %f" % (class_names_list[index], item))
print("Validation precision = ", avg_precision)
print("Validation recall = ", avg_recall)
print("Validation F1 score = ", avg_f1)
print("Validation IoU score = ", avg_iou)
sys.stdout.flush()
epoch_time=time.time()-epoch_st
remain_time=epoch_time*(args.num_epochs-1-epoch)
m, s = divmod(remain_time, 60)
h, m = divmod(m, 60)
    if remain_time != 0:
train_time="Remaining training time = %d hours %d minutes %d seconds\n"%(h,m,s)
else:
train_time="Remaining training time : Training completed.\n"
utils.LOG(train_time)
scores_list = []
    val_epochs = [i*args.validation_step for i in range(len(avg_iou_per_epoch))]
fig1, ax1 = plt.subplots(figsize=(11, 8))
    ax1.plot(val_epochs, avg_scores_per_epoch)
ax1.set_title("Average validation accuracy vs epochs")
ax1.set_xlabel("Epoch")
ax1.set_ylabel("Avg. val. accuracy")
plt.savefig('accuracy_vs_epochs.png')
plt.clf()
fig2, ax2 = plt.subplots(figsize=(11, 8))
ax2.plot(range(epoch-args.epoch_start_i+1), avg_loss_per_epoch)
ax2.set_title("Average loss vs epochs")
ax2.set_xlabel("Epoch")
ax2.set_ylabel("Current loss")
plt.savefig('loss_vs_epochs.png')
plt.clf()
fig3, ax3 = plt.subplots(figsize=(11, 8))
    ax3.plot(val_epochs, avg_iou_per_epoch)
ax3.set_title("Average IoU vs epochs")
ax3.set_xlabel("Epoch")
ax3.set_ylabel("Current IoU")
plt.savefig('iou_vs_epochs.png')
| 41.024324
| 274
| 0.684498
|
ae5270d1e81b456c58292fa389e87d041d118435
| 383
|
py
|
Python
|
dt/migrations/0002_datetime_twenty_four_hours.py
|
zembrodt/django-home
|
0a8d379028a1afd22ab9a6d7589672d623d8c6e8
|
[
"MIT"
] | null | null | null |
dt/migrations/0002_datetime_twenty_four_hours.py
|
zembrodt/django-home
|
0a8d379028a1afd22ab9a6d7589672d623d8c6e8
|
[
"MIT"
] | 16
|
2019-03-07T21:04:26.000Z
|
2022-03-11T23:42:00.000Z
|
dt/migrations/0002_datetime_twenty_four_hours.py
|
zembrodt/django-home
|
0a8d379028a1afd22ab9a6d7589672d623d8c6e8
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.3 on 2019-02-16 19:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dt', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='datetime',
name='twenty_four_hours',
field=models.BooleanField(default=True),
),
]
| 20.157895
| 52
| 0.5953
|
871dcc6b8d28b4aafb3d2045b3ba66ca5c9c3f6d
| 780
|
py
|
Python
|
vertica_python/vertica/messages/backend_messages/authentication.py
|
jakubjedelsky/vertica-python
|
f379576b6949638c90908f5ebded321dce9330e5
|
[
"MIT"
] | 1
|
2019-06-17T19:05:10.000Z
|
2019-06-17T19:05:10.000Z
|
vertica_python/vertica/messages/backend_messages/authentication.py
|
jakubjedelsky/vertica-python
|
f379576b6949638c90908f5ebded321dce9330e5
|
[
"MIT"
] | null | null | null |
vertica_python/vertica/messages/backend_messages/authentication.py
|
jakubjedelsky/vertica-python
|
f379576b6949638c90908f5ebded321dce9330e5
|
[
"MIT"
] | 2
|
2020-06-20T21:26:31.000Z
|
2021-04-03T10:44:40.000Z
|
from __future__ import print_function, division, absolute_import
from struct import unpack
from ..message import BackendMessage
class Authentication(BackendMessage):
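    """Authentication request sent by the backend during connection start-up.
    The ``code`` field selects the authentication method; the constants below appear to
    mirror the PostgreSQL-style authentication request codes on which the Vertica wire
    protocol is based (an assumption drawn from the message layout, not stated here)."""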
message_id = b'R'
OK = 0
KERBEROS_V5 = 2
CLEARTEXT_PASSWORD = 3
CRYPT_PASSWORD = 4
MD5_PASSWORD = 5
SCM_CREDENTIAL = 6
GSS = 7
GSS_CONTINUE = 8
SSPI = 9
def __init__(self, data):
BackendMessage.__init__(self)
unpacked = unpack('!I{0}s'.format(len(data) - 4), data)
self.code = unpacked[0]
        other = unpacked[1]
if self.code in [self.CRYPT_PASSWORD, self.MD5_PASSWORD]:
self.salt = other
if self.code in [self.GSS_CONTINUE]:
self.auth_data = other
BackendMessage.register(Authentication)
| 23.636364
| 65
| 0.65
|
f764d80da33e8ddbfa585094e6ec89c871ac6da3
| 1,016
|
py
|
Python
|
dlkit/json_/proxy/record_templates.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 2
|
2018-02-23T12:16:11.000Z
|
2020-10-08T17:54:24.000Z
|
dlkit/json_/proxy/record_templates.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 87
|
2017-04-21T18:57:15.000Z
|
2021-12-13T19:43:57.000Z
|
dlkit/json_/proxy/record_templates.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 1
|
2018-03-01T16:44:25.000Z
|
2018-03-01T16:44:25.000Z
|
"""JSON implementations of proxy records."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from .. import utilities
from ..osid import records as osid_records
from dlkit.abstract_osid.proxy import records as abc_proxy_records
class ProxyRecord(abc_proxy_records.ProxyRecord, osid_records.OsidRecord):
"""A record for a ``Proxy``.
The methods specified by the record type are available through the
underlying object.
"""
class ProxyConditionRecord(abc_proxy_records.ProxyConditionRecord, osid_records.OsidRecord):
"""A record for a ``ProxyCondition``.
The methods specified by the record type are available through the
underlying object.
"""
| 29.882353
| 92
| 0.759843
|
06a1fb78029883bf3f1a8ee3532361a743171c69
| 130
|
py
|
Python
|
app/db.py
|
lealssa/cvm_api
|
d3e468f6c81009aeb5dc358a39e22f109ea3315d
|
[
"MIT"
] | null | null | null |
app/db.py
|
lealssa/cvm_api
|
d3e468f6c81009aeb5dc358a39e22f109ea3315d
|
[
"MIT"
] | null | null | null |
app/db.py
|
lealssa/cvm_api
|
d3e468f6c81009aeb5dc358a39e22f109ea3315d
|
[
"MIT"
] | null | null | null |
import motor.motor_asyncio
from .config import settings
client = motor.motor_asyncio.AsyncIOMotorClient(settings.cvm_mongodb_url)
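# Example (illustrative; the database name "cvm" is an assumption, not defined in this
# module): a database handle can be obtained from the client with
#   db = client["cvm"]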
| 32.5
| 73
| 0.869231
|
a62219842fb04e48966f90a4b181478fa2be827d
| 1,591
|
py
|
Python
|
contrib/attic/pydl4j/pydl4j/__init__.py
|
eric-erki/deeplearning4j
|
b9d462f66879e9315767b70190bd2ab31b9a3275
|
[
"Apache-2.0"
] | null | null | null |
contrib/attic/pydl4j/pydl4j/__init__.py
|
eric-erki/deeplearning4j
|
b9d462f66879e9315767b70190bd2ab31b9a3275
|
[
"Apache-2.0"
] | null | null | null |
contrib/attic/pydl4j/pydl4j/__init__.py
|
eric-erki/deeplearning4j
|
b9d462f66879e9315767b70190bd2ab31b9a3275
|
[
"Apache-2.0"
] | null | null | null |
# /* ******************************************************************************
# * Copyright (c) 2021 Deeplearning4j Contributors
# *
# * This program and the accompanying materials are made available under the
# * terms of the Apache License, Version 2.0 which is available at
# * https://www.apache.org/licenses/LICENSE-2.0.
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# * License for the specific language governing permissions and limitations
# * under the License.
# *
# * SPDX-License-Identifier: Apache-2.0
# ******************************************************************************/
from .pydl4j import *
from .jarmgr import *
from .mvn import *
| 45.457143
| 84
| 0.590195
|
31c8ea234e0baa68038d11dc325b9d00ffe8ae15
| 7,769
|
py
|
Python
|
invenio_rdm_records/records/api.py
|
caltechlibrary/invenio-rdm-records
|
5f35d82a3ed7caec5aa2350d62c26a021edf2d87
|
[
"MIT"
] | null | null | null |
invenio_rdm_records/records/api.py
|
caltechlibrary/invenio-rdm-records
|
5f35d82a3ed7caec5aa2350d62c26a021edf2d87
|
[
"MIT"
] | null | null | null |
invenio_rdm_records/records/api.py
|
caltechlibrary/invenio-rdm-records
|
5f35d82a3ed7caec5aa2350d62c26a021edf2d87
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2021 TU Wien.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""RDM Record and Draft API."""
from invenio_drafts_resources.records import Draft, Record
from invenio_drafts_resources.records.api import \
ParentRecord as ParentRecordBase
from invenio_pidstore.models import PIDStatus
from invenio_records.dumpers import ElasticsearchDumper
from invenio_records.dumpers.relations import RelationDumperExt
from invenio_records.systemfields import ConstantField, DictField, \
ModelField, RelationsField
from invenio_records_resources.records.api import FileRecord
from invenio_records_resources.records.systemfields import FilesField, \
IndexField, PIDListRelation, PIDNestedListRelation, PIDRelation, \
PIDStatusCheckField
from invenio_vocabularies.contrib.affiliations.api import Affiliation
from invenio_vocabularies.contrib.subjects.api import Subject
from invenio_vocabularies.records.api import Vocabulary
from . import models
from .dumpers import EDTFDumperExt, EDTFListDumperExt, GrantTokensDumperExt
from .systemfields import HasDraftCheckField, ParentRecordAccessField, \
RecordAccessField
#
# Parent record API
#
class RDMParent(ParentRecordBase):
"""Example parent record."""
# Configuration
model_cls = models.RDMParentMetadata
dumper = ElasticsearchDumper(
extensions=[
GrantTokensDumperExt("access.grant_tokens"),
]
)
# System fields
schema = ConstantField(
'$schema', 'local://records/parent-v1.0.0.json')
access = ParentRecordAccessField()
#
# Common properties between records and drafts.
#
class CommonFieldsMixin:
"""Common system fields between records and drafts."""
versions_model_cls = models.RDMVersionsState
parent_record_cls = RDMParent
schema = ConstantField(
'$schema', 'local://records/record-v4.0.0.json')
dumper = ElasticsearchDumper(
extensions=[
EDTFDumperExt('metadata.publication_date'),
EDTFListDumperExt("metadata.dates", "date"),
RelationDumperExt('relations'),
]
)
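    # The relations field below declares how vocabulary, affiliation and subject PIDs
    # referenced from the metadata are dereferenced (and partially denormalised via the
    # listed ``attrs``) when the record is dumped for indexing.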
relations = RelationsField(
creator_affiliations=PIDNestedListRelation(
'metadata.creators',
relation_field='affiliations',
attrs=['id', 'name'],
pid_field=Affiliation.pid,
cache_key='affiliations',
),
contributor_affiliations=PIDNestedListRelation(
'metadata.contributors',
relation_field='affiliations',
attrs=['id', 'name'],
pid_field=Affiliation.pid,
cache_key='affiliations',
),
languages=PIDListRelation(
'metadata.languages',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('languages'),
cache_key='languages',
),
resource_type=PIDRelation(
'metadata.resource_type',
attrs=['id', 'title', 'props.type', 'props.subtype'],
pid_field=Vocabulary.pid.with_type_ctx('resourcetypes'),
cache_key='resource_type',
value_check=dict(tags=['depositable']),
),
subjects=PIDListRelation(
'metadata.subjects',
attrs=['id', 'subject', 'scheme'],
pid_field=Subject.pid,
cache_key='subjects',
),
licenses=PIDListRelation(
'metadata.rights',
attrs=['id', 'title', 'description', 'props.url', 'props.scheme'],
pid_field=Vocabulary.pid.with_type_ctx('licenses'),
cache_key='licenses',
),
related_identifiers=PIDListRelation(
'metadata.related_identifiers',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('resourcetypes'),
cache_key='resource_type',
relation_field='resource_type',
value_check=dict(tags=['linkable']),
),
title_types=PIDListRelation(
'metadata.additional_titles',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('titletypes'),
cache_key='title_type',
relation_field='type',
),
title_languages=PIDListRelation(
'metadata.additional_titles',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('languages'),
cache_key='languages',
relation_field='lang',
),
creators_role=PIDListRelation(
'metadata.creators',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('creatorsroles'),
cache_key='role',
relation_field='role'
),
contributors_role=PIDListRelation(
'metadata.contributors',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('contributorsroles'),
cache_key='role',
relation_field='role'
),
description_type=PIDListRelation(
'metadata.additional_descriptions',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('descriptiontypes'),
cache_key='description_type',
relation_field='type',
),
description_languages=PIDListRelation(
'metadata.additional_descriptions',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('languages'),
cache_key='languages',
relation_field='lang',
),
date_types=PIDListRelation(
'metadata.dates',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('datetypes'),
cache_key='date_types',
relation_field='type',
),
relation_types=PIDListRelation(
'metadata.related_identifiers',
attrs=['id', 'title'],
pid_field=Vocabulary.pid.with_type_ctx('relationtypes'),
cache_key='relation_types',
relation_field='relation_type',
),
)
bucket_id = ModelField(dump=False)
bucket = ModelField(dump=False)
access = RecordAccessField()
is_published = PIDStatusCheckField(status=PIDStatus.REGISTERED, dump=True)
pids = DictField("pids")
#
# Draft API
#
class RDMFileDraft(FileRecord):
"""File associated with a draft."""
model_cls = models.RDMFileDraftMetadata
record_cls = None # defined below
class RDMDraft(CommonFieldsMixin, Draft):
"""RDM draft API."""
model_cls = models.RDMDraftMetadata
index = IndexField(
"rdmrecords-drafts-draft-v4.0.0", search_alias="rdmrecords"
)
files = FilesField(
store=False,
file_cls=RDMFileDraft,
# Don't delete, we'll manage in the service
delete=False,
)
has_draft = HasDraftCheckField()
RDMFileDraft.record_cls = RDMDraft
#
# Record API
#
class RDMFileRecord(FileRecord):
"""Example record file API."""
model_cls = models.RDMFileRecordMetadata
record_cls = None # defined below
class RDMRecord(CommonFieldsMixin, Record):
"""RDM Record API."""
model_cls = models.RDMRecordMetadata
index = IndexField(
"rdmrecords-records-record-v4.0.0", search_alias="rdmrecords-records"
)
files = FilesField(
store=False,
file_cls=RDMFileRecord,
# Don't create
create=False,
# Don't delete, we'll manage in the service
delete=False,
)
has_draft = HasDraftCheckField(RDMDraft)
RDMFileRecord.record_cls = RDMRecord
| 30.347656
| 78
| 0.635346
|
e03f04222f8b45b76e66e53c434df130ae288aea
| 588
|
py
|
Python
|
test/test_cli.py
|
kdelee/ansible-builder
|
07cee927efca220ad17f66bba6fb4a42d9c09953
|
[
"Apache-2.0"
] | null | null | null |
test/test_cli.py
|
kdelee/ansible-builder
|
07cee927efca220ad17f66bba6fb4a42d9c09953
|
[
"Apache-2.0"
] | null | null | null |
test/test_cli.py
|
kdelee/ansible-builder
|
07cee927efca220ad17f66bba6fb4a42d9c09953
|
[
"Apache-2.0"
] | null | null | null |
from ansible_builder.cli import prepare
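# Note: the fixtures used below (exec_env_definition_file, good_exec_env_definition_path)
# are presumably provided by the test suite's conftest.py; they are not defined here.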
def test_custom_image(exec_env_definition_file, tmpdir):
content = {'version': 1}
path = str(exec_env_definition_file(content=content))
aee = prepare(['create', '-f', path, '-b', 'my-custom-image', '-c', str(tmpdir)])
assert aee.containerfile.base_image == 'my-custom-image'
def test_build_context(good_exec_env_definition_path, tmpdir):
path = str(good_exec_env_definition_path)
build_context = str(tmpdir)
aee = prepare(['create', '-f', path, '-c', build_context])
assert aee.build_context == build_context
| 30.947368
| 85
| 0.712585
|
1f58b6d5ff633650863dbd5953fe7100d8bce2c3
| 1,407
|
py
|
Python
|
redbot/message/headers/soapaction.py
|
thinkbox/redbot
|
90744dd971389bbf435d200483309b70b748785a
|
[
"Unlicense"
] | 1
|
2019-06-27T13:02:52.000Z
|
2019-06-27T13:02:52.000Z
|
redbot/message/headers/soapaction.py
|
thinkbox/redbot
|
90744dd971389bbf435d200483309b70b748785a
|
[
"Unlicense"
] | null | null | null |
redbot/message/headers/soapaction.py
|
thinkbox/redbot
|
90744dd971389bbf435d200483309b70b748785a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
__author__ = "Mark Nottingham <mnot@mnot.net>"
__copyright__ = """\
Copyright (c) 2008-2013 Mark Nottingham
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import redbot.speak as rs
from redbot.message import headers as rh
from redbot.message import http_syntax as syntax
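# Minimal field handlers for the SOAPAction request header: the value is kept verbatim,
# and when the field occurs more than once only the last value is retained.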
def parse(subject, value, red):
return value
@rh.SingleFieldValue
def join(subject, values, red):
return values[-1]
| 38.027027
| 77
| 0.78607
|
11060cecfdb7bd68bd7082ac490b00d26deeff8a
| 240,512
|
py
|
Python
|
run_unittests.py
|
felipealmeida/meson
|
2f72d4db0921ec3ce7c4cd9803c7af9f4ac776cf
|
[
"Apache-2.0"
] | null | null | null |
run_unittests.py
|
felipealmeida/meson
|
2f72d4db0921ec3ce7c4cd9803c7af9f4ac776cf
|
[
"Apache-2.0"
] | null | null | null |
run_unittests.py
|
felipealmeida/meson
|
2f72d4db0921ec3ce7c4cd9803c7af9f4ac776cf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import stat
import shlex
import subprocess
import re
import json
import tempfile
import textwrap
import os
import shutil
import sys
import unittest
import platform
import pickle
import functools
from itertools import chain
from unittest import mock
from configparser import ConfigParser
from glob import glob
from pathlib import (PurePath, Path)
import mesonbuild.mlog
import mesonbuild.compilers
import mesonbuild.environment
import mesonbuild.mesonlib
import mesonbuild.coredata
import mesonbuild.modules.gnome
from mesonbuild.interpreter import Interpreter, ObjectHolder
from mesonbuild.mesonlib import (
is_windows, is_osx, is_cygwin, is_dragonflybsd, is_openbsd, is_haiku,
windows_proof_rmtree, python_command, version_compare,
BuildDirLock, Version
)
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import MesonException, EnvironmentException
from mesonbuild.dependencies import PkgConfigDependency, ExternalProgram
from mesonbuild.build import Target
import mesonbuild.modules.pkgconfig
from run_tests import (
Backend, FakeBuild, FakeCompilerOptions,
ensure_backend_detects_changes, exe_suffix, get_backend_commands,
get_builddir_target_args, get_fake_env, get_fake_options, get_meson_script,
run_configure_inprocess, run_mtest_inprocess
)
def get_dynamic_section_entry(fname, entry):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF platforms')
try:
raw_out = subprocess.check_output(['readelf', '-d', fname],
universal_newlines=True)
except FileNotFoundError:
# FIXME: Try using depfixer.py:Elf() as a fallback
raise unittest.SkipTest('readelf not found')
pattern = re.compile(entry + r': \[(.*?)\]')
for line in raw_out.split('\n'):
m = pattern.search(line)
if m is not None:
return m.group(1)
return None # The file did not contain the specified entry.
def get_soname(fname):
return get_dynamic_section_entry(fname, 'soname')
def get_rpath(fname):
return get_dynamic_section_entry(fname, r'(?:rpath|runpath)')
def is_tarball():
if not os.path.isdir('docs'):
return True
return False
def is_ci():
if 'CI' in os.environ:
return True
return False
def _git_init(project_dir):
subprocess.check_call(['git', 'init'], cwd=project_dir, stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'config',
'user.name', 'Author Person'], cwd=project_dir)
subprocess.check_call(['git', 'config',
'user.email', 'teh_coderz@example.com'], cwd=project_dir)
subprocess.check_call('git add *', cwd=project_dir, shell=True,
stdout=subprocess.DEVNULL)
subprocess.check_call(['git', 'commit', '-a', '-m', 'I am a project'], cwd=project_dir,
stdout=subprocess.DEVNULL)
def skipIfNoExecutable(exename):
'''
Skip this test if the given executable is not found.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if shutil.which(exename) is None:
raise unittest.SkipTest(exename + ' not found')
return func(*args, **kwargs)
return wrapped
return wrapper
def skipIfNoPkgconfig(f):
'''
Skip this test if no pkg-config is found, unless we're on CI.
This allows users to run our test suite without having
    pkg-config installed on, e.g., macOS, while ensuring that our CI does not
silently skip the test because of misconfiguration.
Note: Yes, we provide pkg-config even while running Windows CI
'''
@functools.wraps(f)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
return f(*args, **kwargs)
return wrapped
def skipIfNoPkgconfigDep(depname):
'''
Skip this test if the given pkg-config dep is not found, unless we're on CI.
'''
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if not is_ci() and shutil.which('pkg-config') is None:
raise unittest.SkipTest('pkg-config not found')
if not is_ci() and subprocess.call(['pkg-config', '--exists', depname]) != 0:
raise unittest.SkipTest('pkg-config dependency {} not found.'.format(depname))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_not_language(lang):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
env = get_fake_env('', '', '')
f = getattr(env, 'detect_{}_compiler'.format(lang))
if lang in ['cs', 'vala', 'java', 'swift']:
f()
else:
f(False)
except EnvironmentException:
raise unittest.SkipTest('No {} compiler found.'.format(lang))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_env_value(value):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
if value in os.environ:
raise unittest.SkipTest(
'Environment variable "{}" set, skipping.'.format(value))
return func(*args, **kwargs)
return wrapped
return wrapper
def skip_if_not_base_option(feature):
"""Skip tests if The compiler does not support a given base option.
for example, ICC doesn't currently support b_sanitize.
"""
def actual(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
env = get_fake_env('', '', '')
cc = env.detect_c_compiler(False)
if feature not in cc.base_options:
raise unittest.SkipTest(
'{} not available with {}'.format(feature, cc.id))
return f(*args, **kwargs)
return wrapped
return actual
class PatchModule:
'''
Fancy monkey-patching! Whee! Can't use mock.patch because it only
patches in the local namespace.
'''
def __init__(self, func, name, impl):
self.func = func
assert(isinstance(name, str))
self.func_name = name
self.old_impl = None
self.new_impl = impl
def __enter__(self):
self.old_impl = self.func
exec('{} = self.new_impl'.format(self.func_name))
def __exit__(self, *args):
exec('{} = self.old_impl'.format(self.func_name))
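# Illustrative use of PatchModule (the names here are placeholders, not part of the
# test suite):
#   with PatchModule(mesonbuild.mlog.log, 'mesonbuild.mlog.log', fake_log):
#       ...  # code under test sees the patched function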
class InternalTests(unittest.TestCase):
def test_version_number(self):
searchfunc = mesonbuild.environment.search_version
self.assertEqual(searchfunc('foobar 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('2016.10.28 1.2.3'), '1.2.3')
self.assertEqual(searchfunc('foobar 2016.10.128'), 'unknown version')
self.assertEqual(searchfunc('2016.10.128'), 'unknown version')
def test_mode_symbolic_to_bits(self):
modefunc = mesonbuild.mesonlib.FileMode.perms_s_to_bits
self.assertEqual(modefunc('---------'), 0)
self.assertEqual(modefunc('r--------'), stat.S_IRUSR)
self.assertEqual(modefunc('---r-----'), stat.S_IRGRP)
self.assertEqual(modefunc('------r--'), stat.S_IROTH)
self.assertEqual(modefunc('-w-------'), stat.S_IWUSR)
self.assertEqual(modefunc('----w----'), stat.S_IWGRP)
self.assertEqual(modefunc('-------w-'), stat.S_IWOTH)
self.assertEqual(modefunc('--x------'), stat.S_IXUSR)
self.assertEqual(modefunc('-----x---'), stat.S_IXGRP)
self.assertEqual(modefunc('--------x'), stat.S_IXOTH)
self.assertEqual(modefunc('--S------'), stat.S_ISUID)
self.assertEqual(modefunc('-----S---'), stat.S_ISGID)
self.assertEqual(modefunc('--------T'), stat.S_ISVTX)
self.assertEqual(modefunc('--s------'), stat.S_ISUID | stat.S_IXUSR)
self.assertEqual(modefunc('-----s---'), stat.S_ISGID | stat.S_IXGRP)
self.assertEqual(modefunc('--------t'), stat.S_ISVTX | stat.S_IXOTH)
self.assertEqual(modefunc('rwx------'), stat.S_IRWXU)
self.assertEqual(modefunc('---rwx---'), stat.S_IRWXG)
self.assertEqual(modefunc('------rwx'), stat.S_IRWXO)
# We could keep listing combinations exhaustively but that seems
# tedious and pointless. Just test a few more.
self.assertEqual(modefunc('rwxr-xr-x'),
stat.S_IRWXU |
stat.S_IRGRP | stat.S_IXGRP |
stat.S_IROTH | stat.S_IXOTH)
self.assertEqual(modefunc('rw-r--r--'),
stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP |
stat.S_IROTH)
self.assertEqual(modefunc('rwsr-x---'),
stat.S_IRWXU | stat.S_ISUID |
stat.S_IRGRP | stat.S_IXGRP)
def test_compiler_args_class(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
cc = mesonbuild.compilers.CCompiler([], 'fake', False)
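        # CompilerArgs mimics compiler argument ordering: -I/-L style search
        # paths are prepended and de-duplicated, while ordinary flags such as
        # -O3 are appended, as the assertions below demonstrate.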
# Test that bad initialization fails
self.assertRaises(TypeError, cargsfunc, [])
self.assertRaises(TypeError, cargsfunc, [], [])
self.assertRaises(TypeError, cargsfunc, cc, [], [])
# Test that empty initialization works
a = cargsfunc(cc)
self.assertEqual(a, [])
# Test that list initialization works
a = cargsfunc(['-I.', '-I..'], cc)
self.assertEqual(a, ['-I.', '-I..'])
# Test that there is no de-dup on initialization
self.assertEqual(cargsfunc(['-I.', '-I.'], cc), ['-I.', '-I.'])
## Test that appending works
a.append('-I..')
self.assertEqual(a, ['-I..', '-I.'])
a.append('-O3')
self.assertEqual(a, ['-I..', '-I.', '-O3'])
## Test that in-place addition works
a += ['-O2', '-O2']
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2', '-O2'])
# Test that removal works
a.remove('-O2')
self.assertEqual(a, ['-I..', '-I.', '-O3', '-O2'])
# Test that de-dup happens on addition
a += ['-Ifoo', '-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# .extend() is just +=, so we don't test it
## Test that addition works
# Test that adding a list with just one old arg works and yields the same array
a = a + ['-Ifoo']
self.assertEqual(a, ['-Ifoo', '-I..', '-I.', '-O3', '-O2'])
# Test that adding a list with one arg new and one old works
a = a + ['-Ifoo', '-Ibaz']
self.assertEqual(a, ['-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2'])
# Test that adding args that must be prepended and appended works
a = a + ['-Ibar', '-Wall']
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
## Test that reflected addition works
# Test that adding to a list with just one old arg works and yields the same array
a = ['-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-O3', '-O2', '-Wall'])
# Test that adding to a list with just one new arg that is not pre-pended works
a = ['-Werror'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with two new args preserves the order
a = ['-Ldir', '-Lbah'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
# Test that adding to a list with old args does nothing
a = ['-Ibar', '-Ibaz', '-Ifoo'] + a
self.assertEqual(a, ['-Ibar', '-Ifoo', '-Ibaz', '-I..', '-I.', '-Ldir', '-Lbah', '-Werror', '-O3', '-O2', '-Wall'])
## Test that adding libraries works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Adding a library and a libpath appends both correctly
l += ['-Lbardir', '-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
# Adding the same library again does nothing
l += ['-lbar']
self.assertEqual(l, ['-Lbardir', '-Lfoodir', '-lfoo', '-lbar'])
## Test that 'direct' append and extend works
l = cargsfunc(cc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l, ['-Lfoodir', '-lfoo'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l, ['-Lfoodir', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a'])
def test_compiler_args_class_gnuld(self):
cargsfunc = mesonbuild.compilers.CompilerArgs
## Test --start/end-group
gcc = mesonbuild.compilers.GnuCCompiler([], 'fake', mesonbuild.compilers.CompilerType.GCC_STANDARD, False)
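        # GNU ld resolves static archives in a single pass, so on GNU-ld
        # platforms CompilerArgs wraps library arguments in
        # -Wl,--start-group/--end-group to handle circular dependencies.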
## Test that 'direct' append and extend works
l = cargsfunc(gcc, ['-Lfoodir', '-lfoo'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Wl,--end-group'])
# Direct-adding a library and a libpath appends both correctly
l.extend_direct(['-Lbardir', '-lbar'])
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-Wl,--end-group'])
# Direct-adding the same library again still adds it
l.append_direct('-lbar')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '-Wl,--end-group'])
# Direct-adding with absolute path deduplicates
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding libbaz again does nothing
l.append_direct('/libbaz.a')
self.assertEqual(l.to_native(copy=True), ['-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group'])
# Adding a non-library argument doesn't include it in the group
l += ['-Lfoo', '-Wl,--export-dynamic']
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--end-group', '-Wl,--export-dynamic'])
        # -Wl,-ldl is detected as a library and gets added to the group
l.append('-Wl,-ldl')
self.assertEqual(l.to_native(copy=True), ['-Lfoo', '-Lfoodir', '-Wl,--start-group', '-lfoo', '-Lbardir', '-lbar', '-lbar', '/libbaz.a', '-Wl,--export-dynamic', '-Wl,-ldl', '-Wl,--end-group'])
def test_string_templates_substitution(self):
dictfunc = mesonbuild.mesonlib.get_filenames_templates_dict
substfunc = mesonbuild.mesonlib.substitute_values
ME = mesonbuild.mesonlib.MesonException
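        # get_filenames_templates_dict() builds the @INPUT@/@OUTPUT@ template
        # mapping and substitute_values() expands those templates in a command
        # line, raising MesonException when a template cannot be satisfied.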
# Identity
self.assertEqual(dictfunc([], []), {})
# One input, no outputs
inputs = ['bar/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + [d['@PLAINNAME@'] + '.ok'] + cmd[2:])
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
# One input, one output
inputs = ['bar/foo.c.in']
outputs = ['out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': '.'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@.out', '@OUTPUT@', 'strings']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out'] + outputs + cmd[2:])
cmd = ['@INPUT0@.out', '@PLAINNAME@.ok', '@OUTPUT0@']
self.assertEqual(substfunc(cmd, d),
[inputs[0] + '.out', d['@PLAINNAME@'] + '.ok'] + outputs)
cmd = ['@INPUT@', '@BASENAME@.hah', 'strings']
self.assertEqual(substfunc(cmd, d),
inputs + [d['@BASENAME@'] + '.hah'] + cmd[2:])
# One input, one output with a subdir
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0],
'@PLAINNAME@': 'foo.c.in', '@BASENAME@': 'foo.c',
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Two inputs, no outputs
inputs = ['bar/foo.c.in', 'baz/foo.c.in']
outputs = []
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1]}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@INPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[1:])
cmd = ['@INPUT0@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out'] + cmd[1:])
cmd = ['@INPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [inputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
cmd = ['@INPUT0@', '@INPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), inputs + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
        # Too many inputs for @PLAINNAME@ and @BASENAME@ (they require exactly one)
cmd = ['@PLAINNAME@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@BASENAME@']
self.assertRaises(ME, substfunc, cmd, d)
# No outputs
cmd = ['@OUTPUT@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTPUT0@']
self.assertRaises(ME, substfunc, cmd, d)
cmd = ['@OUTDIR@']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, one output
outputs = ['dir/out.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out'] + cmd[1:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', 'strings']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok'] + cmd[2:])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Two inputs, two outputs
outputs = ['dir/out.c', 'dir/out2.c']
ret = dictfunc(inputs, outputs)
d = {'@INPUT@': inputs, '@INPUT0@': inputs[0], '@INPUT1@': inputs[1],
'@OUTPUT@': outputs, '@OUTPUT0@': outputs[0], '@OUTPUT1@': outputs[1],
'@OUTDIR@': 'dir'}
# Check dictionary
self.assertEqual(ret, d)
# Check substitutions
cmd = ['some', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), cmd)
cmd = ['@OUTPUT@', 'ordinary', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[1:])
cmd = ['@OUTPUT0@', '@OUTPUT1@', 'strings']
self.assertEqual(substfunc(cmd, d), outputs + cmd[2:])
cmd = ['@OUTPUT0@.out', '@INPUT1@.ok', '@OUTDIR@']
self.assertEqual(substfunc(cmd, d), [outputs[0] + '.out', inputs[1] + '.ok', 'dir'])
# Many inputs, can't use @INPUT@ like this
cmd = ['@INPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough inputs
cmd = ['@INPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Not enough outputs
cmd = ['@OUTPUT2@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
# Many outputs, can't use @OUTPUT@ like this
cmd = ['@OUTPUT@.out', 'ordinary', 'strings']
self.assertRaises(ME, substfunc, cmd, d)
def test_needs_exe_wrapper_override(self):
config = ConfigParser()
config['binaries'] = {
'c': '\'/usr/bin/gcc\'',
}
config['host_machine'] = {
'system': '\'linux\'',
'cpu_family': '\'arm\'',
'cpu': '\'armv7\'',
'endian': '\'little\'',
}
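        # First detect the default value from the cross file alone, then force
        # the opposite via the 'needs_exe_wrapper' property and verify that the
        # explicit setting wins over detection.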
        # Cannot be used as a context manager because we need to open it a
        # second time, and that is not possible on Windows.
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.flush()
configfile.close()
opts = get_fake_options('')
opts.cross_file = configfilename
env = get_fake_env('', '', '', opts)
detected_value = env.need_exe_wrapper()
os.unlink(configfilename)
desired_value = not detected_value
config['properties'] = {
'needs_exe_wrapper': 'true' if desired_value else 'false'
}
configfile = tempfile.NamedTemporaryFile(mode='w+', delete=False)
configfilename = configfile.name
config.write(configfile)
configfile.close()
opts = get_fake_options('')
opts.cross_file = configfilename
env = get_fake_env('', '', '', opts)
forced_value = env.need_exe_wrapper()
os.unlink(configfilename)
self.assertEqual(forced_value, desired_value)
def test_listify(self):
listify = mesonbuild.mesonlib.listify
# Test sanity
self.assertEqual([1], listify(1))
self.assertEqual([], listify([]))
self.assertEqual([1], listify([1]))
# Test flattening
self.assertEqual([1, 2, 3], listify([1, [2, 3]]))
self.assertEqual([1, 2, 3], listify([1, [2, [3]]]))
self.assertEqual([1, [2, [3]]], listify([1, [2, [3]]], flatten=False))
# Test flattening and unholdering
holder1 = ObjectHolder(1)
holder3 = ObjectHolder(3)
self.assertEqual([holder1], listify(holder1))
self.assertEqual([holder1], listify([holder1]))
self.assertEqual([holder1, 2], listify([holder1, 2]))
self.assertEqual([holder1, 2, 3], listify([holder1, 2, [3]]))
self.assertEqual([1], listify(holder1, unholder=True))
self.assertEqual([1], listify([holder1], unholder=True))
self.assertEqual([1, 2], listify([holder1, 2], unholder=True))
self.assertEqual([1, 2, 3], listify([holder1, 2, [holder3]], unholder=True))
# Unholding doesn't work recursively when not flattening
self.assertEqual([1, [2], [holder3]], listify([holder1, [2], [holder3]], unholder=True, flatten=False))
def test_extract_as_list(self):
extract = mesonbuild.mesonlib.extract_as_list
# Test sanity
kwargs = {'sources': [1, 2, 3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources'))
self.assertEqual(kwargs, {'sources': [1, 2, 3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', pop=True))
self.assertEqual(kwargs, {})
# Test unholding
holder3 = ObjectHolder(3)
kwargs = {'sources': [1, 2, holder3]}
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True))
self.assertEqual(kwargs, {'sources': [1, 2, holder3]})
self.assertEqual([1, 2, 3], extract(kwargs, 'sources', unholder=True, pop=True))
self.assertEqual(kwargs, {})
# Test listification
kwargs = {'sources': [1, 2, 3], 'pch_sources': [4, 5, 6]}
self.assertEqual([[1, 2, 3], [4, 5, 6]], extract(kwargs, 'sources', 'pch_sources'))
def test_pkgconfig_module(self):
class Mock:
pass
mock = Mock()
mock.pcdep = Mock()
mock.pcdep.name = "some_name"
mock.version_reqs = []
# pkgconfig dependency as lib
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_libs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
# pkgconfig dependency as requires
deps = mesonbuild.modules.pkgconfig.DependenciesHelper("thislib")
deps.add_pub_reqs([mock])
self.assertEqual(deps.format_reqs(deps.pub_reqs), "some_name")
def _test_all_naming(self, cc, env, patterns, platform):
shr = patterns[platform]['shared']
stc = patterns[platform]['static']
p = cc.get_library_naming(env, 'shared')
self.assertEqual(p, shr)
p = cc.get_library_naming(env, 'static')
self.assertEqual(p, stc)
p = cc.get_library_naming(env, 'static-shared')
self.assertEqual(p, stc + shr)
p = cc.get_library_naming(env, 'shared-static')
self.assertEqual(p, shr + stc)
p = cc.get_library_naming(env, 'default')
self.assertEqual(p, shr + stc)
# Test find library by mocking up openbsd
if platform != 'openbsd':
return
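        # OpenBSD ships shared libraries only as libfoo.so.MAJOR.MINOR, so the
        # search must pick the highest correctly-versioned file and ignore
        # malformed names such as libfoo.so.66a.0b.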
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'libfoo.so.6.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.5.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.54.0'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.66a.0b'), 'w') as f:
f.write('')
with open(os.path.join(tmpdir, 'libfoo.so.70.0.so.1'), 'w') as f:
f.write('')
found = cc.find_library_real('foo', env, [tmpdir], '', 'default')
self.assertEqual(os.path.basename(found[0]), 'libfoo.so.54.0')
def test_find_library_patterns(self):
'''
Unit test for the library search patterns used by find_library()
'''
unix_static = ('lib{}.a', '{}.a')
msvc_static = ('lib{}.a', 'lib{}.lib', '{}.a', '{}.lib')
# This is the priority list of pattern matching for library searching
patterns = {'openbsd': {'shared': ('lib{}.so', '{}.so', 'lib{}.so.[0-9]*.[0-9]*'),
'static': unix_static},
'linux': {'shared': ('lib{}.so', '{}.so'),
'static': unix_static},
'darwin': {'shared': ('lib{}.dylib', 'lib{}.so', '{}.dylib', '{}.so'),
'static': unix_static},
'cygwin': {'shared': ('cyg{}.dll', 'cyg{}.dll.a', 'lib{}.dll',
'lib{}.dll.a', '{}.dll', '{}.dll.a'),
'static': ('cyg{}.a',) + unix_static},
'windows-msvc': {'shared': ('lib{}.lib', '{}.lib'),
'static': msvc_static},
'windows-mingw': {'shared': ('lib{}.dll.a', 'lib{}.lib', 'lib{}.dll',
'{}.dll.a', '{}.lib', '{}.dll'),
'static': msvc_static}}
env = get_fake_env('', '', '')
cc = env.detect_c_compiler(False)
if is_osx():
self._test_all_naming(cc, env, patterns, 'darwin')
elif is_cygwin():
self._test_all_naming(cc, env, patterns, 'cygwin')
elif is_windows():
if cc.get_argument_syntax() == 'msvc':
self._test_all_naming(cc, env, patterns, 'windows-msvc')
else:
self._test_all_naming(cc, env, patterns, 'windows-mingw')
else:
self._test_all_naming(cc, env, patterns, 'linux')
# Mock OpenBSD since we don't have tests for it
true = lambda x, y: True
if not is_openbsd():
with PatchModule(mesonbuild.compilers.c.for_openbsd,
'mesonbuild.compilers.c.for_openbsd', true):
self._test_all_naming(cc, env, patterns, 'openbsd')
else:
self._test_all_naming(cc, env, patterns, 'openbsd')
with PatchModule(mesonbuild.compilers.c.for_darwin,
'mesonbuild.compilers.c.for_darwin', true):
self._test_all_naming(cc, env, patterns, 'darwin')
with PatchModule(mesonbuild.compilers.c.for_cygwin,
'mesonbuild.compilers.c.for_cygwin', true):
self._test_all_naming(cc, env, patterns, 'cygwin')
with PatchModule(mesonbuild.compilers.c.for_windows,
'mesonbuild.compilers.c.for_windows', true):
self._test_all_naming(cc, env, patterns, 'windows-mingw')
def test_pkgconfig_parse_libs(self):
'''
Unit test for parsing of pkg-config output to search for libraries
https://github.com/mesonbuild/meson/issues/3951
'''
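        # pkg-config is faked below: its --libs output is parsed and -L/-l
        # pairs should resolve to the static archives created in tmpdir, while
        # system libraries (pthread, m, c, dl, rt) must never resolve to
        # static archives.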
with tempfile.TemporaryDirectory() as tmpdir:
pkgbin = ExternalProgram('pkg-config', command=['pkg-config'], silent=True)
env = get_fake_env('', '', '')
compiler = env.detect_c_compiler(False)
env.coredata.compilers = {'c': compiler}
env.coredata.compiler_options['c_link_args'] = FakeCompilerOptions()
p1 = Path(tmpdir) / '1'
p2 = Path(tmpdir) / '2'
p1.mkdir()
p2.mkdir()
# libfoo.a is in one prefix
(p1 / 'libfoo.a').open('w').close()
# libbar.a is in both prefixes
(p1 / 'libbar.a').open('w').close()
(p2 / 'libbar.a').open('w').close()
# Ensure that we never statically link to these
(p1 / 'libpthread.a').open('w').close()
(p1 / 'libm.a').open('w').close()
(p1 / 'libc.a').open('w').close()
(p1 / 'libdl.a').open('w').close()
(p1 / 'librt.a').open('w').close()
def fake_call_pkgbin(self, args, env=None):
if '--libs' not in args:
return 0, ''
if args[0] == 'foo':
return 0, '-L{} -lfoo -L{} -lbar'.format(p2.as_posix(), p1.as_posix())
if args[0] == 'bar':
return 0, '-L{} -lbar'.format(p2.as_posix())
if args[0] == 'internal':
return 0, '-L{} -lpthread -lm -lc -lrt -ldl'.format(p1.as_posix())
old_call = PkgConfigDependency._call_pkgbin
old_check = PkgConfigDependency.check_pkgconfig
old_pkgbin = PkgConfigDependency.class_pkgbin
PkgConfigDependency._call_pkgbin = fake_call_pkgbin
PkgConfigDependency.check_pkgconfig = lambda x, _: pkgbin
# Test begins
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('foo', env, kwargs)
self.assertEqual(foo_dep.get_link_args(),
[(p1 / 'libfoo.a').as_posix(), (p2 / 'libbar.a').as_posix()])
bar_dep = PkgConfigDependency('bar', env, kwargs)
self.assertEqual(bar_dep.get_link_args(), [(p2 / 'libbar.a').as_posix()])
internal_dep = PkgConfigDependency('internal', env, kwargs)
if compiler.get_argument_syntax() == 'msvc':
self.assertEqual(internal_dep.get_link_args(), [])
else:
link_args = internal_dep.get_link_args()
for link_arg in link_args:
for lib in ('pthread', 'm', 'c', 'dl', 'rt'):
self.assertNotIn('lib{}.a'.format(lib), link_arg, msg=link_args)
# Test ends
PkgConfigDependency._call_pkgbin = old_call
PkgConfigDependency.check_pkgconfig = old_check
# Reset dependency class to ensure that in-process configure doesn't mess up
PkgConfigDependency.pkgbin_cache = {}
PkgConfigDependency.class_pkgbin = old_pkgbin
def test_version_compare(self):
comparefunc = mesonbuild.mesonlib.version_compare_many
for (a, b, result) in [
('0.99.beta19', '>= 0.99.beta14', True),
]:
self.assertEqual(comparefunc(a, b)[0], result)
for (a, b, result) in [
# examples from https://fedoraproject.org/wiki/Archive:Tools/RPM/VersionComparison
("1.0010", "1.9", 1),
("1.05", "1.5", 0),
("1.0", "1", 1),
("2.50", "2.5", 1),
("fc4", "fc.4", 0),
("FC5", "fc4", -1),
("2a", "2.0", -1),
("1.0", "1.fc4", 1),
("3.0.0_fc", "3.0.0.fc", 0),
# from RPM tests
("1.0", "1.0", 0),
("1.0", "2.0", -1),
("2.0", "1.0", 1),
("2.0.1", "2.0.1", 0),
("2.0", "2.0.1", -1),
("2.0.1", "2.0", 1),
("2.0.1a", "2.0.1a", 0),
("2.0.1a", "2.0.1", 1),
("2.0.1", "2.0.1a", -1),
("5.5p1", "5.5p1", 0),
("5.5p1", "5.5p2", -1),
("5.5p2", "5.5p1", 1),
("5.5p10", "5.5p10", 0),
("5.5p1", "5.5p10", -1),
("5.5p10", "5.5p1", 1),
("10xyz", "10.1xyz", -1),
("10.1xyz", "10xyz", 1),
("xyz10", "xyz10", 0),
("xyz10", "xyz10.1", -1),
("xyz10.1", "xyz10", 1),
("xyz.4", "xyz.4", 0),
("xyz.4", "8", -1),
("8", "xyz.4", 1),
("xyz.4", "2", -1),
("2", "xyz.4", 1),
("5.5p2", "5.6p1", -1),
("5.6p1", "5.5p2", 1),
("5.6p1", "6.5p1", -1),
("6.5p1", "5.6p1", 1),
("6.0.rc1", "6.0", 1),
("6.0", "6.0.rc1", -1),
("10b2", "10a1", 1),
("10a2", "10b2", -1),
("1.0aa", "1.0aa", 0),
("1.0a", "1.0aa", -1),
("1.0aa", "1.0a", 1),
("10.0001", "10.0001", 0),
("10.0001", "10.1", 0),
("10.1", "10.0001", 0),
("10.0001", "10.0039", -1),
("10.0039", "10.0001", 1),
("4.999.9", "5.0", -1),
("5.0", "4.999.9", 1),
("20101121", "20101121", 0),
("20101121", "20101122", -1),
("20101122", "20101121", 1),
("2_0", "2_0", 0),
("2.0", "2_0", 0),
("2_0", "2.0", 0),
("a", "a", 0),
("a+", "a+", 0),
("a+", "a_", 0),
("a_", "a+", 0),
("+a", "+a", 0),
("+a", "_a", 0),
("_a", "+a", 0),
("+_", "+_", 0),
("_+", "+_", 0),
("_+", "_+", 0),
("+", "_", 0),
("_", "+", 0),
# other tests
('0.99.beta19', '0.99.beta14', 1),
("1.0.0", "2.0.0", -1),
(".0.0", "2.0.0", -1),
("alpha", "beta", -1),
("1.0", "1.0.0", -1),
("2.456", "2.1000", -1),
("2.1000", "3.111", -1),
("2.001", "2.1", 0),
("2.34", "2.34", 0),
("6.1.2", "6.3.8", -1),
("1.7.3.0", "2.0.0", -1),
("2.24.51", "2.25", -1),
("2.1.5+20120813+gitdcbe778", "2.1.5", 1),
("3.4.1", "3.4b1", 1),
("041206", "200090325", -1),
("0.6.2+git20130413", "0.6.2", 1),
("2.6.0+bzr6602", "2.6.0", 1),
("2.6.0", "2.6b2", 1),
("2.6.0+bzr6602", "2.6b2x", 1),
("0.6.7+20150214+git3a710f9", "0.6.7", 1),
("15.8b", "15.8.0.1", -1),
("1.2rc1", "1.2.0", -1),
]:
ver_a = Version(a)
ver_b = Version(b)
self.assertEqual(ver_a.__cmp__(ver_b), result)
self.assertEqual(ver_b.__cmp__(ver_a), -result)
@unittest.skipIf(is_tarball(), 'Skipping because this is a tarball release')
class DataTests(unittest.TestCase):
def test_snippets(self):
hashcounter = re.compile('^ *(#)+')
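        # Headings in release-note snippets must use exactly two '#' characters;
        # the regex above matches the leading hashes on each heading line.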
snippet_dir = Path('docs/markdown/snippets')
self.assertTrue(snippet_dir.is_dir())
for f in snippet_dir.glob('*'):
self.assertTrue(f.is_file())
if f.parts[-1].endswith('~'):
continue
if f.suffix == '.md':
in_code_block = False
with f.open() as snippet:
for line in snippet:
if line.startswith(' '):
continue
if line.startswith('```'):
in_code_block = not in_code_block
if in_code_block:
continue
m = re.match(hashcounter, line)
if m:
self.assertEqual(len(m.group(0)), 2, 'All headings in snippets must have two hash symbols: ' + f.name)
self.assertFalse(in_code_block, 'Unclosed code block.')
else:
if f.name != 'add_release_note_snippets_here':
self.assertTrue(False, 'A file without .md suffix in snippets dir: ' + f.name)
def test_compiler_options_documented(self):
'''
Test that C and C++ compiler options and base options are documented in
        Builtin-options.md. Only tests the default compiler for the current
platform on the CI.
'''
md = None
with open('docs/markdown/Builtin-options.md') as f:
md = f.read()
self.assertIsNotNone(md)
env = get_fake_env('', '', '')
# FIXME: Support other compilers
cc = env.detect_c_compiler(False)
cpp = env.detect_cpp_compiler(False)
for comp in (cc, cpp):
for opt in comp.get_options().keys():
self.assertIn(opt, md)
for opt in comp.base_options:
self.assertIn(opt, md)
self.assertNotIn('b_unknown', md)
def test_cpu_families_documented(self):
with open("docs/markdown/Reference-tables.md") as f:
md = f.read()
self.assertIsNotNone(md)
sections = list(re.finditer(r"^## (.+)$", md, re.MULTILINE))
for s1, s2 in zip(sections[::2], sections[1::2]):
if s1.group(1) == "CPU families":
# Extract the content for this section
content = md[s1.end():s2.start()]
# Find the list entries
arches = [m.group(1) for m in re.finditer(r"^\| (\w+) +\|", content, re.MULTILINE)]
# Drop the header
arches = set(arches[1:])
self.assertEqual(arches, set(mesonbuild.environment.known_cpu_families))
def test_markdown_files_in_sitemap(self):
'''
        Test that each markdown file in docs/markdown is referenced in sitemap.txt
'''
with open("docs/sitemap.txt") as f:
md = f.read()
self.assertIsNotNone(md)
toc = list(m.group(1) for m in re.finditer(r"^\s*(\w.*)$", md, re.MULTILINE))
markdownfiles = [f.name for f in Path("docs/markdown").iterdir() if f.is_file() and f.suffix == '.md']
exceptions = ['_Sidebar.md']
for f in markdownfiles:
if f not in exceptions:
self.assertIn(f, toc)
def test_syntax_highlighting_files(self):
'''
Ensure that syntax highlighting files were updated for new functions in
the global namespace in build files.
'''
env = get_fake_env('', '', '')
interp = Interpreter(FakeBuild(env), mock=True)
with open('data/syntax-highlighting/vim/syntax/meson.vim') as f:
res = re.search(r'syn keyword mesonBuiltin(\s+\\\s\w+)+', f.read(), re.MULTILINE)
defined = set([a.strip() for a in res.group().split('\\')][1:])
self.assertEqual(defined, set(chain(interp.funcs.keys(), interp.builtin.keys())))
class BasePlatformTests(unittest.TestCase):
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
src_root = os.path.join(os.getcwd(), src_root)
self.src_root = src_root
self.prefix = '/usr'
self.libdir = 'lib'
# Get the backend
# FIXME: Extract this from argv?
self.backend = getattr(Backend, os.environ.get('MESON_UNIT_TEST_BACKEND', 'ninja'))
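        # The backend can be overridden for the whole suite via
        # MESON_UNIT_TEST_BACKEND (e.g. 'vs' or 'ninja'); it defaults to ninja.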
self.meson_args = ['--backend=' + self.backend.name]
self.meson_cross_file = None
self.meson_command = python_command + [get_meson_script()]
self.setup_command = self.meson_command + self.meson_args
self.mconf_command = self.meson_command + ['configure']
self.mintro_command = self.meson_command + ['introspect']
self.wrap_command = self.meson_command + ['wrap']
# Backend-specific build commands
self.build_command, self.clean_command, self.test_command, self.install_command, \
self.uninstall_command = get_backend_commands(self.backend)
# Test directories
self.common_test_dir = os.path.join(src_root, 'test cases/common')
self.vala_test_dir = os.path.join(src_root, 'test cases/vala')
self.framework_test_dir = os.path.join(src_root, 'test cases/frameworks')
self.unit_test_dir = os.path.join(src_root, 'test cases/unit')
# Misc stuff
self.orig_env = os.environ.copy()
if self.backend is Backend.ninja:
self.no_rebuild_stdout = ['ninja: no work to do.', 'samu: nothing to do']
else:
# VS doesn't have a stable output when no changes are done
            # Xcode backend is untested with unit tests, help welcome!
self.no_rebuild_stdout = ['UNKNOWN BACKEND {!r}'.format(self.backend.name)]
self.builddirs = []
self.new_builddir()
def change_builddir(self, newdir):
self.builddir = newdir
self.privatedir = os.path.join(self.builddir, 'meson-private')
self.logdir = os.path.join(self.builddir, 'meson-logs')
self.installdir = os.path.join(self.builddir, 'install')
self.distdir = os.path.join(self.builddir, 'meson-dist')
self.mtest_command = self.meson_command + ['test', '-C', self.builddir]
self.builddirs.append(self.builddir)
def new_builddir(self):
# In case the directory is inside a symlinked directory, find the real
# path otherwise we might not find the srcdir from inside the builddir.
newdir = os.path.realpath(tempfile.mkdtemp())
self.change_builddir(newdir)
def _print_meson_log(self):
log = os.path.join(self.logdir, 'meson-log.txt')
if not os.path.isfile(log):
print("{!r} doesn't exist".format(log))
return
with open(log, 'r', encoding='utf-8') as f:
print(f.read())
def tearDown(self):
for path in self.builddirs:
try:
windows_proof_rmtree(path)
except FileNotFoundError:
pass
os.environ.clear()
os.environ.update(self.orig_env)
super().tearDown()
def _run(self, command, workdir=None):
'''
Run a command while printing the stdout and stderr to stdout,
and also return a copy of it
'''
        # If this call hangs, CI will just abort. It is very hard to distinguish
        # between a CI issue and a test bug in that case. Set a timeout and fail
        # loudly instead.
p = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, env=os.environ.copy(),
universal_newlines=True, cwd=workdir, timeout=60 * 5)
print(p.stdout)
if p.returncode != 0:
if 'MESON_SKIP_TEST' in p.stdout:
raise unittest.SkipTest('Project requested skipping.')
raise subprocess.CalledProcessError(p.returncode, command, output=p.stdout)
return p.stdout
def init(self, srcdir, extra_args=None, default_args=True, inprocess=False):
self.assertPathExists(srcdir)
if extra_args is None:
extra_args = []
if not isinstance(extra_args, list):
extra_args = [extra_args]
args = [srcdir, self.builddir]
if default_args:
args += ['--prefix', self.prefix,
'--libdir', self.libdir]
if self.meson_cross_file:
args += ['--cross-file', self.meson_cross_file]
self.privatedir = os.path.join(self.builddir, 'meson-private')
if inprocess:
try:
(returncode, out, err) = run_configure_inprocess(self.meson_args + args + extra_args)
if 'MESON_SKIP_TEST' in out:
raise unittest.SkipTest('Project requested skipping.')
if returncode != 0:
self._print_meson_log()
print('Stdout:\n')
print(out)
print('Stderr:\n')
print(err)
raise RuntimeError('Configure failed')
except:
self._print_meson_log()
raise
finally:
# Close log file to satisfy Windows file locking
mesonbuild.mlog.shutdown()
mesonbuild.mlog.log_dir = None
mesonbuild.mlog.log_file = None
else:
try:
out = self._run(self.setup_command + args + extra_args)
except unittest.SkipTest:
raise unittest.SkipTest('Project requested skipping: ' + srcdir)
except:
self._print_meson_log()
raise
return out
def build(self, target=None, extra_args=None):
if extra_args is None:
extra_args = []
# Add arguments for building the target (if specified),
# and using the build dir (if required, with VS)
args = get_builddir_target_args(self.backend, self.builddir, target)
return self._run(self.build_command + args + extra_args, workdir=self.builddir)
def clean(self):
dir_args = get_builddir_target_args(self.backend, self.builddir, None)
self._run(self.clean_command + dir_args, workdir=self.builddir)
def run_tests(self, inprocess=False):
if not inprocess:
self._run(self.test_command, workdir=self.builddir)
else:
run_mtest_inprocess(['-C', self.builddir])
def install(self, *, use_destdir=True):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
if use_destdir:
os.environ['DESTDIR'] = self.installdir
self._run(self.install_command, workdir=self.builddir)
def uninstall(self):
self._run(self.uninstall_command, workdir=self.builddir)
def run_target(self, target):
'''
        Run a build target while printing the stdout and stderr to stdout,
and also return a copy of it
'''
return self.build(target=target)
def setconf(self, arg, will_build=True):
if not isinstance(arg, list):
arg = [arg]
if will_build:
ensure_backend_detects_changes(self.backend)
self._run(self.mconf_command + arg + [self.builddir])
def wipe(self):
windows_proof_rmtree(self.builddir)
def utime(self, f):
ensure_backend_detects_changes(self.backend)
os.utime(f)
def get_compdb(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Compiler db not available with {} backend'.format(self.backend.name))
try:
with open(os.path.join(self.builddir, 'compile_commands.json')) as ifile:
contents = json.load(ifile)
except FileNotFoundError:
raise unittest.SkipTest('Compiler db not found')
# If Ninja is using .rsp files, generate them, read their contents, and
# replace it as the command for all compile commands in the parsed json.
if len(contents) > 0 and contents[0]['command'].endswith('.rsp'):
# Pretend to build so that the rsp files are generated
self.build(extra_args=['-d', 'keeprsp', '-n'])
for each in contents:
# Extract the actual command from the rsp file
compiler, rsp = each['command'].split(' @')
rsp = os.path.join(self.builddir, rsp)
# Replace the command with its contents
with open(rsp, 'r', encoding='utf-8') as f:
each['command'] = compiler + ' ' + f.read()
return contents
def get_meson_log(self):
with open(os.path.join(self.builddir, 'meson-logs', 'meson-log.txt')) as f:
return f.readlines()
def get_meson_log_compiler_checks(self):
'''
        Fetch a list of command lines run by meson for compiler checks.
Each command-line is returned as a list of arguments.
'''
log = self.get_meson_log()
prefix = 'Command line:'
cmds = [l[len(prefix):].split() for l in log if l.startswith(prefix)]
return cmds
def introspect(self, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [self.builddir],
universal_newlines=True)
return json.loads(out)
def introspect_directory(self, directory, args):
if isinstance(args, str):
args = [args]
out = subprocess.check_output(self.mintro_command + args + [directory],
universal_newlines=True)
try:
obj = json.loads(out)
except Exception as e:
print(out)
raise e
return obj
def assertPathEqual(self, path1, path2):
'''
Handles a lot of platform-specific quirks related to paths such as
separator, case-sensitivity, etc.
'''
self.assertEqual(PurePath(path1), PurePath(path2))
def assertPathListEqual(self, pathlist1, pathlist2):
self.assertEqual(len(pathlist1), len(pathlist2))
worklist = list(zip(pathlist1, pathlist2))
for i in worklist:
if i[0] is None:
self.assertEqual(i[0], i[1])
else:
self.assertPathEqual(i[0], i[1])
def assertPathBasenameEqual(self, path, basename):
msg = '{!r} does not end with {!r}'.format(path, basename)
# We cannot use os.path.basename because it returns '' when the path
# ends with '/' for some silly reason. This is not how the UNIX utility
# `basename` works.
path_basename = PurePath(path).parts[-1]
self.assertEqual(PurePath(path_basename), PurePath(basename), msg)
def assertBuildIsNoop(self):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn(ret.split('\n')[-2], self.no_rebuild_stdout)
elif self.backend is Backend.vs:
# Ensure that some target said that no rebuild was done
self.assertIn('CustomBuild:\n All outputs are up-to-date.', ret)
self.assertIn('ClCompile:\n All outputs are up-to-date.', ret)
self.assertIn('Link:\n All outputs are up-to-date.', ret)
# Ensure that no targets were built
clre = re.compile('ClCompile:\n [^\n]*cl', flags=re.IGNORECASE)
linkre = re.compile('Link:\n [^\n]*link', flags=re.IGNORECASE)
self.assertNotRegex(ret, clre)
self.assertNotRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertRebuiltTarget(self, target):
ret = self.build()
if self.backend is Backend.ninja:
self.assertIn('Linking target {}'.format(target), ret)
elif self.backend is Backend.vs:
# Ensure that this target was rebuilt
linkre = re.compile('Link:\n [^\n]*link[^\n]*' + target, flags=re.IGNORECASE)
self.assertRegex(ret, linkre)
elif self.backend is Backend.xcode:
raise unittest.SkipTest('Please help us fix this test on the xcode backend')
else:
raise RuntimeError('Invalid backend: {!r}'.format(self.backend.name))
def assertPathExists(self, path):
m = 'Path {!r} should exist'.format(path)
self.assertTrue(os.path.exists(path), msg=m)
def assertPathDoesNotExist(self, path):
m = 'Path {!r} should not exist'.format(path)
self.assertFalse(os.path.exists(path), msg=m)
class AllPlatformTests(BasePlatformTests):
'''
Tests that should run on all platforms
'''
def test_default_options_prefix(self):
'''
Tests that setting a prefix in default_options in project() works.
Can't be an ordinary test because we pass --prefix to meson there.
https://github.com/mesonbuild/meson/issues/1349
'''
testdir = os.path.join(self.common_test_dir, '91 default options')
self.init(testdir, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
prefix = opt['value']
self.assertEqual(prefix, '/absoluteprefix')
def test_absolute_prefix_libdir(self):
'''
Tests that setting absolute paths for --prefix and --libdir work. Can't
be an ordinary test because these are set via the command-line.
https://github.com/mesonbuild/meson/issues/1341
https://github.com/mesonbuild/meson/issues/1345
'''
testdir = os.path.join(self.common_test_dir, '91 default options')
prefix = '/someabs'
libdir = 'libdir'
extra_args = ['--prefix=' + prefix,
# This can just be a relative path, but we want to test
# that passing this as an absolute path also works
'--libdir=' + prefix + '/' + libdir]
self.init(testdir, extra_args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
if opt['name'] == 'prefix':
self.assertEqual(prefix, opt['value'])
elif opt['name'] == 'libdir':
self.assertEqual(libdir, opt['value'])
def test_libdir_must_be_inside_prefix(self):
'''
Tests that libdir is forced to be inside prefix no matter how it is set.
Must be a unit test for obvious reasons.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
# libdir being inside prefix is ok
args = ['--prefix', '/opt', '--libdir', '/opt/lib32']
self.init(testdir, args)
self.wipe()
# libdir not being inside prefix is not ok
args = ['--prefix', '/usr', '--libdir', '/opt/lib32']
self.assertRaises(subprocess.CalledProcessError, self.init, testdir, args)
self.wipe()
# libdir must be inside prefix even when set via mesonconf
self.init(testdir)
self.assertRaises(subprocess.CalledProcessError, self.setconf, '-Dlibdir=/opt', False)
def test_prefix_dependent_defaults(self):
'''
Tests that configured directory paths are set to prefix dependent
defaults.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
expected = {
'/opt': {'prefix': '/opt',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': 'var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': 'com',
'sysconfdir': 'etc'},
'/usr': {'prefix': '/usr',
'bindir': 'bin', 'datadir': 'share', 'includedir': 'include',
'infodir': 'share/info',
'libexecdir': 'libexec', 'localedir': 'share/locale',
'localstatedir': '/var', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/lib',
'sysconfdir': '/etc'},
'/usr/local': {'prefix': '/usr/local',
'bindir': 'bin', 'datadir': 'share',
'includedir': 'include', 'infodir': 'share/info',
'libexecdir': 'libexec',
'localedir': 'share/locale',
'localstatedir': '/var/local', 'mandir': 'share/man',
'sbindir': 'sbin', 'sharedstatedir': '/var/local/lib',
'sysconfdir': 'etc'},
# N.B. We don't check 'libdir' as it's platform dependent, see
# default_libdir():
}
for prefix in expected:
args = ['--prefix', prefix]
self.init(testdir, args, default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[prefix]:
self.assertEqual(value, expected[prefix][name])
self.wipe()
def test_default_options_prefix_dependent_defaults(self):
'''
Tests that setting a prefix in default_options in project() sets prefix
dependent defaults for other options, and that those defaults can
be overridden in default_options or by the command line.
'''
testdir = os.path.join(self.common_test_dir, '169 default options prefix dependent defaults')
expected = {
'':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--prefix=/usr':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/sharedstate'},
'--sharedstatedir=/var/state':
{'prefix': '/usr',
'sysconfdir': '/etc',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
'--sharedstatedir=/var/state --prefix=/usr --sysconfdir=sysconf':
{'prefix': '/usr',
'sysconfdir': 'sysconf',
'localstatedir': '/var',
'sharedstatedir': '/var/state'},
}
for args in expected:
self.init(testdir, args.split(), default_args=False)
opts = self.introspect('--buildoptions')
for opt in opts:
name = opt['name']
value = opt['value']
if name in expected[args]:
self.assertEqual(value, expected[args][name])
self.wipe()
def test_static_library_overwrite(self):
'''
Tests that static libraries are never appended to, always overwritten.
Has to be a unit test because this involves building a project,
reconfiguring, and building it again so that `ar` is run twice on the
same static library.
https://github.com/mesonbuild/meson/issues/1355
'''
testdir = os.path.join(self.common_test_dir, '3 static')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(False)
static_linker = env.detect_static_linker(cc)
if is_windows():
raise unittest.SkipTest('https://github.com/mesonbuild/meson/issues/1526')
if not isinstance(static_linker, mesonbuild.linkers.ArLinker):
raise unittest.SkipTest('static linker is not `ar`')
# Configure
self.init(testdir)
# Get name of static library
targets = self.introspect('--targets')
self.assertEqual(len(targets), 1)
libname = targets[0]['filename'][0]
# Build and get contents of static library
self.build()
before = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
before = [f for f in before if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(before), 1, msg=before)
# Change the source to be built into the static library
self.setconf('-Dsource=libfile2.c')
self.build()
after = self._run(['ar', 't', os.path.join(self.builddir, libname)]).split()
# Filter out non-object-file contents
after = [f for f in after if f.endswith(('.o', '.obj'))]
# Static library should contain only one object
self.assertEqual(len(after), 1, msg=after)
# and the object must have changed
self.assertNotEqual(before, after)
def test_static_compile_order(self):
'''
Test that the order of files in a compiler command-line while compiling
and linking statically is deterministic. This can't be an ordinary test
case because we need to inspect the compiler database.
https://github.com/mesonbuild/meson/pull/951
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
self.init(testdir)
compdb = self.get_compdb()
# Rules will get written out in this order
self.assertTrue(compdb[0]['file'].endswith("libfile.c"))
self.assertTrue(compdb[1]['file'].endswith("libfile2.c"))
self.assertTrue(compdb[2]['file'].endswith("libfile3.c"))
self.assertTrue(compdb[3]['file'].endswith("libfile4.c"))
# FIXME: We don't have access to the linker command
def test_run_target_files_path(self):
'''
Test that run_targets are run from the correct directory
https://github.com/mesonbuild/meson/issues/957
'''
testdir = os.path.join(self.common_test_dir, '55 run target')
self.init(testdir)
self.run_target('check_exists')
def test_install_introspection(self):
'''
Tests that the Meson introspection API exposes install filenames correctly
https://github.com/mesonbuild/meson/issues/829
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/lib/libstat.a'])
self.assertPathListEqual(intro[1]['install_filename'], ['/usr/bin/prog' + exe_suffix])
def test_install_introspection_multiple_outputs(self):
'''
Tests that the Meson introspection API exposes multiple install filenames correctly without crashing
https://github.com/mesonbuild/meson/pull/4555
Reverted to the first file only because of https://github.com/mesonbuild/meson/pull/4547#discussion_r244173438
        TODO: Change the format to a list officially in a followup PR
'''
if self.backend is not Backend.ninja:
raise unittest.SkipTest('{!r} backend can\'t install files'.format(self.backend.name))
testdir = os.path.join(self.common_test_dir, '145 custom target multiple outputs')
self.init(testdir)
intro = self.introspect('--targets')
if intro[0]['type'] == 'executable':
intro = intro[::-1]
self.assertPathListEqual(intro[0]['install_filename'], ['/usr/include/diff.h', '/usr/bin/diff.sh'])
self.assertPathListEqual(intro[1]['install_filename'], ['/opt/same.h', '/opt/same.sh'])
self.assertPathListEqual(intro[2]['install_filename'], ['/usr/include/first.h', None])
self.assertPathListEqual(intro[3]['install_filename'], [None, '/usr/bin/second.sh'])
def test_uninstall(self):
exename = os.path.join(self.installdir, 'usr/bin/prog' + exe_suffix)
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
self.assertPathDoesNotExist(exename)
self.install()
self.assertPathExists(exename)
self.uninstall()
self.assertPathDoesNotExist(exename)
def test_forcefallback(self):
testdir = os.path.join(self.unit_test_dir, '31 forcefallback')
self.init(testdir, ['--wrap-mode=forcefallback'])
self.build()
self.run_tests()
def test_testsetups(self):
if not shutil.which('valgrind'):
raise unittest.SkipTest('Valgrind not installed.')
testdir = os.path.join(self.unit_test_dir, '2 testsetups')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
basic_log = f.read()
        # Run the buggy test with a setup whose environment makes it fail
self.assertRaises(subprocess.CalledProcessError,
self._run, self.mtest_command + ['--setup=valgrind'])
with open(os.path.join(self.logdir, 'testlog-valgrind.txt')) as f:
vg_log = f.read()
self.assertFalse('TEST_ENV is set' in basic_log)
self.assertFalse('Memcheck' in basic_log)
self.assertTrue('TEST_ENV is set' in vg_log)
self.assertTrue('Memcheck' in vg_log)
        # Run the buggy test with a setup that sets no environment, so it passes
self._run(self.mtest_command + ['--setup=wrapper'])
# Setup with no properties works
self._run(self.mtest_command + ['--setup=empty'])
# Setup with only env works
self._run(self.mtest_command + ['--setup=onlyenv'])
self._run(self.mtest_command + ['--setup=onlyenv2'])
self._run(self.mtest_command + ['--setup=onlyenv3'])
# Setup with only a timeout works
self._run(self.mtest_command + ['--setup=timeout'])
def test_testsetup_selection(self):
testdir = os.path.join(self.unit_test_dir, '14 testsetup selection')
self.init(testdir)
self.build()
# Run tests without setup
self.run_tests()
self.assertRaises(subprocess.CalledProcessError, self._run, self.mtest_command + ['--setup=missingfromfoo'])
self._run(self.mtest_command + ['--setup=missingfromfoo', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=worksforall'])
self._run(self.mtest_command + ['--setup=main:worksforall'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:'])
self._run(self.mtest_command + ['--setup=onlyinbar', '--no-suite=main:', '--no-suite=foo:'])
self._run(self.mtest_command + ['--setup=bar:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=foo:onlyinbar'])
self.assertRaises(subprocess.CalledProcessError, self._run,
self.mtest_command + ['--setup=main:onlyinbar'])
def test_testsetup_default(self):
testdir = os.path.join(self.unit_test_dir, '47 testsetup default')
self.init(testdir)
self.build()
        # Running tests without --setup causes the default setup to be used
self.run_tests()
with open(os.path.join(self.logdir, 'testlog.txt')) as f:
default_log = f.read()
# Run tests with explicitly using the same setup that is set as default
self._run(self.mtest_command + ['--setup=mydefault'])
with open(os.path.join(self.logdir, 'testlog-mydefault.txt')) as f:
mydefault_log = f.read()
# Run tests with another setup
self._run(self.mtest_command + ['--setup=other'])
with open(os.path.join(self.logdir, 'testlog-other.txt')) as f:
other_log = f.read()
self.assertTrue('ENV_A is 1' in default_log)
self.assertTrue('ENV_B is 2' in default_log)
self.assertTrue('ENV_C is 2' in default_log)
self.assertTrue('ENV_A is 1' in mydefault_log)
self.assertTrue('ENV_B is 2' in mydefault_log)
self.assertTrue('ENV_C is 2' in mydefault_log)
self.assertTrue('ENV_A is 1' in other_log)
self.assertTrue('ENV_B is 3' in other_log)
self.assertTrue('ENV_C is 2' in other_log)
def assertFailedTestCount(self, failure_count, command):
try:
self._run(command)
self.assertEqual(0, failure_count, 'Expected %d tests to fail.' % failure_count)
except subprocess.CalledProcessError as e:
self.assertEqual(e.returncode, failure_count)
def test_suite_selection(self):
testdir = os.path.join(self.unit_test_dir, '4 suite selection')
self.init(testdir)
self.build()
self.assertFailedTestCount(3, self.mtest_command)
self.assertFailedTestCount(0, self.mtest_command + ['--suite', ':success'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', ':fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', ':success'])
self.assertFailedTestCount(0, self.mtest_command + ['--no-suite', ':fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'mainprj'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjsucc'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjmix'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'mainprj:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'mainprj:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'mainprj:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'mainprj:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjfail:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjfail:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjfail:success'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjsucc:success'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjsucc:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjsucc:success'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjmix:fail'])
self.assertFailedTestCount(0, self.mtest_command + ['--suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--no-suite', 'subprjmix:success'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix:fail'])
self.assertFailedTestCount(3, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj'])
self.assertFailedTestCount(2, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail'])
self.assertFailedTestCount(1, self.mtest_command + ['--suite', 'subprjfail', '--suite', 'subprjmix', '--suite', 'mainprj', '--no-suite', 'subprjmix:fail', 'mainprj-failing_test'])
self.assertFailedTestCount(1, self.mtest_command + ['--no-suite', 'subprjfail:fail', '--no-suite', 'subprjmix:fail'])
def test_build_by_default(self):
testdir = os.path.join(self.common_test_dir, '134 build by default')
self.init(testdir)
self.build()
genfile1 = os.path.join(self.builddir, 'generated1.dat')
genfile2 = os.path.join(self.builddir, 'generated2.dat')
exe1 = os.path.join(self.builddir, 'fooprog' + exe_suffix)
exe2 = os.path.join(self.builddir, 'barprog' + exe_suffix)
self.assertPathExists(genfile1)
self.assertPathExists(genfile2)
self.assertPathDoesNotExist(exe1)
self.assertPathDoesNotExist(exe2)
self.build(target=('fooprog' + exe_suffix))
self.assertPathExists(exe1)
self.build(target=('barprog' + exe_suffix))
self.assertPathExists(exe2)
def test_internal_include_order(self):
testdir = os.path.join(self.common_test_dir, '135 include order')
self.init(testdir)
execmd = fxecmd = None
for cmd in self.get_compdb():
if 'someexe' in cmd['command']:
execmd = cmd['command']
continue
if 'somefxe' in cmd['command']:
fxecmd = cmd['command']
continue
if not execmd or not fxecmd:
            raise Exception('Could not find someexe and somefxe commands')
# Check include order for 'someexe'
incs = [a for a in shlex.split(execmd) if a.startswith("-I")]
self.assertEqual(len(incs), 9)
# target private dir
someexe_id = Target.construct_id_from_path("sub4", "someexe", "@exe")
self.assertPathEqual(incs[0], "-I" + os.path.join("sub4", someexe_id))
# target build subdir
self.assertPathEqual(incs[1], "-Isub4")
# target source subdir
self.assertPathBasenameEqual(incs[2], 'sub4')
# include paths added via per-target c_args: ['-I'...]
self.assertPathBasenameEqual(incs[3], 'sub3')
# target include_directories: build dir
self.assertPathEqual(incs[4], "-Isub2")
# target include_directories: source dir
self.assertPathBasenameEqual(incs[5], 'sub2')
# target internal dependency include_directories: build dir
self.assertPathEqual(incs[6], "-Isub1")
# target internal dependency include_directories: source dir
self.assertPathBasenameEqual(incs[7], 'sub1')
# custom target include dir
self.assertPathEqual(incs[8], '-Ictsub')
# Check include order for 'somefxe'
incs = [a for a in shlex.split(fxecmd) if a.startswith('-I')]
self.assertEqual(len(incs), 9)
# target private dir
self.assertPathEqual(incs[0], '-Isomefxe@exe')
# target build dir
self.assertPathEqual(incs[1], '-I.')
# target source dir
self.assertPathBasenameEqual(incs[2], os.path.basename(testdir))
# target internal dependency correct include_directories: build dir
self.assertPathEqual(incs[3], "-Isub4")
# target internal dependency correct include_directories: source dir
self.assertPathBasenameEqual(incs[4], 'sub4')
# target internal dependency dep include_directories: build dir
self.assertPathEqual(incs[5], "-Isub1")
# target internal dependency dep include_directories: source dir
self.assertPathBasenameEqual(incs[6], 'sub1')
# target internal dependency wrong include_directories: build dir
self.assertPathEqual(incs[7], "-Isub2")
# target internal dependency wrong include_directories: source dir
self.assertPathBasenameEqual(incs[8], 'sub2')
def test_compiler_detection(self):
'''
Test that automatic compiler detection and setting from the environment
both work just fine. This is needed because while running project tests
and other unit tests, we always read CC/CXX/etc from the environment.
'''
gnu = mesonbuild.compilers.GnuCompiler
clang = mesonbuild.compilers.ClangCompiler
intel = mesonbuild.compilers.IntelCompiler
msvc = mesonbuild.compilers.VisualStudioCCompiler
clangcl = mesonbuild.compilers.ClangClCCompiler
ar = mesonbuild.linkers.ArLinker
lib = mesonbuild.linkers.VisualStudioLinker
langs = [('c', 'CC'), ('cpp', 'CXX')]
if not is_windows():
langs += [('objc', 'OBJC'), ('objcpp', 'OBJCXX')]
testdir = os.path.join(self.unit_test_dir, '5 compiler detection')
env = get_fake_env(testdir, self.builddir, self.prefix)
for lang, evar in langs:
# Detect with evar and do sanity checks on that
if evar in os.environ:
ecc = getattr(env, 'detect_{}_compiler'.format(lang))(False)
self.assertTrue(ecc.version)
elinker = env.detect_static_linker(ecc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop(evar)
# Very rough/strict heuristics. Would never work for actual
# compiler detection, but should be ok for the tests.
ebase = os.path.basename(evalue)
if ebase.startswith('g') or ebase.endswith(('-gcc', '-g++')):
self.assertIsInstance(ecc, gnu)
self.assertIsInstance(elinker, ar)
elif 'clang-cl' in ebase:
self.assertIsInstance(ecc, clangcl)
self.assertIsInstance(elinker, lib)
elif 'clang' in ebase:
self.assertIsInstance(ecc, clang)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('ic'):
self.assertIsInstance(ecc, intel)
self.assertIsInstance(elinker, ar)
elif ebase.startswith('cl'):
self.assertIsInstance(ecc, msvc)
self.assertIsInstance(elinker, lib)
else:
raise AssertionError('Unknown compiler {!r}'.format(evalue))
# Check that we actually used the evalue correctly as the compiler
self.assertEqual(ecc.get_exelist(), shlex.split(evalue))
# Do auto-detection of compiler based on platform, PATH, etc.
cc = getattr(env, 'detect_{}_compiler'.format(lang))(False)
self.assertTrue(cc.version)
linker = env.detect_static_linker(cc)
# Check compiler type
if isinstance(cc, gnu):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_OSX)
elif is_windows():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_MINGW)
elif is_cygwin():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_CYGWIN)
else:
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.GCC_STANDARD)
if isinstance(cc, clang):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_OSX)
elif is_windows():
# Not implemented yet
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_MINGW)
else:
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.CLANG_STANDARD)
if isinstance(cc, intel):
self.assertIsInstance(linker, ar)
if is_osx():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_OSX)
elif is_windows():
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_WIN)
else:
self.assertEqual(cc.compiler_type, mesonbuild.compilers.CompilerType.ICC_STANDARD)
if isinstance(cc, msvc):
self.assertTrue(is_windows())
self.assertIsInstance(linker, lib)
self.assertEqual(cc.id, 'msvc')
self.assertTrue(hasattr(cc, 'is_64'))
# If we're on Windows CI, we know what the compiler will be
if 'arch' in os.environ:
if os.environ['arch'] == 'x64':
self.assertTrue(cc.is_64)
else:
self.assertFalse(cc.is_64)
# Set evar ourselves to a wrapper script that just calls the same
# exelist + some argument. This is meant to test that setting
# something like `ccache gcc -pipe` or `distcc ccache gcc` works.
wrapper = os.path.join(testdir, 'compiler wrapper.py')
wrappercc = python_command + [wrapper] + cc.get_exelist() + ['-DSOME_ARG']
            wrappercc_s = ' '.join(shlex.quote(w) for w in wrappercc)
os.environ[evar] = wrappercc_s
wcc = getattr(env, 'detect_{}_compiler'.format(lang))(False)
# Check static linker too
wrapperlinker = python_command + [wrapper] + linker.get_exelist() + linker.get_always_args()
            wrapperlinker_s = ' '.join(shlex.quote(w) for w in wrapperlinker)
os.environ['AR'] = wrapperlinker_s
wlinker = env.detect_static_linker(wcc)
# Pop it so we don't use it for the next detection
evalue = os.environ.pop('AR')
# Must be the same type since it's a wrapper around the same exelist
self.assertIs(type(cc), type(wcc))
self.assertIs(type(linker), type(wlinker))
# Ensure that the exelist is correct
self.assertEqual(wcc.get_exelist(), wrappercc)
self.assertEqual(wlinker.get_exelist(), wrapperlinker)
# Ensure that the version detection worked correctly
self.assertEqual(cc.version, wcc.version)
if hasattr(cc, 'is_64'):
self.assertEqual(cc.is_64, wcc.is_64)
def test_always_prefer_c_compiler_for_asm(self):
testdir = os.path.join(self.common_test_dir, '138 c cpp and asm')
# Skip if building with MSVC
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(False).get_id() == 'msvc':
raise unittest.SkipTest('MSVC can\'t compile assembly')
self.init(testdir)
commands = {'c-asm': {}, 'cpp-asm': {}, 'cpp-c-asm': {}, 'c-cpp-asm': {}}
for cmd in self.get_compdb():
# Get compiler
split = shlex.split(cmd['command'])
if split[0] == 'ccache':
compiler = split[1]
else:
compiler = split[0]
# Classify commands
if 'Ic-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-asm']['c'] = compiler
else:
                    raise AssertionError('{!r} found in c-asm?'.format(cmd['command']))
elif 'Icpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-asm?'.format(cmd['command']))
elif 'Ic-cpp-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['c-cpp-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['c-cpp-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['c-cpp-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in c-cpp-asm?'.format(cmd['command']))
elif 'Icpp-c-asm' in cmd['command']:
if cmd['file'].endswith('.S'):
commands['cpp-c-asm']['asm'] = compiler
elif cmd['file'].endswith('.c'):
commands['cpp-c-asm']['c'] = compiler
elif cmd['file'].endswith('.cpp'):
commands['cpp-c-asm']['cpp'] = compiler
else:
raise AssertionError('{!r} found in cpp-c-asm?'.format(cmd['command']))
else:
raise AssertionError('Unknown command {!r} found'.format(cmd['command']))
# Check that .S files are always built with the C compiler
self.assertEqual(commands['c-asm']['asm'], commands['c-asm']['c'])
self.assertEqual(commands['c-asm']['asm'], commands['cpp-asm']['asm'])
self.assertEqual(commands['cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['c-cpp-asm']['asm'], commands['c-cpp-asm']['c'])
self.assertEqual(commands['cpp-c-asm']['asm'], commands['cpp-c-asm']['c'])
self.assertNotEqual(commands['cpp-asm']['asm'], commands['cpp-asm']['cpp'])
self.assertNotEqual(commands['c-cpp-asm']['c'], commands['c-cpp-asm']['cpp'])
self.assertNotEqual(commands['cpp-c-asm']['c'], commands['cpp-c-asm']['cpp'])
# Check that the c-asm target is always linked with the C linker
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('build c-asm.*: c_LINKER', contents)
self.assertIsNotNone(m, msg=contents)
def test_preprocessor_checks_CPPFLAGS(self):
'''
Test that preprocessor compiler checks read CPPFLAGS but not CFLAGS
'''
testdir = os.path.join(self.common_test_dir, '137 get define')
define = 'MESON_TEST_DEFINE_VALUE'
# NOTE: this list can't have \n, ' or "
# \n is never substituted by the GNU pre-processor via a -D define
# ' and " confuse shlex.split() even when they are escaped
# % and # confuse the MSVC preprocessor
# !, ^, *, and < confuse lcc preprocessor
value = 'spaces and fun@$&()-=_+{}[]:;>?,./~`'
os.environ['CPPFLAGS'] = '-D{}="{}"'.format(define, value)
        os.environ['CFLAGS'] = '-DMESON_FAIL_VALUE=cflags-read'
self.init(testdir, ['-D{}={}'.format(define, value)])
def test_custom_target_exe_data_deterministic(self):
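        # Two configurations of the same tree must produce identical lists of
        # meson_exe*.dat wrapper files.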
testdir = os.path.join(self.common_test_dir, '114 custom target capture')
self.init(testdir)
meson_exe_dat1 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.wipe()
self.init(testdir)
meson_exe_dat2 = glob(os.path.join(self.privatedir, 'meson_exe*.dat'))
self.assertListEqual(meson_exe_dat1, meson_exe_dat2)
def test_source_changes_cause_rebuild(self):
'''
Test that changes to sources and headers cause rebuilds, but not
changes to unused files (as determined by the dependency file) in the
input files list.
'''
testdir = os.path.join(self.common_test_dir, '20 header in file list')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of header.h should rebuild everything
self.utime(os.path.join(testdir, 'header.h'))
self.assertRebuiltTarget('prog')
def test_custom_target_changes_cause_rebuild(self):
'''
Test that in a custom target, changes to the input files, the
ExternalProgram, and any File objects on the command-line cause
a rebuild.
'''
testdir = os.path.join(self.common_test_dir, '61 custom header generator')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of these should rebuild everything
for f in ('input.def', 'makeheader.py', 'somefile.txt'):
self.utime(os.path.join(testdir, f))
self.assertRebuiltTarget('prog')
def test_static_library_lto(self):
'''
Test that static libraries can be built with LTO and linked to
executables. On Linux, this requires the use of gcc-ar.
https://github.com/mesonbuild/meson/issues/1646
'''
testdir = os.path.join(self.common_test_dir, '5 linkstatic')
env = get_fake_env(testdir, self.builddir, self.prefix)
if env.detect_c_compiler(False).get_id() == 'clang' and is_windows():
raise unittest.SkipTest('LTO not (yet) supported by windows clang')
self.init(testdir, extra_args='-Db_lto=true')
self.build()
self.run_tests()
def test_dist_git(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
try:
self.dist_impl(_git_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def test_dist_hg(self):
if not shutil.which('hg'):
raise unittest.SkipTest('Mercurial not found')
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Dist is only supported with Ninja')
def hg_init(project_dir):
subprocess.check_call(['hg', 'init'], cwd=project_dir)
with open(os.path.join(project_dir, '.hg', 'hgrc'), 'w') as f:
print('[ui]', file=f)
print('username=Author Person <teh_coderz@example.com>', file=f)
subprocess.check_call(['hg', 'add', 'meson.build', 'distexe.c'], cwd=project_dir)
subprocess.check_call(['hg', 'commit', '-m', 'I am a project'], cwd=project_dir)
try:
self.dist_impl(hg_init)
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the hg files so cleaning up the dir
# fails sometimes.
pass
def test_dist_git_script(self):
if not shutil.which('git'):
raise unittest.SkipTest('Git not found')
try:
with tempfile.TemporaryDirectory() as tmpdir:
project_dir = os.path.join(tmpdir, 'a')
shutil.copytree(os.path.join(self.unit_test_dir, '35 dist script'),
project_dir)
_git_init(project_dir)
self.init(project_dir)
self.build('dist')
except PermissionError:
# When run under Windows CI, something (virus scanner?)
# holds on to the git files so cleaning up the dir
# fails sometimes.
pass
def dist_impl(self, vcs_init):
# Create this on the fly because having rogue .git directories inside
# the source tree leads to all kinds of trouble.
with tempfile.TemporaryDirectory() as project_dir:
with open(os.path.join(project_dir, 'meson.build'), 'w') as ofile:
ofile.write('''project('disttest', 'c', version : '1.4.3')
e = executable('distexe', 'distexe.c')
test('dist test', e)
''')
with open(os.path.join(project_dir, 'distexe.c'), 'w') as ofile:
ofile.write('''#include<stdio.h>
int main(int argc, char **argv) {
printf("I am a distribution test.\\n");
return 0;
}
''')
vcs_init(project_dir)
self.init(project_dir)
self.build('dist')
distfile = os.path.join(self.distdir, 'disttest-1.4.3.tar.xz')
checksumfile = distfile + '.sha256sum'
self.assertPathExists(distfile)
self.assertPathExists(checksumfile)
def test_rpath_uses_ORIGIN(self):
'''
Test that built targets use $ORIGIN in rpath, which ensures that they
are relocatable and ensures that builds are reproducible since the
build directory won't get embedded into the built binaries.
'''
if is_windows() or is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.common_test_dir, '43 library chain')
self.init(testdir)
self.build()
for each in ('prog', 'subdir/liblib1.so', ):
rpath = get_rpath(os.path.join(self.builddir, each))
self.assertTrue(rpath, 'Rpath could not be determined for {}.'.format(each))
if is_dragonflybsd():
# DragonflyBSD will prepend /usr/lib/gccVERSION to the rpath,
# so ignore that.
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
rpaths = rpath.split(':')[1:]
else:
rpaths = rpath.split(':')
for path in rpaths:
self.assertTrue(path.startswith('$ORIGIN'), msg=(each, path))
# These two don't link to anything else, so they do not need an rpath entry.
for each in ('subdir/subdir2/liblib2.so', 'subdir/subdir3/liblib3.so'):
rpath = get_rpath(os.path.join(self.builddir, each))
if is_dragonflybsd():
# The rpath should be equal to /usr/lib/gccVERSION
self.assertTrue(rpath.startswith('/usr/lib/gcc'))
self.assertEqual(len(rpath.split(':')), 1)
else:
                self.assertIsNone(rpath)
def test_dash_d_dedup(self):
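        # Both preprocessor defines must survive into the compile command, in
        # whichever -D / /D spelling the compiler uses.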
testdir = os.path.join(self.unit_test_dir, '9 d dedup')
self.init(testdir)
cmd = self.get_compdb()[0]['command']
self.assertTrue('-D FOO -D BAR' in cmd or
'"-D" "FOO" "-D" "BAR"' in cmd or
'/D FOO /D BAR' in cmd or
'"/D" "FOO" "/D" "BAR"' in cmd)
def test_all_forbidden_targets_tested(self):
'''
Test that all forbidden targets are tested in the '155 reserved targets'
test. Needs to be a unit test because it accesses Meson internals.
'''
testdir = os.path.join(self.common_test_dir, '155 reserved targets')
targets = mesonbuild.coredata.forbidden_target_names
# We don't actually define a target with this name
targets.pop('build.ninja')
# Remove this to avoid multiple entries with the same name
# but different case.
targets.pop('PHONY')
for i in targets:
self.assertPathExists(os.path.join(testdir, i))
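    # Helpers shared by the prebuilt object/static/shared library tests below.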
def detect_prebuild_env(self):
env = get_fake_env('', self.builddir, self.prefix)
cc = env.detect_c_compiler(False)
stlinker = env.detect_static_linker(cc)
if mesonbuild.mesonlib.is_windows():
object_suffix = 'obj'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_cygwin():
object_suffix = 'o'
shared_suffix = 'dll'
elif mesonbuild.mesonlib.is_osx():
object_suffix = 'o'
shared_suffix = 'dylib'
else:
object_suffix = 'o'
shared_suffix = 'so'
return (cc, stlinker, object_suffix, shared_suffix)
def pbcompile(self, compiler, source, objectfile, extra_args=[]):
cmd = compiler.get_exelist()
if compiler.get_argument_syntax() == 'msvc':
cmd += ['/nologo', '/Fo' + objectfile, '/c', source] + extra_args
else:
cmd += ['-c', source, '-o', objectfile] + extra_args
subprocess.check_call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def test_prebuilt_object(self):
(compiler, _, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '15 prebuilt object')
source = os.path.join(tdir, 'source.c')
objectfile = os.path.join(tdir, 'prebuilt.' + object_suffix)
self.pbcompile(compiler, source, objectfile)
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(objectfile)
def build_static_lib(self, compiler, linker, source, objectfile, outfile, extra_args=None):
if extra_args is None:
extra_args = []
        # Build the archive with the detected static linker.
        link_cmd = linker.get_exelist()
link_cmd += linker.get_always_args()
link_cmd += linker.get_std_link_args()
link_cmd += linker.get_output_args(outfile)
link_cmd += [objectfile]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_static_lib(self):
(cc, stlinker, object_suffix, _) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '16 prebuilt static')
source = os.path.join(tdir, 'libdir/best.c')
objectfile = os.path.join(tdir, 'libdir/best.' + object_suffix)
stlibfile = os.path.join(tdir, 'libdir/libbest.a')
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
def build_shared_lib(self, compiler, source, objectfile, outfile, impfile, extra_args=None):
if extra_args is None:
extra_args = []
if compiler.get_argument_syntax() == 'msvc':
link_cmd = compiler.get_linker_exelist() + [
'/NOLOGO', '/DLL', '/DEBUG', '/IMPLIB:' + impfile,
'/OUT:' + outfile, objectfile]
else:
if not (compiler.compiler_type.is_windows_compiler or
compiler.compiler_type.is_osx_compiler):
extra_args += ['-fPIC']
link_cmd = compiler.get_exelist() + ['-shared', '-o', outfile, objectfile]
if not mesonbuild.mesonlib.is_osx():
link_cmd += ['-Wl,-soname=' + os.path.basename(outfile)]
self.pbcompile(compiler, source, objectfile, extra_args=extra_args)
try:
subprocess.check_call(link_cmd)
finally:
os.unlink(objectfile)
def test_prebuilt_shared_lib(self):
(cc, _, object_suffix, shared_suffix) = self.detect_prebuild_env()
tdir = os.path.join(self.unit_test_dir, '17 prebuilt shared')
source = os.path.join(tdir, 'alexandria.c')
objectfile = os.path.join(tdir, 'alexandria.' + object_suffix)
impfile = os.path.join(tdir, 'alexandria.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(tdir, 'alexandria.' + shared_suffix)
elif is_cygwin():
shlibfile = os.path.join(tdir, 'cygalexandria.' + shared_suffix)
else:
shlibfile = os.path.join(tdir, 'libalexandria.' + shared_suffix)
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run the test
try:
self.init(tdir)
self.build()
self.run_tests()
finally:
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(tdir, 'alexandria.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_static(self):
'''
        Test that we prefer static libraries when `static: true` is
passed to dependency() with pkg-config. Can't be an ordinary test
because we need to build libs and try to find them from meson.build
Also test that it's not a hard error to have unsatisfiable library deps
since system libraries -lm will never be found statically.
https://github.com/mesonbuild/meson/issues/2785
'''
(cc, stlinker, objext, shext) = self.detect_prebuild_env()
testdir = os.path.join(self.unit_test_dir, '18 pkgconfig static')
source = os.path.join(testdir, 'foo.c')
objectfile = os.path.join(testdir, 'foo.' + objext)
stlibfile = os.path.join(testdir, 'libfoo.a')
impfile = os.path.join(testdir, 'foo.lib')
if cc.get_argument_syntax() == 'msvc':
shlibfile = os.path.join(testdir, 'foo.' + shext)
elif is_cygwin():
shlibfile = os.path.join(testdir, 'cygfoo.' + shext)
else:
shlibfile = os.path.join(testdir, 'libfoo.' + shext)
# Build libs
self.build_static_lib(cc, stlinker, source, objectfile, stlibfile, extra_args=['-DFOO_STATIC'])
self.build_shared_lib(cc, source, objectfile, shlibfile, impfile)
# Run test
os.environ['PKG_CONFIG_LIBDIR'] = self.builddir
try:
self.init(testdir)
self.build()
self.run_tests()
finally:
os.unlink(stlibfile)
os.unlink(shlibfile)
if mesonbuild.mesonlib.is_windows():
# Clean up all the garbage MSVC writes in the
# source tree.
for fname in glob(os.path.join(testdir, 'foo.*')):
if os.path.splitext(fname)[1] not in ['.c', '.h', '.in']:
os.unlink(fname)
@skipIfNoPkgconfig
def test_pkgconfig_gen_escaping(self):
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen')
prefix = '/usr/with spaces'
libdir = 'lib'
self.init(testdir, extra_args=['--prefix=' + prefix,
'--libdir=' + libdir])
# Find foo dependency
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
# Ensure link_args are properly quoted
libdir = PurePath(prefix) / PurePath(libdir)
link_args = ['-L' + libdir.as_posix(), '-lfoo']
self.assertEqual(foo_dep.get_link_args(), link_args)
# Ensure include args are properly quoted
incdir = PurePath(prefix) / PurePath('include')
cargs = ['-I' + incdir.as_posix()]
self.assertEqual(foo_dep.get_compile_args(), cargs)
def test_array_option_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
expected['value'] = ['oink', 'boink']
self.setconf('-Dlist=oink,boink')
changed = get_opt()
self.assertEqual(changed, expected)
def test_array_option_bad_change(self):
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': ['foo', 'bar'],
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir)
original = get_opt()
self.assertDictEqual(original, expected)
with self.assertRaises(subprocess.CalledProcessError):
self.setconf('-Dlist=bad')
changed = get_opt()
self.assertDictEqual(changed, expected)
def test_array_option_empty_equivalents(self):
"""Array options treat -Dopt=[] and -Dopt= as equivalent."""
def get_opt():
opts = self.introspect('--buildoptions')
for x in opts:
if x.get('name') == 'list':
return x
raise Exception(opts)
expected = {
'name': 'list',
'description': 'list',
'section': 'user',
'type': 'array',
'value': [],
}
tdir = os.path.join(self.unit_test_dir, '19 array option')
self.init(tdir, extra_args='-Dlist=')
original = get_opt()
self.assertDictEqual(original, expected)
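    # Helper: assert that the introspected build option 'name' has the given value.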
def opt_has(self, name, value):
res = self.introspect('--buildoptions')
found = False
for i in res:
if i['name'] == name:
self.assertEqual(i['value'], value)
found = True
break
        self.assertTrue(found, "Option {!r} not found in introspect data.".format(name))
def test_free_stringarray_setting(self):
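        # A free-form array option must accept both plain comma-separated values
        # and a list literal with quoted, comma-containing entries.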
testdir = os.path.join(self.common_test_dir, '44 options')
self.init(testdir)
self.opt_has('free_array_opt', [])
self.setconf('-Dfree_array_opt=foo,bar', will_build=False)
self.opt_has('free_array_opt', ['foo', 'bar'])
self.setconf("-Dfree_array_opt=['a,b', 'c,d']", will_build=False)
self.opt_has('free_array_opt', ['a,b', 'c,d'])
def test_subproject_promotion(self):
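        # 'meson wrap promote' must copy a nested subproject into the top-level
        # subprojects dir, and must reject ambiguous names and invalid paths.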
testdir = os.path.join(self.unit_test_dir, '12 promote')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
s3dir = os.path.join(spdir, 's3')
scommondir = os.path.join(spdir, 'scommon')
self.assertFalse(os.path.isdir(s3dir))
subprocess.check_call(self.wrap_command + ['promote', 's3'], cwd=workdir)
self.assertTrue(os.path.isdir(s3dir))
self.assertFalse(os.path.isdir(scommondir))
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'scommon'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'invalid/path/to/scommon'],
cwd=workdir,
stderr=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isdir(scommondir))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/scommon'], cwd=workdir)
self.assertTrue(os.path.isdir(scommondir))
promoted_wrap = os.path.join(spdir, 'athing.wrap')
self.assertFalse(os.path.isfile(promoted_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'athing'], cwd=workdir)
self.assertTrue(os.path.isfile(promoted_wrap))
self.init(workdir)
self.build()
def test_subproject_promotion_wrap(self):
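        # Promoting a wrap file must fail for an ambiguous name but succeed when
        # the full path to the .wrap file is given.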
testdir = os.path.join(self.unit_test_dir, '44 promote wrap')
workdir = os.path.join(self.builddir, 'work')
shutil.copytree(testdir, workdir)
spdir = os.path.join(workdir, 'subprojects')
ambiguous_wrap = os.path.join(spdir, 'ambiguous.wrap')
self.assertNotEqual(subprocess.call(self.wrap_command + ['promote', 'ambiguous'],
cwd=workdir,
stdout=subprocess.DEVNULL), 0)
self.assertFalse(os.path.isfile(ambiguous_wrap))
subprocess.check_call(self.wrap_command + ['promote', 'subprojects/s2/subprojects/ambiguous.wrap'], cwd=workdir)
self.assertTrue(os.path.isfile(ambiguous_wrap))
def test_warning_location(self):
tdir = os.path.join(self.unit_test_dir, '22 warning location')
out = self.init(tdir)
for expected in [
r'meson.build:4: WARNING: Keyword argument "link_with" defined multiple times.',
r'sub' + os.path.sep + r'meson.build:3: WARNING: Keyword argument "link_with" defined multiple times.',
r'meson.build:6: WARNING: a warning of some sort',
r'sub' + os.path.sep + r'meson.build:4: WARNING: subdir warning',
r'meson.build:7: WARNING: Module unstable-simd has no backwards or forwards compatibility and might not exist in future releases.',
r"meson.build:11: WARNING: The variable(s) 'MISSING' in the input file 'conf.in' are not present in the given configuration data.",
r'meson.build:1: WARNING: Passed invalid keyword argument "invalid".',
]:
self.assertRegex(out, re.escape(expected))
def test_permitted_method_kwargs(self):
tdir = os.path.join(self.unit_test_dir, '25 non-permitted kwargs')
out = self.init(tdir)
for expected in [
r'WARNING: Passed invalid keyword argument "prefixxx".',
r'WARNING: Passed invalid keyword argument "argsxx".',
r'WARNING: Passed invalid keyword argument "invalidxx".',
]:
self.assertRegex(out, re.escape(expected))
def test_templates(self):
ninja = detect_ninja()
if ninja is None:
raise unittest.SkipTest('This test currently requires ninja. Fix this once "meson build" works.')
for lang in ('c', 'cpp'):
for type in ('executable', 'library'):
with tempfile.TemporaryDirectory() as tmpdir:
self._run(self.meson_command + ['init', '--language', lang, '--type', type],
workdir=tmpdir)
self._run(self.setup_command + ['--backend=ninja', 'builddir'],
workdir=tmpdir)
self._run(ninja,
workdir=os.path.join(tmpdir, 'builddir'))
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'foo.' + lang), 'w') as f:
f.write('int main() {}')
self._run(self.meson_command + ['init', '-b'], workdir=tmpdir)
# The test uses mocking and thus requires that
# the current process is the one to run the Meson steps.
# If we are using an external test executable (most commonly
# in Debian autopkgtests) then the mocking won't work.
@unittest.skipIf('MESON_EXE' in os.environ, 'MESON_EXE is defined, can not use mocking.')
def test_cross_file_system_paths(self):
if is_windows():
raise unittest.SkipTest('system crossfile paths not defined for Windows (yet)')
testdir = os.path.join(self.common_test_dir, '1 trivial')
cross_content = textwrap.dedent("""\
[binaries]
c = '/usr/bin/cc'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'x86'
cpu = 'i686'
endian = 'little'
""")
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch.dict(os.environ, {'XDG_DATA_HOME': d}):
self.init(testdir, ['--cross-file=' + name], inprocess=True)
self.wipe()
with mock.patch.dict(os.environ, {'XDG_DATA_DIRS': d}):
os.environ.pop('XDG_DATA_HOME', None)
self.init(testdir, ['--cross-file=' + name], inprocess=True)
self.wipe()
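        # The same lookup must also work via ~/.local/share/meson/cross.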
with tempfile.TemporaryDirectory() as d:
dir_ = os.path.join(d, '.local', 'share', 'meson', 'cross')
os.makedirs(dir_)
with tempfile.NamedTemporaryFile('w', dir=dir_, delete=False) as f:
f.write(cross_content)
name = os.path.basename(f.name)
with mock.patch('mesonbuild.coredata.os.path.expanduser', lambda x: x.replace('~', d)):
self.init(testdir, ['--cross-file=' + name], inprocess=True)
self.wipe()
def test_introspect_target_files(self):
'''
Tests that mesonintrospect --target-files returns expected output.
'''
testdir = os.path.join(self.common_test_dir, '8 install')
self.init(testdir)
expected = {
'stat@sta': ['stat.c'],
'prog@exe': ['prog.c'],
}
t_intro = self.introspect('--targets')
self.assertCountEqual([t['id'] for t in t_intro], expected)
for t in t_intro:
id = t['id']
tf_intro = self.introspect(['--target-files', id])
self.assertEqual(tf_intro, expected[id])
self.wipe()
testdir = os.path.join(self.common_test_dir, '53 custom target')
self.init(testdir)
expected = {
'bindat@cus': ['data_source.txt'],
'depfile@cus': [],
}
t_intro = self.introspect('--targets')
self.assertCountEqual([t['id'] for t in t_intro], expected)
for t in t_intro:
id = t['id']
tf_intro = self.introspect(['--target-files', id])
self.assertEqual(tf_intro, expected[id])
self.wipe()
def test_compiler_run_command(self):
'''
The test checks that the compiler object can be passed to
run_command().
'''
testdir = os.path.join(self.unit_test_dir, '24 compiler run_command')
self.init(testdir)
def test_identical_target_name_in_subproject_flat_layout(self):
'''
Test that identical targets in different subprojects do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '178 identical target name in subproject flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_identical_target_name_in_subdir_flat_layout(self):
'''
Test that identical targets in different subdirs do not collide
if layout is flat.
'''
testdir = os.path.join(self.common_test_dir, '187 same target name flat layout')
self.init(testdir, extra_args=['--layout=flat'])
self.build()
def test_flock(self):
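        # Acquiring the build directory lock twice must raise, proving the lock
        # is actually exclusive.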
exception_raised = False
with tempfile.TemporaryDirectory() as tdir:
os.mkdir(os.path.join(tdir, 'meson-private'))
with BuildDirLock(tdir):
try:
with BuildDirLock(tdir):
pass
except MesonException:
exception_raised = True
self.assertTrue(exception_raised, 'Double locking did not raise exception.')
@unittest.skipIf(is_osx(), 'Test not applicable to OSX')
def test_check_module_linking(self):
"""
Test that link_with: a shared module issues a warning
https://github.com/mesonbuild/meson/issues/2865
(That an error is raised on OSX is exercised by test failing/78)
"""
tdir = os.path.join(self.unit_test_dir, '30 shared_mod linking')
out = self.init(tdir)
        msg = ('WARNING: target links against shared modules. This is not\n'
               'recommended as it is not supported on some platforms')
self.assertIn(msg, out)
def test_ndebug_if_release_disabled(self):
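        # -Db_ndebug=if-release must define NDEBUG for a release build ...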
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=release', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=1', subprocess.check_output(exe).strip())
def test_ndebug_if_release_enabled(self):
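        # ... and must not define NDEBUG for a debugoptimized build.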
testdir = os.path.join(self.unit_test_dir, '28 ndebug if-release')
self.init(testdir, extra_args=['--buildtype=debugoptimized', '-Db_ndebug=if-release'])
self.build()
exe = os.path.join(self.builddir, 'main')
self.assertEqual(b'NDEBUG=0', subprocess.check_output(exe).strip())
def test_guessed_linker_dependencies(self):
'''
Test that meson adds dependencies for libraries based on the final
linker command line.
'''
# build library
testdirbase = os.path.join(self.unit_test_dir, '29 guessed linker dependencies')
testdirlib = os.path.join(testdirbase, 'lib')
extra_args = None
env = get_fake_env(testdirlib, self.builddir, self.prefix)
if env.detect_c_compiler(False).get_id() not in ['msvc', 'clang-cl']:
            # Static libraries are not linkable with -l under MSVC because Meson installs them
            # as .a files, which unix_args_to_native will not recognise since it expects
            # libraries to use the .lib extension. For a DLL the import library is installed
            # as .lib. Thus for MSVC this test needs to use shared libraries to exercise the
            # path-resolving logic in the dependency generation code path.
extra_args = ['--default-library', 'static']
self.init(testdirlib, extra_args=extra_args)
self.build()
self.install()
libbuilddir = self.builddir
installdir = self.installdir
libdir = os.path.join(self.installdir, self.prefix.lstrip('/').lstrip('\\'), 'lib')
# build user of library
self.new_builddir()
        # The replace() is needed because Meson mangles platform paths passed via LDFLAGS
os.environ["LDFLAGS"] = '-L{}'.format(libdir.replace('\\', '/'))
self.init(os.path.join(testdirbase, 'exe'))
del os.environ["LDFLAGS"]
self.build()
self.assertBuildIsNoop()
# rebuild library
exebuilddir = self.builddir
self.installdir = installdir
self.builddir = libbuilddir
# Microsoft's compiler is quite smart about touching import libs on changes,
# so ensure that there is actually a change in symbols.
self.setconf('-Dmore_exports=true')
self.build()
self.install()
# no ensure_backend_detects_changes needed because self.setconf did that already
        # assert that the user of the library will be rebuilt
self.builddir = exebuilddir
self.assertRebuiltTarget('app')
def test_conflicting_d_dash_option(self):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
with self.assertRaises(subprocess.CalledProcessError) as e:
self.init(testdir, extra_args=['-Dbindir=foo', '--bindir=bar'])
# Just to ensure that we caught the correct error
self.assertIn('passed as both', e.stderr)
def _test_same_option_twice(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir, extra_args=args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice(self):
self._test_same_option_twice('bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice(self):
self._test_same_option_twice('bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice(self):
self._test_same_option_twice('one', ['-Done=foo', '-Done=bar'])
def _test_same_option_twice_configure(self, arg, args):
testdir = os.path.join(self.unit_test_dir, '37 mixed command line args')
self.init(testdir)
self.setconf(args)
opts = self.introspect('--buildoptions')
for item in opts:
if item['name'] == arg:
self.assertEqual(item['value'], 'bar')
return
raise Exception('Missing {} value?'.format(arg))
def test_same_dash_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['--bindir=foo', '--bindir=bar'])
def test_same_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'bindir', ['-Dbindir=foo', '-Dbindir=bar'])
def test_same_project_d_option_twice_configure(self):
self._test_same_option_twice_configure(
'one', ['-Done=foo', '-Done=bar'])
def test_command_line(self):
testdir = os.path.join(self.unit_test_dir, '34 command line')
# Verify default values when passing no args
self.init(testdir)
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'static')
self.assertEqual(obj.builtins['warning_level'].value, '1')
self.assertEqual(obj.user_options['set_sub_opt'].value, True)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'default3')
self.wipe()
# warning_level is special, it's --warnlevel instead of --warning-level
# for historical reasons
self.init(testdir, extra_args=['--warnlevel=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('--warnlevel=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# But when using -D syntax, it should be 'warning_level'
self.init(testdir, extra_args=['-Dwarning_level=2'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '2')
self.setconf('-Dwarning_level=3')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['warning_level'].value, '3')
self.wipe()
# Mixing --option and -Doption is forbidden
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf(['--warnlevel=1', '-Dwarning_level=3'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('as both', cm.exception.output)
self.wipe()
# --default-library should override default value from project()
self.init(testdir, extra_args=['--default-library=both'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'both')
self.setconf('--default-library=shared')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
if self.backend is Backend.ninja:
# reconfigure target works only with ninja backend
self.build('reconfigure')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['default_library'].value, 'shared')
self.wipe()
# Should warn on unknown options
out = self.init(testdir, extra_args=['-Dbad=1', '-Dfoo=2', '-Dwrong_link_args=foo'])
self.assertIn('Unknown options: "bad, foo, wrong_link_args"', out)
self.wipe()
# Should fail on malformed option
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(testdir, extra_args=['-Dfoo'])
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.init(testdir)
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.setconf('-Dfoo')
self.assertNotEqual(0, cm.exception.returncode)
self.assertIn('Option \'foo\' must have a value separated by equals sign.', cm.exception.output)
self.wipe()
        # It is not an error to set a wrong option for unknown subprojects or
        # languages because we don't have control over which one will be selected.
self.init(testdir, extra_args=['-Dc_wrong=1', '-Dwrong:bad=1', '-Db_wrong=1'])
self.wipe()
# Test we can set subproject option
self.init(testdir, extra_args=['-Dsubp:subp_opt=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.user_options['subp:subp_opt'].value, 'foo')
self.wipe()
# c_args value should be parsed with shlex
self.init(testdir, extra_args=['-Dc_args=foo bar "one two"'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options['c_args'].value, ['foo', 'bar', 'one two'])
self.setconf('-Dc_args="foo bar" one two')
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.compiler_options['c_args'].value, ['foo bar', 'one', 'two'])
self.wipe()
        # Setting the same option a second time should override the first value
try:
self.init(testdir, extra_args=['--bindir=foo', '--bindir=bar',
'-Dbuildtype=plain', '-Dbuildtype=release',
'-Db_sanitize=address', '-Db_sanitize=thread',
'-Dc_args=foo', '-Dc_args=bar'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'bar')
self.assertEqual(obj.builtins['buildtype'].value, 'release')
self.assertEqual(obj.base_options['b_sanitize'].value, 'thread')
self.assertEqual(obj.compiler_options['c_args'].value, ['bar'])
self.setconf(['--bindir=bar', '--bindir=foo',
'-Dbuildtype=release', '-Dbuildtype=plain',
'-Db_sanitize=thread', '-Db_sanitize=address',
'-Dc_args=bar', '-Dc_args=foo'])
obj = mesonbuild.coredata.load(self.builddir)
self.assertEqual(obj.builtins['bindir'].value, 'foo')
self.assertEqual(obj.builtins['buildtype'].value, 'plain')
self.assertEqual(obj.base_options['b_sanitize'].value, 'address')
self.assertEqual(obj.compiler_options['c_args'].value, ['foo'])
self.wipe()
except KeyError:
            # Ignore KeyError; it happens on CI for compilers that do not
            # support b_sanitize. We have to test with a base option because
            # they used to fail this test with Meson 0.46 and earlier versions.
pass
def test_feature_check_usage_subprojects(self):
testdir = os.path.join(self.unit_test_dir, '41 featurenew subprojects')
out = self.init(testdir)
# Parent project warns correctly
self.assertRegex(out, "WARNING: Project targetting '>=0.45'.*'0.47.0': dict")
# Subprojects warn correctly
self.assertRegex(out, r"\|WARNING: Project targetting '>=0.40'.*'0.44.0': disabler")
self.assertRegex(out, r"\|WARNING: Project targetting '!=0.40'.*'0.44.0': disabler")
# Subproject has a new-enough meson_version, no warning
self.assertNotRegex(out, "WARNING: Project targetting.*Python")
# Ensure a summary is printed in the subproject and the outer project
self.assertRegex(out, r"\|WARNING: Project specifies a minimum meson_version '>=0.40'")
self.assertRegex(out, r"\| \* 0.44.0: {'disabler'}")
self.assertRegex(out, "WARNING: Project specifies a minimum meson_version '>=0.45'")
self.assertRegex(out, " * 0.47.0: {'dict'}")
def test_configure_file_warnings(self):
testdir = os.path.join(self.common_test_dir, "14 configure file")
out = self.init(testdir)
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*'FOO_BAR'.*nosubst-nocopy2.txt.in.*not present.*")
self.assertRegex(out, "WARNING:.*'empty'.*config.h.in.*not present.*")
self.assertRegex(out, "WARNING:.*empty configuration_data.*test.py.in")
# Warnings for configuration files that are overwritten.
self.assertRegex(out, "WARNING:.*\"double_output.txt\".*overwrites")
self.assertRegex(out, "WARNING:.*\"subdir.double_output2.txt\".*overwrites")
self.assertNotRegex(out, "WARNING:.*no_write_conflict.txt.*overwrites")
self.assertNotRegex(out, "WARNING:.*@BASENAME@.*overwrites")
self.assertRegex(out, "WARNING:.*\"sameafterbasename\".*overwrites")
# No warnings about empty configuration data objects passed to files with substitutions
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy1.txt.in")
self.assertNotRegex(out, "WARNING:.*empty configuration_data.*nosubst-nocopy2.txt.in")
with open(os.path.join(self.builddir, 'nosubst-nocopy1.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'/* #undef FOO_BAR */')
with open(os.path.join(self.builddir, 'nosubst-nocopy2.txt'), 'rb') as f:
self.assertEqual(f.read().strip(), b'')
self.assertRegex(out, r"DEPRECATION:.*\['array'\] is invalid.*dict")
def test_dirs(self):
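        # With no directory arguments meson must error out when run from the
        # source dir, but succeed when run from a build dir created inside it.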
with tempfile.TemporaryDirectory() as containing:
with tempfile.TemporaryDirectory(dir=containing) as srcdir:
mfile = os.path.join(srcdir, 'meson.build')
                with open(mfile, 'w') as of:
                    of.write("project('foobar', 'c')\n")
pc = subprocess.run(self.setup_command,
cwd=srcdir,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
self.assertIn(b'Must specify at least one directory name', pc.stdout)
with tempfile.TemporaryDirectory(dir=srcdir) as builddir:
subprocess.run(self.setup_command,
check=True,
cwd=builddir,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
def get_opts_as_dict(self):
result = {}
for i in self.introspect('--buildoptions'):
result[i['name']] = i['value']
return result
def test_buildtype_setting(self):
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.setconf('-Ddebug=false')
opts = self.get_opts_as_dict()
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['buildtype'], 'plain')
self.assertEqual(opts['optimization'], '0')
# Setting optimizations to 3 should cause buildtype
# to go to release mode.
self.setconf('-Doptimization=3')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'release')
self.assertEqual(opts['debug'], False)
self.assertEqual(opts['optimization'], '3')
# Going to debug build type should reset debugging
# and optimization
self.setconf('-Dbuildtype=debug')
opts = self.get_opts_as_dict()
self.assertEqual(opts['buildtype'], 'debug')
self.assertEqual(opts['debug'], True)
self.assertEqual(opts['optimization'], '0')
@skipIfNoPkgconfig
@unittest.skipIf(is_windows(), 'Help needed with fixing this test on windows')
def test_native_dep_pkgconfig(self):
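        # The cross file supplies its own pkg-config wrapper script, while
        # PKG_CONFIG_LIBDIR points the regular pkg-config at the native files.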
testdir = os.path.join(self.unit_test_dir,
'46 native dep pkgconfig var')
with tempfile.NamedTemporaryFile(mode='w', delete=False) as crossfile:
crossfile.write(textwrap.dedent(
'''[binaries]
pkgconfig = r'{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7'
endian = 'little'
'''.format(os.path.join(testdir, 'cross_pkgconfig.py'))))
crossfile.flush()
self.meson_cross_file = crossfile.name
os.environ['PKG_CONFIG_LIBDIR'] = os.path.join(testdir,
'native_pkgconfig')
self.init(testdir, extra_args=['-Dstart_native=false'])
self.wipe()
self.init(testdir, extra_args=['-Dstart_native=true'])
def __reconfigure(self):
# Set an older version to force a reconfigure from scratch
filename = os.path.join(self.privatedir, 'coredata.dat')
with open(filename, 'rb') as f:
obj = pickle.load(f)
obj.version = '0.47.0'
with open(filename, 'wb') as f:
pickle.dump(obj, f)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '46 reconfigure')
self.init(testdir, extra_args=['-Dopt1=val1'])
self.setconf('-Dopt2=val2')
self.__reconfigure()
out = self.init(testdir, extra_args=['--reconfigure', '-Dopt3=val3'])
self.assertRegex(out, 'WARNING:.*Regenerating configuration from scratch')
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 default4')
self.build()
self.run_tests()
# Create a file in builddir and verify wipe command removes it
filename = os.path.join(self.builddir, 'something')
open(filename, 'w').close()
self.assertTrue(os.path.exists(filename))
out = self.init(testdir, extra_args=['--wipe', '-Dopt4=val4'])
self.assertFalse(os.path.exists(filename))
self.assertRegex(out, 'opt1 val1')
self.assertRegex(out, 'opt2 val2')
self.assertRegex(out, 'opt3 val3')
self.assertRegex(out, 'opt4 val4')
self.build()
self.run_tests()
def test_wipe_from_builddir(self):
testdir = os.path.join(self.common_test_dir, '162 custom target subdir depend files')
self.init(testdir)
self.__reconfigure()
with Path(self.builddir):
self.init(testdir, extra_args=['--wipe'])
def test_target_construct_id_from_path(self):
# This id is stable but not guessable.
# The test is supposed to prevent unintentional
# changes of target ID generation.
target_id = Target.construct_id_from_path('some/obscure/subdir',
'target-id', '@suffix')
self.assertEqual('5e002d3@@target-id@suffix', target_id)
target_id = Target.construct_id_from_path('subproject/foo/subdir/bar',
'target2-id', '@other')
self.assertEqual('81d46d1@@target2-id@other', target_id)
def test_introspect_projectinfo_without_configured_build(self):
testfile = os.path.join(self.common_test_dir, '36 run program', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson.build']))
self.assertEqual(res['version'], None)
self.assertEqual(res['descriptive_name'], 'run command')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '44 options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], None)
self.assertEqual(res['descriptive_name'], 'options')
self.assertEqual(res['subprojects'], [])
testfile = os.path.join(self.common_test_dir, '47 subproject options', 'meson.build')
res = self.introspect_directory(testfile, '--projectinfo')
self.assertEqual(set(res['buildsystem_files']), set(['meson_options.txt', 'meson.build']))
self.assertEqual(res['version'], None)
self.assertEqual(res['descriptive_name'], 'suboptions')
self.assertEqual(len(res['subprojects']), 1)
subproject_files = set(f.replace('\\', '/') for f in res['subprojects'][0]['buildsystem_files'])
self.assertEqual(subproject_files, set(['subprojects/subproject/meson_options.txt', 'subprojects/subproject/meson.build']))
self.assertEqual(res['subprojects'][0]['name'], 'subproject')
self.assertEqual(res['subprojects'][0]['version'], 'undefined')
self.assertEqual(res['subprojects'][0]['descriptive_name'], 'subproject')
def test_introspect_projectinfo_subprojects(self):
testdir = os.path.join(self.common_test_dir, '103 subproject subdir')
self.init(testdir)
res = self.introspect('--projectinfo')
expected = {
'descriptive_name': 'proj',
'version': 'undefined',
'subprojects': [
{
'descriptive_name': 'sub',
'name': 'sub',
'version': 'undefined'
}
]
}
self.assertDictEqual(res, expected)
@skipIfNoExecutable('clang-format')
def test_clang_format(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Clang-format is for now only supported on Ninja, not {}'.format(self.backend.name))
testdir = os.path.join(self.unit_test_dir, '51 clang-format')
testfile = os.path.join(testdir, 'prog.c')
badfile = os.path.join(testdir, 'prog_orig_c')
goodfile = os.path.join(testdir, 'prog_expected_c')
try:
self.run_clangformat(testdir, testfile, badfile, goodfile)
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def run_clangformat(self, testdir, testfile, badfile, goodfile):
shutil.copyfile(badfile, testfile)
self.init(testdir)
self.assertNotEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
self.run_target('clang-format')
self.assertEqual(Path(testfile).read_text(),
Path(goodfile).read_text())
def test_introspect_buildoptions_without_configured_build(self):
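        # Introspecting build options straight from the source dir must give the
        # same result as introspecting a configured build dir.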
testdir = os.path.join(self.unit_test_dir, '51 introspect buildoptions')
testfile = os.path.join(testdir, 'meson.build')
res_nb = self.introspect_directory(testfile, ['--buildoptions'] + self.meson_args)
self.init(testdir, default_args=False)
res_wb = self.introspect('--buildoptions')
self.maxDiff = None
self.assertListEqual(res_nb, res_wb)
def test_introspect_json_dump(self):
testdir = os.path.join(self.unit_test_dir, '49 introspection')
self.init(testdir)
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
def assertKeyTypes(key_type_list, obj):
for i in key_type_list:
self.assertIn(i[0], obj)
self.assertIsInstance(obj[i[0]], i[1])
root_keylist = [
('benchmarks', list),
('buildoptions', list),
('buildsystem_files', list),
('dependencies', list),
('installed', dict),
('projectinfo', dict),
('targets', list),
('tests', list),
]
test_keylist = [
('cmd', list),
('env', dict),
('name', str),
('timeout', int),
('suite', list),
('is_parallel', bool),
]
buildoptions_keylist = [
('name', str),
('section', str),
('type', str),
('description', str),
]
buildoptions_typelist = [
('combo', str, [('choices', list)]),
('string', str, []),
('boolean', bool, []),
('integer', int, []),
('array', list, []),
]
dependencies_typelist = [
('name', str),
('compile_args', list),
('link_args', list),
]
targets_typelist = [
('name', str),
('id', str),
('type', str),
('defined_in', str),
('filename', list),
('build_by_default', bool),
('target_sources', list),
('installed', bool),
]
targets_sources_typelist = [
('language', str),
('compiler', list),
('parameters', list),
('sources', list),
('generated_sources', list),
]
# First load all files
res = {}
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i[0]))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res[i[0]] = json.load(fp)
assertKeyTypes(root_keylist, res)
# Check Tests and benchmarks
tests_to_find = ['test case 1', 'test case 2', 'benchmark 1']
for i in res['benchmarks'] + res['tests']:
assertKeyTypes(test_keylist, i)
if i['name'] in tests_to_find:
tests_to_find.remove(i['name'])
self.assertListEqual(tests_to_find, [])
# Check buildoptions
buildopts_to_find = {'cpp_std': 'c++11'}
for i in res['buildoptions']:
assertKeyTypes(buildoptions_keylist, i)
valid_type = False
for j in buildoptions_typelist:
if i['type'] == j[0]:
self.assertIsInstance(i['value'], j[1])
assertKeyTypes(j[2], i)
valid_type = True
break
self.assertTrue(valid_type)
if i['name'] in buildopts_to_find:
self.assertEqual(i['value'], buildopts_to_find[i['name']])
buildopts_to_find.pop(i['name'], None)
self.assertDictEqual(buildopts_to_find, {})
# Check buildsystem_files
bs_files = ['meson.build', 'sharedlib/meson.build', 'staticlib/meson.build']
bs_files = [os.path.join(testdir, x) for x in bs_files]
self.assertPathListEqual(res['buildsystem_files'], bs_files)
# Check dependencies
dependencies_to_find = ['threads']
for i in res['dependencies']:
assertKeyTypes(dependencies_typelist, i)
if i['name'] in dependencies_to_find:
dependencies_to_find.remove(i['name'])
self.assertListEqual(dependencies_to_find, [])
# Check projectinfo
self.assertDictEqual(res['projectinfo'], {'version': '1.2.3', 'descriptive_name': 'introspection', 'subprojects': []})
# Check targets
targets_to_find = {
'sharedTestLib': ('shared library', True, False, 'sharedlib/meson.build'),
'staticTestLib': ('static library', True, False, 'staticlib/meson.build'),
'test1': ('executable', True, True, 'meson.build'),
'test2': ('executable', True, False, 'meson.build'),
'test3': ('executable', True, False, 'meson.build'),
}
for i in res['targets']:
assertKeyTypes(targets_typelist, i)
if i['name'] in targets_to_find:
tgt = targets_to_find[i['name']]
self.assertEqual(i['type'], tgt[0])
self.assertEqual(i['build_by_default'], tgt[1])
self.assertEqual(i['installed'], tgt[2])
self.assertPathEqual(i['defined_in'], os.path.join(testdir, tgt[3]))
targets_to_find.pop(i['name'], None)
for j in i['target_sources']:
assertKeyTypes(targets_sources_typelist, j)
self.assertDictEqual(targets_to_find, {})
def test_introspect_file_dump_equals_all(self):
testdir = os.path.join(self.unit_test_dir, '49 introspection')
self.init(testdir)
res_all = self.introspect('--all')
res_file = {}
root_keylist = [
'benchmarks',
'buildoptions',
'buildsystem_files',
'dependencies',
'installed',
'projectinfo',
'targets',
'tests',
]
infodir = os.path.join(self.builddir, 'meson-info')
self.assertPathExists(infodir)
for i in root_keylist:
curr = os.path.join(infodir, 'intro-{}.json'.format(i))
self.assertPathExists(curr)
with open(curr, 'r') as fp:
res_file[i] = json.load(fp)
self.assertEqual(res_all, res_file)
def test_introspect_meson_info(self):
testdir = os.path.join(self.unit_test_dir, '49 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'meson-info.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
for i in ['meson_version', 'directories', 'introspection', 'build_files_updated', 'error']:
self.assertIn(i, res1)
self.assertEqual(res1['error'], False)
self.assertEqual(res1['build_files_updated'], True)
def test_introspect_config_update(self):
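        # intro-buildoptions.json must be rewritten to reflect option changes
        # made with 'meson configure'.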
testdir = os.path.join(self.unit_test_dir, '49 introspection')
introfile = os.path.join(self.builddir, 'meson-info', 'intro-buildoptions.json')
self.init(testdir)
self.assertPathExists(introfile)
with open(introfile, 'r') as fp:
res1 = json.load(fp)
self.setconf('-Dcpp_std=c++14')
self.setconf('-Dbuildtype=release')
for idx, i in enumerate(res1):
if i['name'] == 'cpp_std':
res1[idx]['value'] = 'c++14'
if i['name'] == 'buildtype':
res1[idx]['value'] = 'release'
if i['name'] == 'optimization':
res1[idx]['value'] = '3'
if i['name'] == 'debug':
res1[idx]['value'] = False
with open(introfile, 'r') as fp:
res2 = json.load(fp)
self.assertListEqual(res1, res2)
class FailureTests(BasePlatformTests):
'''
Tests that test failure conditions. Build files here should be dynamically
generated and static tests should go into `test cases/failing*`.
This is useful because there can be many ways in which a particular
function can fail, and creating failing tests for all of them is tedious
and slows down testing.
'''
dnf = "[Dd]ependency.*not found(:.*)?"
nopkg = '[Pp]kg-config not found'
def setUp(self):
super().setUp()
self.srcdir = os.path.realpath(tempfile.mkdtemp())
self.mbuild = os.path.join(self.srcdir, 'meson.build')
def tearDown(self):
super().tearDown()
windows_proof_rmtree(self.srcdir)
def assertMesonRaises(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents raises
        an error message matching regex @match.
'''
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('failure test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Force tracebacks so we can detect them properly
os.environ['MESON_FORCE_BACKTRACE'] = '1'
with self.assertRaisesRegex(MesonException, match, msg=contents):
# Must run in-process or we'll get a generic CalledProcessError
self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def obtainMesonOutput(self, contents, match, extra_args, langs, meson_version=None):
if langs is None:
langs = []
with open(self.mbuild, 'w') as f:
f.write("project('output test', 'c', 'cpp'")
if meson_version:
f.write(", meson_version: '{}'".format(meson_version))
f.write(")\n")
for lang in langs:
f.write("add_languages('{}', required : false)\n".format(lang))
f.write(contents)
# Run in-process for speed and consistency with assertMesonRaises
return self.init(self.srcdir, extra_args=extra_args, inprocess=True)
def assertMesonOutputs(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents outputs
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertRegex(out, match)
def assertMesonDoesNotOutput(self, contents, match, extra_args=None, langs=None, meson_version=None):
'''
Assert that running meson configure on the specified @contents does not output
something that matches regex @match.
'''
out = self.obtainMesonOutput(contents, match, extra_args, langs, meson_version)
self.assertNotRegex(out, match)
@skipIfNoPkgconfig
def test_dependency(self):
if subprocess.call(['pkg-config', '--exists', 'zlib']) != 0:
raise unittest.SkipTest('zlib not found with pkg-config')
a = (("dependency('zlib', method : 'fail')", "'fail' is invalid"),
("dependency('zlib', static : '1')", "[Ss]tatic.*boolean"),
("dependency('zlib', version : 1)", "[Vv]ersion.*string or list"),
("dependency('zlib', required : 1)", "[Rr]equired.*boolean"),
("dependency('zlib', method : 1)", "[Mm]ethod.*string"),
("dependency('zlibfail')", self.dnf),)
for contents, match in a:
self.assertMesonRaises(contents, match)
def test_apple_frameworks_dependency(self):
if not is_osx():
raise unittest.SkipTest('only run on macOS')
self.assertMesonRaises("dependency('appleframeworks')",
"requires at least one module")
def test_sdl2_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('sdl2-config'):
raise unittest.SkipTest('sdl2-config found')
self.assertMesonRaises("dependency('sdl2', method : 'sdlconfig')", self.dnf)
if shutil.which('pkg-config'):
errmsg = self.dnf
else:
errmsg = self.nopkg
self.assertMesonRaises("dependency('sdl2', method : 'pkg-config')", errmsg)
def test_gnustep_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('gnustep-config'):
raise unittest.SkipTest('gnustep-config found')
self.assertMesonRaises("dependency('gnustep')",
"(requires a Objc compiler|{})".format(self.dnf),
langs = ['objc'])
def test_wx_notfound_dependency(self):
# Want to test failure, so skip if available
if shutil.which('wx-config-3.0') or shutil.which('wx-config') or shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('wx-config, wx-config-3.0 or wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets')", self.dnf)
self.assertMesonOutputs("dependency('wxwidgets', required : false)",
"Dependency .*WxWidgets.* found: .*NO.*")
def test_wx_dependency(self):
if not shutil.which('wx-config-3.0') and not shutil.which('wx-config') and not shutil.which('wx-config-gtk3'):
raise unittest.SkipTest('Neither wx-config, wx-config-3.0 nor wx-config-gtk3 found')
self.assertMesonRaises("dependency('wxwidgets', modules : 1)",
"module argument is not a string")
def test_llvm_dependency(self):
self.assertMesonRaises("dependency('llvm', modules : 'fail')",
"(required.*fail|{})".format(self.dnf))
def test_boost_notfound_dependency(self):
# Can be run even if Boost is found or not
self.assertMesonRaises("dependency('boost', modules : 1)",
"module.*not a string")
self.assertMesonRaises("dependency('boost', modules : 'fail')",
"(fail.*not found|{})".format(self.dnf))
def test_boost_BOOST_ROOT_dependency(self):
# Test BOOST_ROOT; can be run even if Boost is found or not
os.environ['BOOST_ROOT'] = 'relative/path'
self.assertMesonRaises("dependency('boost')",
"(BOOST_ROOT.*absolute|{})".format(self.dnf))
def test_dependency_invalid_method(self):
code = '''zlib_dep = dependency('zlib', required : false)
zlib_dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "'zlib' is not a config-tool dependency")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_pkgconfig_variable('foo')
'''
self.assertMesonRaises(code, "Method.*pkgconfig.*is invalid.*internal")
code = '''zlib_dep = dependency('zlib', required : false)
dep = declare_dependency(dependencies : zlib_dep)
dep.get_configtool_variable('foo')
'''
self.assertMesonRaises(code, "Method.*configtool.*is invalid.*internal")
def test_objc_cpp_detection(self):
'''
Test that when we can't detect objc or objcpp, we fail gracefully.
'''
env = get_fake_env('', self.builddir, self.prefix)
try:
env.detect_objc_compiler(False)
env.detect_objcpp_compiler(False)
except EnvironmentException:
code = "add_languages('objc')\nadd_languages('objcpp')"
self.assertMesonRaises(code, "Unknown compiler")
return
raise unittest.SkipTest("objc and objcpp found, can't test detection failure")
def test_subproject_variables(self):
'''
Test that:
        1. The correct message is printed when a not-required dep is not
found and the fallback subproject is also not found.
2. A not-found not-required dep with a fallback subproject outputs the
correct message when the fallback subproject is found but the
variable inside it is not.
3. A fallback dependency is found from the subproject parsed in (2)
4. A not-required fallback dependency is not found because the
subproject failed to parse.
'''
tdir = os.path.join(self.unit_test_dir, '20 subproj dep variables')
out = self.init(tdir, inprocess=True)
self.assertRegex(out, r"Couldn't use fallback subproject "
"in.*subprojects.*nosubproj.*for the dependency.*somedep")
self.assertRegex(out, r'Dependency.*somenotfounddep.*from subproject.*'
'subprojects.*somesubproj.*found:.*NO')
self.assertRegex(out, r'Dependency.*zlibproxy.*from subproject.*'
'subprojects.*somesubproj.*found:.*YES.*(cached)')
self.assertRegex(out, r'Couldn\'t use fallback subproject in '
'.*subprojects.*failingsubproj.*for the dependency.*somedep')
def test_exception_exit_status(self):
'''
Test exit status on python exception
'''
tdir = os.path.join(self.unit_test_dir, '21 exit status')
os.environ['MESON_UNIT_TEST'] = '1'
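        # Run out-of-process so the exit status of the failing configure can be
        # observed; Meson exits with status 2 on an unhandled Python exception.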
with self.assertRaises(subprocess.CalledProcessError) as cm:
self.init(tdir, inprocess=False)
self.assertEqual(cm.exception.returncode, 2)
self.wipe()
def test_dict_requires_key_value_pairs(self):
self.assertMesonRaises("dict = {3, 'foo': 'bar'}",
'Only key:value pairs are valid in dict construction.')
self.assertMesonRaises("{'foo': 'bar', 3}",
'Only key:value pairs are valid in dict construction.')
def test_dict_forbids_duplicate_keys(self):
self.assertMesonRaises("dict = {'a': 41, 'a': 42}",
'Duplicate dictionary key: a.*')
def test_dict_forbids_integer_key(self):
self.assertMesonRaises("dict = {3: 'foo'}",
'Key must be a string.*')
def test_using_too_recent_feature(self):
# Here we use a dict, which was introduced in 0.47.0
self.assertMesonOutputs("dict = {}",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.46.0')
def test_using_recent_feature(self):
# Same as above, except the meson version is now appropriate
self.assertMesonDoesNotOutput("dict = {}",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.47')
def test_using_too_recent_feature_dependency(self):
self.assertMesonOutputs("dependency('pcap', required: false)",
".*WARNING.*Project targetting.*but.*",
meson_version='>= 0.41.0')
def test_vcs_tag_featurenew_build_always_stale(self):
'https://github.com/mesonbuild/meson/issues/3904'
vcs_tag = '''version_data = configuration_data()
version_data.set('PROJVER', '@VCS_TAG@')
vf = configure_file(output : 'version.h.in', configuration: version_data)
f = vcs_tag(input : vf, output : 'version.h')
'''
msg = '.*WARNING:.*feature.*build_always_stale.*custom_target.*'
self.assertMesonDoesNotOutput(vcs_tag, msg, meson_version='>=0.43')
def test_missing_subproject_not_required_and_required(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub2 = subproject('not-found-subproject', required: true)",
""".*Subproject "subprojects/not-found-subproject" required but not found.*""")
def test_get_variable_on_not_found_project(self):
self.assertMesonRaises("sub1 = subproject('not-found-subproject', required: false)\n" +
"sub1.get_variable('naaa')",
"""Subproject "subprojects/not-found-subproject" disabled can't get_variable on it.""")
class WindowsTests(BasePlatformTests):
'''
Tests that should run on Cygwin, MinGW, and MSVC
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/windows')
@unittest.skipIf(is_cygwin(), 'Test only applicable to Windows')
def test_find_program(self):
'''
Test that Windows-specific edge-cases in find_program are functioning
correctly. Cannot be an ordinary test because it involves manipulating
PATH to point to a directory with Python scripts.
'''
testdir = os.path.join(self.platform_test_dir, '8 find program')
# Find `cmd` and `cmd.exe`
prog1 = ExternalProgram('cmd')
self.assertTrue(prog1.found(), msg='cmd not found')
prog2 = ExternalProgram('cmd.exe')
self.assertTrue(prog2.found(), msg='cmd.exe not found')
self.assertPathEqual(prog1.get_path(), prog2.get_path())
# Find cmd with an absolute path that's missing the extension
cmd_path = prog2.get_path()[:-4]
prog = ExternalProgram(cmd_path)
self.assertTrue(prog.found(), msg='{!r} not found'.format(cmd_path))
# Finding a script with no extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script'))
self.assertTrue(prog.found(), msg='test-script not found')
# Finding a script with an extension inside a directory works
prog = ExternalProgram(os.path.join(testdir, 'test-script-ext.py'))
self.assertTrue(prog.found(), msg='test-script-ext.py not found')
# Finding a script in PATH w/o extension works and adds the interpreter
os.environ['PATH'] += os.pathsep + testdir
prog = ExternalProgram('test-script-ext')
self.assertTrue(prog.found(), msg='test-script-ext not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
# Finding a script in PATH with extension works and adds the interpreter
prog = ExternalProgram('test-script-ext.py')
self.assertTrue(prog.found(), msg='test-script-ext.py not found in PATH')
self.assertPathEqual(prog.get_command()[0], python_command[0])
self.assertPathBasenameEqual(prog.get_path(), 'test-script-ext.py')
def test_ignore_libs(self):
'''
Test that find_library on libs that are to be ignored returns an empty
array of arguments. Must be a unit test because we cannot inspect
ExternalLibraryHolder from build files.
'''
testdir = os.path.join(self.platform_test_dir, '1 basic')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(False)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Not using MSVC')
        # Hard-code the full set so this test must be updated whenever
        # ignore_libs changes, and to test the set itself
self.assertEqual(set(cc.ignore_libs), {'c', 'm', 'pthread', 'dl', 'rt'})
for l in cc.ignore_libs:
self.assertEqual(cc.find_library(l, env, []), [])
def test_rc_depends_files(self):
testdir = os.path.join(self.platform_test_dir, '5 resources')
# resource compiler depfile generation is not yet implemented for msvc
env = get_fake_env(testdir, self.builddir, self.prefix)
depfile_works = env.detect_c_compiler(False).get_id() not in ['msvc', 'clang-cl']
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Test compile_resources(depend_file:)
# Changing mtime of sample.ico should rebuild prog
self.utime(os.path.join(testdir, 'res', 'sample.ico'))
self.assertRebuiltTarget('prog')
# Test depfile generation by compile_resources
# Changing mtime of resource.h should rebuild myres.rc and then prog
if depfile_works:
self.utime(os.path.join(testdir, 'inc', 'resource', 'resource.h'))
self.assertRebuiltTarget('prog')
self.wipe()
if depfile_works:
testdir = os.path.join(self.platform_test_dir, '12 resources with custom targets')
self.init(testdir)
self.build()
# Immediately rebuilding should not do anything
self.assertBuildIsNoop()
# Changing mtime of resource.h should rebuild myres_1.rc and then prog_1
self.utime(os.path.join(testdir, 'res', 'resource.h'))
self.assertRebuiltTarget('prog_1')
def test_msvc_cpp17(self):
testdir = os.path.join(self.unit_test_dir, '45 vscpp17')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(False)
if cc.get_argument_syntax() != 'msvc':
raise unittest.SkipTest('Test only applies to MSVC-like compilers')
try:
self.init(testdir)
except subprocess.CalledProcessError:
# According to Python docs, output is only stored when
# using check_output. We don't use it, so we can't check
# that the output is correct (i.e. that it failed due
# to the right reason).
return
self.build()
class DarwinTests(BasePlatformTests):
'''
Tests that should run on macOS
'''
def setUp(self):
super().setUp()
self.platform_test_dir = os.path.join(self.src_root, 'test cases/osx')
def test_apple_bitcode(self):
'''
Test that -fembed-bitcode is correctly added while compiling and
-bitcode_bundle is added while linking when b_bitcode is true and not
when it is false. This can't be an ordinary test case because we need
to inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
# Try with bitcode enabled
out = self.init(testdir, extra_args='-Db_bitcode=true')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(False)
if cc.id != 'clang':
raise unittest.SkipTest('Not using Clang on OSX')
# Warning was printed
self.assertRegex(out, 'WARNING:.*b_bitcode')
# Compiler options were added
compdb = self.get_compdb()
self.assertIn('-fembed-bitcode', compdb[0]['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
# Linker options were added
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNotNone(m, msg=contents)
# Try with bitcode disabled
self.setconf('-Db_bitcode=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fembed-bitcode', compdb[0]['command'])
build_ninja = os.path.join(self.builddir, 'build.ninja')
with open(build_ninja, 'r', encoding='utf-8') as f:
contents = f.read()
m = re.search('LINK_ARGS =.*-bitcode_bundle', contents)
self.assertIsNone(m, msg=contents)
def test_apple_bitcode_modules(self):
'''
Same as above, just for shared_module()
'''
testdir = os.path.join(self.common_test_dir, '153 shared module resolving symbol in executable')
# Ensure that it builds even with bitcode enabled
self.init(testdir, extra_args='-Db_bitcode=true')
self.build()
self.run_tests()
def _get_darwin_versions(self, fname):
fname = os.path.join(self.builddir, fname)
out = subprocess.check_output(['otool', '-L', fname], universal_newlines=True)
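        # `otool -L` prints each dylib as
        #   name (compatibility version X, current version Y)
        # so pull both version fields out of the second output line.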
m = re.match(r'.*version (.*), current version (.*)\)', out.split('\n')[1])
self.assertIsNotNone(m, msg=out)
return m.groups()
def test_library_versioning(self):
'''
Ensure that compatibility_version and current_version are set correctly
'''
testdir = os.path.join(self.platform_test_dir, '2 library versions')
self.init(testdir)
self.build()
targets = {}
for t in self.introspect('--targets'):
targets[t['name']] = t['filename'][0] if isinstance(t['filename'], list) else t['filename']
self.assertEqual(self._get_darwin_versions(targets['some']), ('7.0.0', '7.0.0'))
self.assertEqual(self._get_darwin_versions(targets['noversion']), ('0.0.0', '0.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlyversion']), ('1.0.0', '1.0.0'))
self.assertEqual(self._get_darwin_versions(targets['onlysoversion']), ('5.0.0', '5.0.0'))
self.assertEqual(self._get_darwin_versions(targets['intver']), ('2.0.0', '2.0.0'))
self.assertEqual(self._get_darwin_versions(targets['stringver']), ('2.3.0', '2.3.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistver']), ('2.4.0', '2.4.0'))
self.assertEqual(self._get_darwin_versions(targets['intstringver']), ('1111.0.0', '2.5.0'))
self.assertEqual(self._get_darwin_versions(targets['stringlistvers']), ('2.6.0', '2.6.1'))
def test_duplicate_rpath(self):
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
# We purposely pass a duplicate rpath to Meson, in order
# to ascertain that Meson does not call install_name_tool
# with duplicate -delete_rpath arguments, which would
# lead to erroring out on installation
os.environ["LDFLAGS"] = "-Wl,-rpath,/foo/bar"
self.init(testdir)
self.build()
self.install()
del os.environ["LDFLAGS"]
class LinuxlikeTests(BasePlatformTests):
'''
Tests that should run on Linux, macOS, and *BSD
'''
def test_basic_soname(self):
'''
Test that the soname is set correctly for shared libraries. This can't
be an ordinary test case because we need to run `readelf` and actually
check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '4 shared')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'libmylib.so')
soname = get_soname(lib1)
self.assertEqual(soname, 'libmylib.so')
def test_custom_soname(self):
'''
Test that the soname is set correctly for shared libraries when
a custom prefix and/or suffix is used. This can't be an ordinary test
case because we need to run `readelf` and actually check the soname.
https://github.com/mesonbuild/meson/issues/785
'''
testdir = os.path.join(self.common_test_dir, '25 library versions')
self.init(testdir)
self.build()
lib1 = os.path.join(self.builddir, 'prefixsomelib.suffix')
soname = get_soname(lib1)
self.assertEqual(soname, 'prefixsomelib.suffix')
def test_pic(self):
'''
Test that -fPIC is correctly added to static libraries when b_staticpic
is true and not when it is false. This can't be an ordinary test case
because we need to inspect the compiler database.
'''
if is_windows() or is_cygwin() or is_osx():
raise unittest.SkipTest('PIC not relevant')
testdir = os.path.join(self.common_test_dir, '3 static')
self.init(testdir)
compdb = self.get_compdb()
self.assertIn('-fPIC', compdb[0]['command'])
self.setconf('-Db_staticpic=false')
# Regenerate build
self.build()
compdb = self.get_compdb()
self.assertNotIn('-fPIC', compdb[0]['command'])
def test_pkgconfig_gen(self):
'''
Test that generated pkg-config files can be found and have the correct
version and link args. This can't be an ordinary test case because we
need to run pkg-config outside of a Meson build file.
https://github.com/mesonbuild/meson/issues/889
'''
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen')
self.init(testdir)
env = get_fake_env(testdir, self.builddir, self.prefix)
kwargs = {'required': True, 'silent': True}
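        # Restrict pkg-config to the private directory containing the generated
        # .pc files so system-wide copies of libfoo cannot interfere.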
os.environ['PKG_CONFIG_LIBDIR'] = self.privatedir
foo_dep = PkgConfigDependency('libfoo', env, kwargs)
self.assertTrue(foo_dep.found())
self.assertEqual(foo_dep.get_version(), '1.0')
self.assertIn('-lfoo', foo_dep.get_link_args())
self.assertEqual(foo_dep.get_pkgconfig_variable('foo', {}), 'bar')
self.assertPathEqual(foo_dep.get_pkgconfig_variable('datadir', {}), '/usr/data')
def test_pkgconfig_gen_deps(self):
'''
Test that generated pkg-config files correctly handle dependencies
'''
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen')
self.init(testdir)
privatedir1 = self.privatedir
self.new_builddir()
os.environ['PKG_CONFIG_LIBDIR'] = privatedir1
testdir = os.path.join(self.common_test_dir, '48 pkgconfig-gen', 'dependencies')
self.init(testdir)
privatedir2 = self.privatedir
os.environ['PKG_CONFIG_LIBDIR'] = os.pathsep.join([privatedir1, privatedir2])
self._run(['pkg-config', 'dependency-test', '--validate'])
# pkg-config strips some duplicated flags so we have to parse the
        # generated file ourselves.
expected = {
'Requires': 'libexposed',
'Requires.private': 'libfoo >= 1.0',
'Libs': '-L${libdir} -llibmain -pthread -lcustom',
'Libs.private': '-lcustom2 -L${libdir} -llibinternal',
'Cflags': '-I${includedir} -pthread -DCUSTOM',
}
if is_osx() or is_haiku():
expected['Cflags'] = expected['Cflags'].replace('-pthread ', '')
with open(os.path.join(privatedir2, 'dependency-test.pc')) as f:
matched_lines = 0
for line in f:
parts = line.split(':', 1)
if parts[0] in expected:
key = parts[0]
val = parts[1].strip()
expected_val = expected[key]
self.assertEqual(expected_val, val)
matched_lines += 1
self.assertEqual(len(expected), matched_lines)
cmd = ['pkg-config', 'requires-test']
out = self._run(cmd + ['--print-requires']).strip().split('\n')
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
cmd = ['pkg-config', 'requires-private-test']
out = self._run(cmd + ['--print-requires-private']).strip().split('\n')
self.assertEqual(sorted(out), sorted(['libexposed', 'libfoo >= 1.0', 'libhello']))
def test_pkg_unfound(self):
testdir = os.path.join(self.unit_test_dir, '23 unfound pkgconfig')
self.init(testdir)
with open(os.path.join(self.privatedir, 'somename.pc')) as f:
pcfile = f.read()
self.assertFalse('blub_blob_blib' in pcfile)
def test_vala_c_warnings(self):
'''
Test that no warnings are emitted for C code generated by Vala. This
can't be an ordinary test case because we need to inspect the compiler
database.
https://github.com/mesonbuild/meson/issues/864
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '5 target glib')
self.init(testdir)
compdb = self.get_compdb()
vala_command = None
c_command = None
for each in compdb:
if each['file'].endswith('GLib.Thread.c'):
vala_command = each['command']
elif each['file'].endswith('GLib.Thread.vala'):
continue
elif each['file'].endswith('retcode.c'):
c_command = each['command']
else:
m = 'Unknown file {!r} in vala_c_warnings test'.format(each['file'])
raise AssertionError(m)
self.assertIsNotNone(vala_command)
self.assertIsNotNone(c_command)
# -w suppresses all warnings, should be there in Vala but not in C
self.assertIn(" -w ", vala_command)
self.assertNotIn(" -w ", c_command)
# -Wall enables all warnings, should be there in C but not in Vala
self.assertNotIn(" -Wall ", vala_command)
self.assertIn(" -Wall ", c_command)
# -Werror converts warnings to errors, should always be there since it's
# injected by an unrelated piece of code and the project has werror=true
self.assertIn(" -Werror ", vala_command)
self.assertIn(" -Werror ", c_command)
@skipIfNoPkgconfig
def test_qtdependency_pkgconfig_detection(self):
'''
Test that qt4 and qt5 detection with pkgconfig works.
'''
# Verify Qt4 or Qt5 can be found with pkg-config
qt4 = subprocess.call(['pkg-config', '--exists', 'QtCore'])
qt5 = subprocess.call(['pkg-config', '--exists', 'Qt5Core'])
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, ['-Dmethod=pkg-config'])
# Confirm that the dependency was found with pkg-config
mesonlog = self.get_meson_log()
if qt4 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Dependency qt4 \(modules: Core\) found: YES 4.* \(pkg-config\)\n')
if qt5 == 0:
self.assertRegex('\n'.join(mesonlog),
r'Dependency qt5 \(modules: Core\) found: YES 5.* \(pkg-config\)\n')
@skip_if_not_base_option('b_sanitize')
def test_generate_gir_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir, ['-Db_sanitize=address', '-Db_lundef=false'])
self.build()
def test_qt5dependency_qmake_detection(self):
'''
Test that qt5 detection with qmake works. This can't be an ordinary
test case because it involves setting the environment.
'''
# Verify that qmake is for Qt5
if not shutil.which('qmake-qt5'):
if not shutil.which('qmake'):
raise unittest.SkipTest('QMake not found')
output = subprocess.getoutput('qmake --version')
if 'Qt version 5' not in output:
raise unittest.SkipTest('Qmake found, but it is not for Qt 5.')
# Disable pkg-config codepath and force searching with qmake/qmake-qt5
testdir = os.path.join(self.framework_test_dir, '4 qt')
self.init(testdir, ['-Dmethod=qmake'])
# Confirm that the dependency was found with qmake
mesonlog = self.get_meson_log()
self.assertRegex('\n'.join(mesonlog),
r'Dependency qt5 \(modules: Core\) found: YES .* \((qmake|qmake-qt5)\)\n')
def _test_soname_impl(self, libpath, install):
if is_cygwin() or is_osx():
raise unittest.SkipTest('Test only applicable to ELF and linuxlike sonames')
testdir = os.path.join(self.unit_test_dir, '1 soname')
self.init(testdir)
self.build()
if install:
self.install()
# File without aliases set.
nover = os.path.join(libpath, 'libnover.so')
self.assertPathExists(nover)
self.assertFalse(os.path.islink(nover))
self.assertEqual(get_soname(nover), 'libnover.so')
self.assertEqual(len(glob(nover[:-3] + '*')), 1)
# File with version set
verset = os.path.join(libpath, 'libverset.so')
self.assertPathExists(verset + '.4.5.6')
self.assertEqual(os.readlink(verset), 'libverset.so.4')
self.assertEqual(get_soname(verset), 'libverset.so.4')
self.assertEqual(len(glob(verset[:-3] + '*')), 3)
# File with soversion set
soverset = os.path.join(libpath, 'libsoverset.so')
self.assertPathExists(soverset + '.1.2.3')
self.assertEqual(os.readlink(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(get_soname(soverset), 'libsoverset.so.1.2.3')
self.assertEqual(len(glob(soverset[:-3] + '*')), 2)
# File with version and soversion set to same values
settosame = os.path.join(libpath, 'libsettosame.so')
self.assertPathExists(settosame + '.7.8.9')
self.assertEqual(os.readlink(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(get_soname(settosame), 'libsettosame.so.7.8.9')
self.assertEqual(len(glob(settosame[:-3] + '*')), 2)
# File with version and soversion set to different values
bothset = os.path.join(libpath, 'libbothset.so')
self.assertPathExists(bothset + '.1.2.3')
self.assertEqual(os.readlink(bothset), 'libbothset.so.1.2.3')
self.assertEqual(os.readlink(bothset + '.1.2.3'), 'libbothset.so.4.5.6')
self.assertEqual(get_soname(bothset), 'libbothset.so.1.2.3')
self.assertEqual(len(glob(bothset[:-3] + '*')), 3)
def test_soname(self):
self._test_soname_impl(self.builddir, False)
def test_installed_soname(self):
libdir = self.installdir + os.path.join(self.prefix, self.libdir)
self._test_soname_impl(libdir, True)
def test_compiler_check_flags_order(self):
'''
Test that compiler check flags override all other flags. This can't be
an ordinary test case because it needs the environment to be set.
'''
Oflag = '-O3'
os.environ['CFLAGS'] = os.environ['CXXFLAGS'] = Oflag
testdir = os.path.join(self.common_test_dir, '40 has function')
self.init(testdir)
cmds = self.get_meson_log_compiler_checks()
for cmd in cmds:
if cmd[0] == 'ccache':
cmd = cmd[1:]
# Verify that -I flags from the `args` kwarg are first
# This is set in the '40 has function' test case
self.assertEqual(cmd[1], '-I/tmp')
# Verify that -O3 set via the environment is overridden by -O0
Oargs = [arg for arg in cmd if arg.startswith('-O')]
self.assertEqual(Oargs, [Oflag, '-O0'])
def _test_stds_impl(self, testdir, compiler, p):
lang_std = p + '_std'
# Check that all the listed -std=xxx options for this compiler work
# just fine when used
for v in compiler.get_options()[lang_std].choices:
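            # Skip std values this compiler version cannot understand
            # (c++17 needs clang >= 5.0, c++2a needs clang >= 6.0 / gcc >= 8,
            # and Apple clang before 9.1 supports neither).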
if (compiler.get_id() == 'clang' and '17' in v and
(version_compare(compiler.version, '<5.0.0') or
(compiler.compiler_type == mesonbuild.compilers.CompilerType.CLANG_OSX and version_compare(compiler.version, '<9.1')))):
continue
if (compiler.get_id() == 'clang' and '2a' in v and
(version_compare(compiler.version, '<6.0.0') or
(compiler.compiler_type == mesonbuild.compilers.CompilerType.CLANG_OSX and version_compare(compiler.version, '<9.1')))):
continue
if (compiler.get_id() == 'gcc' and '2a' in v and version_compare(compiler.version, '<8.0.0')):
continue
std_opt = '{}={}'.format(lang_std, v)
self.init(testdir, ['-D' + std_opt])
cmd = self.get_compdb()[0]['command']
# c++03 and gnu++03 are not understood by ICC, don't try to look for them
skiplist = frozenset([
('intel', 'c++03'),
('intel', 'gnu++03')])
if v != 'none' and not (compiler.get_id(), v) in skiplist:
cmd_std = " -std={} ".format(v)
self.assertIn(cmd_std, cmd)
try:
self.build()
            except Exception:
print('{} was {!r}'.format(lang_std, v))
raise
self.wipe()
# Check that an invalid std option in CFLAGS/CPPFLAGS fails
# Needed because by default ICC ignores invalid options
cmd_std = '-std=FAIL'
env_flags = p.upper() + 'FLAGS'
os.environ[env_flags] = cmd_std
self.init(testdir)
cmd = self.get_compdb()[0]['command']
qcmd_std = " {} ".format(cmd_std)
self.assertIn(qcmd_std, cmd)
with self.assertRaises(subprocess.CalledProcessError,
msg='{} should have failed'.format(qcmd_std)):
self.build()
def test_compiler_c_stds(self):
'''
Test that C stds specified for this compiler can all be used. Can't be
an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '1 trivial')
env = get_fake_env(testdir, self.builddir, self.prefix)
cc = env.detect_c_compiler(False)
self._test_stds_impl(testdir, cc, 'c')
def test_compiler_cpp_stds(self):
'''
Test that C++ stds specified for this compiler can all be used. Can't
be an ordinary test because it requires passing options to meson.
'''
testdir = os.path.join(self.common_test_dir, '2 cpp')
env = get_fake_env(testdir, self.builddir, self.prefix)
cpp = env.detect_cpp_compiler(False)
self._test_stds_impl(testdir, cpp, 'cpp')
def test_unity_subproj(self):
testdir = os.path.join(self.common_test_dir, '46 subproject')
self.init(testdir, extra_args='--unity=subprojects')
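        # construct_id_from_path() yields the hashed per-target directory name
        # Meson uses in the build tree, which lets us locate the unity sources.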
simpletest_id = Target.construct_id_from_path('subprojects/sublib', 'simpletest', '@exe')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', simpletest_id, 'simpletest-unity.c'))
sublib_id = Target.construct_id_from_path('subprojects/sublib', 'sublib', '@sha')
self.assertPathExists(os.path.join(self.builddir, 'subprojects/sublib', sublib_id, 'sublib-unity.c'))
self.assertPathDoesNotExist(os.path.join(self.builddir, 'user@exe/user-unity.c'))
self.build()
def test_installed_modes(self):
'''
Test that files installed by these tests have the correct permissions.
Can't be an ordinary test because our installed_files.txt is very basic.
'''
# Test file modes
testdir = os.path.join(self.common_test_dir, '12 data')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'etc', 'etcfile.dat')
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'rw------T'
self.assertEqual(want_mode, found_mode[1:])
f = os.path.join(self.installdir, 'usr', 'bin', 'runscript.sh')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-sr-x'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
self.assertEqual(0, statf.st_gid)
f = os.path.join(self.installdir, 'usr', 'share', 'progname',
'fileobject_datafile.dat')
orig = os.path.join(testdir, 'fileobject_datafile.dat')
statf = os.stat(f)
statorig = os.stat(orig)
found_mode = stat.filemode(statf.st_mode)
orig_mode = stat.filemode(statorig.st_mode)
self.assertEqual(orig_mode[1:], found_mode[1:])
self.assertEqual(os.getuid(), statf.st_uid)
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_gid)
self.wipe()
# Test directory modes
testdir = os.path.join(self.common_test_dir, '63 install subdir')
self.init(testdir)
self.install()
f = os.path.join(self.installdir, 'usr', 'share', 'sub1', 'second.dat')
statf = os.stat(f)
found_mode = stat.filemode(statf.st_mode)
want_mode = 'rwxr-x--t'
self.assertEqual(want_mode, found_mode[1:])
if os.getuid() == 0:
# The chown failed nonfatally if we're not root
self.assertEqual(0, statf.st_uid)
def test_installed_modes_extended(self):
'''
Test that files are installed with correct permissions using install_mode.
'''
testdir = os.path.join(self.common_test_dir, '196 install_mode')
self.init(testdir)
self.build()
self.install()
for fsobj, want_mode in [
('bin', 'drwxr-x---'),
('bin/runscript.sh', '-rwxr-sr-x'),
('bin/trivialprog', '-rwxr-sr-x'),
('include', 'drwxr-x---'),
('include/config.h', '-rw-rwSr--'),
('include/rootdir.h', '-r--r--r-T'),
('lib', 'drwxr-x---'),
('lib/libstat.a', '-rw---Sr--'),
('share', 'drwxr-x---'),
('share/man', 'drwxr-x---'),
('share/man/man1', 'drwxr-x---'),
('share/man/man1/foo.1', '-r--r--r-T'),
('share/sub1', 'drwxr-x---'),
('share/sub1/second.dat', '-rwxr-x--t'),
('subdir', 'drwxr-x---'),
('subdir/data.dat', '-rw-rwSr--'),
]:
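            # The expected strings above encode the requested install_mode bits;
            # stat.filemode() renders setuid/setgid/sticky as s/S and t/T.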
f = os.path.join(self.installdir, 'usr', *fsobj.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(fsobj, want_mode, found_mode)))
# Ensure that introspect --installed works on all types of files
# FIXME: also verify the files list
self.introspect('--installed')
def test_install_umask(self):
'''
Test that files are installed with correct permissions using default
        install umask of 022, regardless of the umask at the time the worktree
was checked out or the build was executed.
'''
# Copy source tree to a temporary directory and change permissions
# there to simulate a checkout with umask 002.
orig_testdir = os.path.join(self.unit_test_dir, '26 install umask')
# Create a new testdir under tmpdir.
tmpdir = os.path.realpath(tempfile.mkdtemp())
self.addCleanup(windows_proof_rmtree, tmpdir)
testdir = os.path.join(tmpdir, '26 install umask')
# Copy the tree using shutil.copyfile, which will use the current umask
# instead of preserving permissions of the old tree.
save_umask = os.umask(0o002)
self.addCleanup(os.umask, save_umask)
shutil.copytree(orig_testdir, testdir, copy_function=shutil.copyfile)
# Preserve the executable status of subdir/sayhello though.
os.chmod(os.path.join(testdir, 'subdir', 'sayhello'), 0o775)
self.init(testdir)
# Run the build under a 027 umask now.
os.umask(0o027)
self.build()
# And keep umask 027 for the install step too.
self.install()
for executable in [
'bin/prog',
'share/subdir/sayhello',
]:
f = os.path.join(self.installdir, 'usr', *executable.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(executable, want_mode, found_mode)))
for directory in [
'usr',
'usr/bin',
'usr/include',
'usr/share',
'usr/share/man',
'usr/share/man/man1',
'usr/share/subdir',
]:
f = os.path.join(self.installdir, *directory.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = 'drwxr-xr-x'
self.assertEqual(want_mode, found_mode,
msg=('Expected directory %s to have mode %s but found %s instead.' %
(directory, want_mode, found_mode)))
for datafile in [
'include/sample.h',
'share/datafile.cat',
'share/file.dat',
'share/man/man1/prog.1',
'share/subdir/datafile.dog',
]:
f = os.path.join(self.installdir, 'usr', *datafile.split('/'))
found_mode = stat.filemode(os.stat(f).st_mode)
want_mode = '-rw-r--r--'
self.assertEqual(want_mode, found_mode,
msg=('Expected file %s to have mode %s but found %s instead.' %
(datafile, want_mode, found_mode)))
def test_cpp_std_override(self):
testdir = os.path.join(self.unit_test_dir, '6 std override')
self.init(testdir)
compdb = self.get_compdb()
# Don't try to use -std=c++03 as a check for the
# presence of a compiler flag, as ICC does not
# support it.
for i in compdb:
if 'prog98' in i['file']:
c98_comp = i['command']
if 'prog11' in i['file']:
c11_comp = i['command']
if 'progp' in i['file']:
plain_comp = i['command']
self.assertNotEqual(len(plain_comp), 0)
self.assertIn('-std=c++98', c98_comp)
self.assertNotIn('-std=c++11', c98_comp)
self.assertIn('-std=c++11', c11_comp)
self.assertNotIn('-std=c++98', c11_comp)
self.assertNotIn('-std=c++98', plain_comp)
self.assertNotIn('-std=c++11', plain_comp)
# Now werror
self.assertIn('-Werror', plain_comp)
self.assertNotIn('-Werror', c98_comp)
def test_run_installed(self):
if is_cygwin() or is_osx():
raise unittest.SkipTest('LD_LIBRARY_PATH and RPATH not applicable')
testdir = os.path.join(self.unit_test_dir, '7 run installed')
self.init(testdir)
self.build()
self.install()
installed_exe = os.path.join(self.installdir, 'usr/bin/prog')
installed_libdir = os.path.join(self.installdir, 'usr/foo')
installed_lib = os.path.join(installed_libdir, 'libfoo.so')
self.assertTrue(os.path.isfile(installed_exe))
self.assertTrue(os.path.isdir(installed_libdir))
self.assertTrue(os.path.isfile(installed_lib))
# Must fail when run without LD_LIBRARY_PATH to ensure that
# rpath has been properly stripped rather than pointing to the builddir.
self.assertNotEqual(subprocess.call(installed_exe, stderr=subprocess.DEVNULL), 0)
# When LD_LIBRARY_PATH is set it should start working.
# For some reason setting LD_LIBRARY_PATH in os.environ fails
# when all tests are run (but works when only this test is run),
# but doing this explicitly works.
env = os.environ.copy()
env['LD_LIBRARY_PATH'] = ':'.join([installed_libdir, env.get('LD_LIBRARY_PATH', '')])
self.assertEqual(subprocess.call(installed_exe, env=env), 0)
# Ensure that introspect --installed works
installed = self.introspect('--installed')
for v in installed.values():
self.assertTrue('prog' in v or 'foo' in v)
@skipIfNoPkgconfig
def test_order_of_l_arguments(self):
testdir = os.path.join(self.unit_test_dir, '8 -L -l order')
os.environ['PKG_CONFIG_PATH'] = testdir
self.init(testdir)
# NOTE: .pc file has -Lfoo -lfoo -Lbar -lbar but pkg-config reorders
# the flags before returning them to -Lfoo -Lbar -lfoo -lbar
# but pkgconf seems to not do that. Sigh. Support both.
expected_order = [('-L/me/first', '-lfoo1'),
('-L/me/second', '-lfoo2'),
('-L/me/first', '-L/me/second'),
('-lfoo1', '-lfoo2'),
('-L/me/second', '-L/me/third'),
('-L/me/third', '-L/me/fourth',),
('-L/me/third', '-lfoo3'),
('-L/me/fourth', '-lfoo4'),
('-lfoo3', '-lfoo4'),
]
with open(os.path.join(self.builddir, 'build.ninja')) as ifile:
for line in ifile:
if expected_order[0][0] in line:
for first, second in expected_order:
self.assertLess(line.index(first), line.index(second))
return
raise RuntimeError('Linker entries not found in the Ninja file.')
def test_introspect_dependencies(self):
'''
Tests that mesonintrospect --dependencies returns expected output.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
self.init(testdir)
glib_found = False
gobject_found = False
deps = self.introspect('--dependencies')
self.assertIsInstance(deps, list)
for dep in deps:
self.assertIsInstance(dep, dict)
self.assertIn('name', dep)
self.assertIn('compile_args', dep)
self.assertIn('link_args', dep)
if dep['name'] == 'glib-2.0':
glib_found = True
elif dep['name'] == 'gobject-2.0':
gobject_found = True
self.assertTrue(glib_found)
self.assertTrue(gobject_found)
if subprocess.call(['pkg-config', '--exists', 'glib-2.0 >= 2.56.2']) != 0:
raise unittest.SkipTest('glib >= 2.56.2 needed for the rest')
targets = self.introspect('--targets')
docbook_target = None
for t in targets:
if t['name'] == 'generated-gdbus-docbook':
docbook_target = t
break
self.assertIsInstance(docbook_target, dict)
ifile = self.introspect(['--target-files', 'generated-gdbus-docbook@cus'])[0]
        self.assertListEqual(docbook_target['filename'], [os.path.join(self.builddir, 'gdbus/generated-gdbus-doc-' + os.path.basename(ifile))])
def test_build_rpath(self):
if is_cygwin():
raise unittest.SkipTest('Windows PE/COFF binaries do not use RPATH')
testdir = os.path.join(self.unit_test_dir, '10 build_rpath')
self.init(testdir)
self.build()
# C program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'prog'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
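        # On install the build-tree rpath entries are stripped and only the
        # install_rpath requested in the build definition should remain.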
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/prog'))
self.assertEqual(install_rpath, '/baz')
# C++ program RPATH
build_rpath = get_rpath(os.path.join(self.builddir, 'progcxx'))
self.assertEqual(build_rpath, '$ORIGIN/sub:/foo/bar')
self.install()
install_rpath = get_rpath(os.path.join(self.installdir, 'usr/bin/progcxx'))
self.assertEqual(install_rpath, 'baz')
@skip_if_not_base_option('b_sanitize')
def test_pch_with_address_sanitizer(self):
if is_cygwin():
raise unittest.SkipTest('asan not available on Cygwin')
testdir = os.path.join(self.common_test_dir, '13 pch')
self.init(testdir, ['-Db_sanitize=address'])
self.build()
compdb = self.get_compdb()
for i in compdb:
self.assertIn("-fsanitize=address", i["command"])
def test_coverage(self):
gcovr_exe, gcovr_new_rootdir = mesonbuild.environment.detect_gcovr()
if not gcovr_exe:
raise unittest.SkipTest('gcovr not found')
if not shutil.which('genhtml') and not gcovr_new_rootdir:
raise unittest.SkipTest('genhtml not found and gcovr is too old')
if 'clang' in os.environ.get('CC', ''):
# We need to use llvm-cov instead of gcovr with clang
raise unittest.SkipTest('Coverage does not work with clang right now, help wanted!')
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir, ['-Db_coverage=true'])
self.build()
self.run_tests()
self.run_target('coverage-html')
def test_cross_find_program(self):
testdir = os.path.join(self.unit_test_dir, '11 cross prog')
crossfile = tempfile.NamedTemporaryFile(mode='w')
print(os.path.join(testdir, 'some_cross_tool.py'))
crossfile.write('''[binaries]
c = '/usr/bin/cc'
ar = '/usr/bin/ar'
strip = '/usr/bin/ar'
sometool.py = ['{0}']
someothertool.py = '{0}'
[properties]
[host_machine]
system = 'linux'
cpu_family = 'arm'
cpu = 'armv7' # Not sure if correct.
endian = 'little'
'''.format(os.path.join(testdir, 'some_cross_tool.py')))
crossfile.flush()
self.meson_cross_file = crossfile.name
self.init(testdir)
def test_reconfigure(self):
testdir = os.path.join(self.unit_test_dir, '13 reconfigure')
self.init(testdir, ['-Db_coverage=true'], default_args=False)
self.build('reconfigure')
def test_vala_generated_source_buildir_inside_source_tree(self):
'''
Test that valac outputs generated C files in the expected location when
the builddir is a subdir of the source tree.
'''
if not shutil.which('valac'):
raise unittest.SkipTest('valac not installed.')
testdir = os.path.join(self.vala_test_dir, '8 generated sources')
newdir = os.path.join(self.builddir, 'srctree')
shutil.copytree(testdir, newdir)
testdir = newdir
# New builddir
builddir = os.path.join(testdir, 'subdir/_build')
os.makedirs(builddir, exist_ok=True)
self.change_builddir(builddir)
self.init(testdir)
self.build()
def test_old_gnome_module_codepaths(self):
'''
A lot of code in the GNOME module is conditional on the version of the
glib tools that are installed, and breakages in the old code can slip
by once the CI has a newer glib version. So we force the GNOME module
to pretend that it's running on an ancient glib so the fallback code is
also tested.
'''
testdir = os.path.join(self.framework_test_dir, '7 gnome')
os.environ['MESON_UNIT_TEST_PRETEND_GLIB_OLD'] = "1"
mesonbuild.modules.gnome.native_glib_version = '2.20'
self.init(testdir, inprocess=True)
self.build()
mesonbuild.modules.gnome.native_glib_version = None
@skipIfNoPkgconfig
def test_pkgconfig_usage(self):
testdir1 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependency')
testdir2 = os.path.join(self.unit_test_dir, '27 pkgconfig usage/dependee')
if subprocess.call(['pkg-config', '--cflags', 'glib-2.0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL) != 0:
raise unittest.SkipTest('Glib 2.0 dependency not available.')
with tempfile.TemporaryDirectory() as tempdirname:
self.init(testdir1, ['--prefix=' + tempdirname, '--libdir=lib'], default_args=False)
self.install(use_destdir=False)
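            # Start from a clean build dir so the dependee project below cannot
            # pick up anything left over from the dependency's build.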
shutil.rmtree(self.builddir)
os.mkdir(self.builddir)
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'libpkgdep.pc')))
lib_dir = os.path.join(tempdirname, 'lib')
os.environ['PKG_CONFIG_PATH'] = pkg_dir
# Private internal libraries must not leak out.
pkg_out = subprocess.check_output(['pkg-config', '--static', '--libs', 'libpkgdep'])
self.assertFalse(b'libpkgdep-int' in pkg_out, 'Internal library leaked out.')
# Dependencies must not leak to cflags when building only a shared library.
pkg_out = subprocess.check_output(['pkg-config', '--cflags', 'libpkgdep'])
self.assertFalse(b'glib' in pkg_out, 'Internal dependency leaked to headers.')
# Test that the result is usable.
self.init(testdir2)
self.build()
myenv = os.environ.copy()
myenv['LD_LIBRARY_PATH'] = ':'.join([lib_dir, myenv.get('LD_LIBRARY_PATH', '')])
if is_cygwin():
bin_dir = os.path.join(tempdirname, 'bin')
myenv['PATH'] = bin_dir + os.pathsep + myenv['PATH']
self.assertTrue(os.path.isdir(lib_dir))
test_exe = os.path.join(self.builddir, 'pkguser')
self.assertTrue(os.path.isfile(test_exe))
subprocess.check_call(test_exe, env=myenv)
@skipIfNoPkgconfig
def test_pkgconfig_internal_libraries(self):
        '''
        Check that a pkg-config file generated for an internal static library
        can be used to build a separate dependent project.
        '''
with tempfile.TemporaryDirectory() as tempdirname:
# build library
testdirbase = os.path.join(self.unit_test_dir, '32 pkgconfig use libraries')
testdirlib = os.path.join(testdirbase, 'lib')
self.init(testdirlib, extra_args=['--prefix=' + tempdirname,
'--libdir=lib',
'--default-library=static'], default_args=False)
self.build()
self.install(use_destdir=False)
# build user of library
pkg_dir = os.path.join(tempdirname, 'lib/pkgconfig')
os.environ['PKG_CONFIG_PATH'] = pkg_dir
self.new_builddir()
self.init(os.path.join(testdirbase, 'app'))
self.build()
@skipIfNoPkgconfig
def test_pkgconfig_formatting(self):
testdir = os.path.join(self.unit_test_dir, '38 pkgconfig format')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs-only-l', 'libsomething'], env=myenv)
deps = [b'-lgobject-2.0', b'-lgio-2.0', b'-lglib-2.0', b'-lsomething']
if is_windows() or is_cygwin() or is_osx():
            # On Windows, Cygwin and macOS, libintl is a separate library
deps.append(b'-lintl')
self.assertEqual(set(deps), set(stdo.split()))
@skipIfNoPkgconfig
@skip_if_not_language('cs')
def test_pkgconfig_csharp_library(self):
testdir = os.path.join(self.unit_test_dir, '48 pkgconfig csharp library')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
self.assertEqual("-r/usr/lib/libsomething.dll", str(stdo.decode('ascii')).strip())
@skipIfNoPkgconfig
def test_pkgconfig_link_order(self):
'''
Test that libraries are listed before their dependencies.
'''
testdir = os.path.join(self.unit_test_dir, '50 pkgconfig static link order')
self.init(testdir)
myenv = os.environ.copy()
myenv['PKG_CONFIG_PATH'] = self.privatedir
stdo = subprocess.check_output(['pkg-config', '--libs', 'libsomething'], env=myenv)
deps = stdo.split()
self.assertTrue(deps.index(b'-lsomething') < deps.index(b'-ldependency'))
def test_deterministic_dep_order(self):
'''
Test that the dependencies are always listed in a deterministic order.
'''
testdir = os.path.join(self.unit_test_dir, '43 dep order')
self.init(testdir)
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'build myexe:' in line or 'build myexe.exe:' in line:
self.assertIn('liblib1.a liblib2.a', line)
return
raise RuntimeError('Could not find the build rule')
def test_deterministic_rpath_order(self):
'''
Test that the rpaths are always listed in a deterministic order.
'''
if is_cygwin():
raise unittest.SkipTest('rpath are not used on Cygwin')
testdir = os.path.join(self.unit_test_dir, '42 rpath order')
self.init(testdir)
if is_osx():
rpathre = re.compile('-rpath,.*/subprojects/sub1.*-rpath,.*/subprojects/sub2')
else:
            rpathre = re.compile(r'-rpath,\$\$ORIGIN/subprojects/sub1:\$\$ORIGIN/subprojects/sub2')
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if '-rpath' in line:
self.assertRegex(line, rpathre)
return
raise RuntimeError('Could not find the rpath')
def test_override_with_exe_dep(self):
'''
Test that we produce the correct dependencies when a program is overridden with an executable.
'''
testdir = os.path.join(self.common_test_dir, '202 override with exe')
self.init(testdir)
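        # In ninja syntax '|' introduces implicit dependencies, so the
        # overriding executable must show up there for the generated sources.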
with open(os.path.join(self.builddir, 'build.ninja')) as bfile:
for line in bfile:
if 'main1.c:' in line or 'main2.c:' in line:
self.assertIn('| subprojects/sub/foobar', line)
@skipIfNoPkgconfig
def test_usage_external_library(self):
'''
Test that uninstalled usage of an external library (from the system or
PkgConfigDependency) works. On macOS, this workflow works out of the
box. On Linux, BSDs, Windows, etc, you need to set extra arguments such
as LD_LIBRARY_PATH, etc, so this test is skipped.
The system library is found with cc.find_library() and pkg-config deps.
'''
oldprefix = self.prefix
# Install external library so we can find it
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'external library')
# install into installdir without using DESTDIR
installdir = self.installdir
self.prefix = installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
self.install(use_destdir=False)
## New builddir for the consumer
self.new_builddir()
os.environ['LIBRARY_PATH'] = os.path.join(installdir, self.libdir)
os.environ['PKG_CONFIG_PATH'] = os.path.join(installdir, self.libdir, 'pkgconfig')
testdir = os.path.join(self.unit_test_dir, '40 external, internal library rpath', 'built library')
# install into installdir without using DESTDIR
self.prefix = self.installdir
self.init(testdir)
self.prefix = oldprefix
self.build()
# test uninstalled
self.run_tests()
if not is_osx():
# Rest of the workflow only works on macOS
return
# test running after installation
self.install(use_destdir=False)
prog = os.path.join(self.installdir, 'bin', 'prog')
self._run([prog])
out = self._run(['otool', '-L', prog])
self.assertNotIn('@rpath', out)
## New builddir for testing that DESTDIR is not added to install_name
self.new_builddir()
# install into installdir with DESTDIR
self.init(testdir)
self.build()
# test running after installation
self.install()
prog = self.installdir + os.path.join(self.prefix, 'bin', 'prog')
lib = self.installdir + os.path.join(self.prefix, 'lib', 'libbar_built.dylib')
for f in prog, lib:
out = self._run(['otool', '-L', f])
# Ensure that the otool output does not contain self.installdir
self.assertNotRegex(out, self.installdir + '.*dylib ')
def install_subdir_invalid_symlinks(self, testdir, subdir_path):
'''
Test that installation of broken symlinks works fine.
https://github.com/mesonbuild/meson/issues/3914
'''
testdir = os.path.join(self.common_test_dir, testdir)
subdir = os.path.join(testdir, subdir_path)
curdir = os.getcwd()
os.chdir(subdir)
# Can't distribute broken symlinks in the source tree because it breaks
# the creation of zipapps. Create it dynamically and run the test by
# hand.
src = '../../nonexistent.txt'
os.symlink(src, 'invalid-symlink.txt')
try:
self.init(testdir)
self.build()
self.install()
install_path = subdir_path.split(os.path.sep)[-1]
link = os.path.join(self.installdir, 'usr', 'share', install_path, 'invalid-symlink.txt')
self.assertTrue(os.path.islink(link), msg=link)
self.assertEqual(src, os.readlink(link))
self.assertFalse(os.path.isfile(link), msg=link)
finally:
os.remove(os.path.join(subdir, 'invalid-symlink.txt'))
os.chdir(curdir)
def test_install_subdir_symlinks(self):
self.install_subdir_invalid_symlinks('63 install subdir', os.path.join('sub', 'sub1'))
def test_install_subdir_symlinks_with_default_umask(self):
self.install_subdir_invalid_symlinks('196 install_mode', 'sub2')
def test_install_subdir_symlinks_with_default_umask_and_mode(self):
self.install_subdir_invalid_symlinks('196 install_mode', 'sub1')
@skipIfNoPkgconfigDep('gmodule-2.0')
def test_ldflag_dedup(self):
testdir = os.path.join(self.unit_test_dir, '49 ldflagdedup')
if is_cygwin() or is_osx():
raise unittest.SkipTest('Not applicable on Cygwin or OSX.')
self.init(testdir)
build_ninja = os.path.join(self.builddir, 'build.ninja')
max_count = 0
search_term = '-Wl,--export-dynamic'
with open(build_ninja, 'r', encoding='utf-8') as f:
for line in f:
max_count = max(max_count, line.count(search_term))
self.assertEqual(max_count, 1, 'Export dynamic incorrectly deduplicated.')
class LinuxCrossArmTests(BasePlatformTests):
'''
Tests that cross-compilation to Linux/ARM works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'ubuntu-armhf.txt')
def test_cflags_cross_environment_pollution(self):
'''
Test that the CFLAGS environment variable does not pollute the cross
environment. This can't be an ordinary test case because we need to
inspect the compiler database.
'''
testdir = os.path.join(self.common_test_dir, '3 static')
os.environ['CFLAGS'] = '-DBUILD_ENVIRONMENT_ONLY'
self.init(testdir)
compdb = self.get_compdb()
self.assertNotIn('-DBUILD_ENVIRONMENT_ONLY', compdb[0]['command'])
def test_cross_file_overrides_always_args(self):
'''
Test that $lang_args in cross files always override get_always_args().
Needed for overriding the default -D_FILE_OFFSET_BITS=64 on some
architectures such as some Android versions and Raspbian.
https://github.com/mesonbuild/meson/issues/3049
https://github.com/mesonbuild/meson/issues/3089
'''
testdir = os.path.join(self.unit_test_dir, '33 cross file overrides always args')
self.meson_cross_file = os.path.join(testdir, 'ubuntu-armhf-overrides.txt')
self.init(testdir)
compdb = self.get_compdb()
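        # The -U from the cross file must come after the compiler's built-in
        # -D_FILE_OFFSET_BITS=64 so that it takes effect.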
self.assertRegex(compdb[0]['command'], '-D_FILE_OFFSET_BITS=64.*-U_FILE_OFFSET_BITS')
self.build()
def test_cross_libdir(self):
# When cross compiling "libdir" should default to "lib"
# rather than "lib/x86_64-linux-gnu" or something like that.
testdir = os.path.join(self.common_test_dir, '1 trivial')
self.init(testdir)
for i in self.introspect('--buildoptions'):
if i['name'] == 'libdir':
self.assertEqual(i['value'], 'lib')
return
self.assertTrue(False, 'Option libdir not in introspect data.')
class LinuxCrossMingwTests(BasePlatformTests):
'''
Tests that cross-compilation to Windows/MinGW works
'''
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.meson_cross_file = os.path.join(src_root, 'cross', 'linux-mingw-w64-64bit.txt')
def test_exe_wrapper_behaviour(self):
'''
Test that an exe wrapper that isn't found doesn't cause compiler sanity
checks and compiler checks to fail, but causes configure to fail if it
requires running a cross-built executable (custom_target or run_target)
and causes the tests to be skipped if they are run.
'''
testdir = os.path.join(self.unit_test_dir, '36 exe_wrapper behaviour')
# Configures, builds, and tests fine by default
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
os.mkdir(self.builddir)
# Change cross file to use a non-existing exe_wrapper and it should fail
self.meson_cross_file = os.path.join(testdir, 'broken-cross.txt')
# Force tracebacks so we can detect them properly
os.environ['MESON_FORCE_BACKTRACE'] = '1'
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*target.*use-exe-wrapper'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Drun-target=false', inprocess=True)
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*run target.*run-prog'):
# Must run in-process or we'll get a generic CalledProcessError
self.init(testdir, extra_args='-Dcustom-target=false', inprocess=True)
self.init(testdir, extra_args=['-Dcustom-target=false', '-Drun-target=false'])
self.build()
with self.assertRaisesRegex(MesonException, 'exe_wrapper.*PATH'):
# Must run in-process or we'll get a generic CalledProcessError
self.run_tests(inprocess=True)
class PythonTests(BasePlatformTests):
'''
Tests that verify compilation of python extension modules
'''
def test_versions(self):
if self.backend is not Backend.ninja:
raise unittest.SkipTest('Skipping python tests with {} backend'.format(self.backend.name))
testdir = os.path.join(self.src_root, 'test cases', 'unit', '39 python extmodule')
# No python version specified, this will use meson's python
self.init(testdir)
self.build()
self.run_tests()
self.wipe()
# When specifying a known name, (python2 / python3) the module
# will also try 'python' as a fallback and use it if the major
# version matches
try:
self.init(testdir, ['-Dpython=python2'])
self.build()
self.run_tests()
except unittest.SkipTest:
# python2 is not necessarily installed on the test machine,
# if it is not, or the python headers can't be found, the test
# will raise MESON_SKIP_TEST, we could check beforehand what version
# of python is available, but it's a bit of a chicken and egg situation,
# as that is the job of the module, so we just ask for forgiveness rather
# than permission.
pass
self.wipe()
for py in ('pypy', 'pypy3'):
try:
self.init(testdir, ['-Dpython=%s' % py])
except unittest.SkipTest:
# Same as above, pypy2 and pypy3 are not expected to be present
# on the test system, the test project only raises in these cases
continue
# We have a pypy, this is expected to work
self.build()
self.run_tests()
self.wipe()
# The test is configured to error out with MESON_SKIP_TEST
# in case it could not find python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, ['-Dpython=not-python'])
self.wipe()
# While dir is an external command on both Windows and Linux,
# it certainly isn't python
with self.assertRaises(unittest.SkipTest):
self.init(testdir, ['-Dpython=dir'])
self.wipe()
class RewriterTests(unittest.TestCase):
def setUp(self):
super().setUp()
src_root = os.path.dirname(__file__)
self.testroot = os.path.realpath(tempfile.mkdtemp())
self.rewrite_command = python_command + [os.path.join(src_root, 'mesonrewriter.py')]
self.tmpdir = os.path.realpath(tempfile.mkdtemp())
self.workdir = os.path.join(self.tmpdir, 'foo')
self.test_dir = os.path.join(src_root, 'test cases/rewrite')
def tearDown(self):
windows_proof_rmtree(self.tmpdir)
def read_contents(self, fname):
with open(os.path.join(self.workdir, fname)) as f:
return f.read()
def check_effectively_same(self, mainfile, truth):
mf = self.read_contents(mainfile)
t = self.read_contents(truth)
# Rewriting is not guaranteed to do a perfect job of
# maintaining whitespace.
self.assertEqual(mf.replace(' ', ''), t.replace(' ', ''))
def prime(self, dirname):
shutil.copytree(os.path.join(self.test_dir, dirname), self.workdir)
def test_basic(self):
self.prime('1 basic')
subprocess.check_call(self.rewrite_command + ['remove',
'--target=trivialprog',
'--filename=notthere.c',
'--sourcedir', self.workdir],
universal_newlines=True)
self.check_effectively_same('meson.build', 'removed.txt')
subprocess.check_call(self.rewrite_command + ['add',
'--target=trivialprog',
'--filename=notthere.c',
'--sourcedir', self.workdir],
universal_newlines=True)
self.check_effectively_same('meson.build', 'added.txt')
subprocess.check_call(self.rewrite_command + ['remove',
'--target=trivialprog',
'--filename=notthere.c',
'--sourcedir', self.workdir],
universal_newlines=True)
self.check_effectively_same('meson.build', 'removed.txt')
def test_subdir(self):
self.prime('2 subdirs')
top = self.read_contents('meson.build')
s2 = self.read_contents('sub2/meson.build')
subprocess.check_call(self.rewrite_command + ['remove',
'--target=something',
'--filename=second.c',
'--sourcedir', self.workdir],
universal_newlines=True)
self.check_effectively_same('sub1/meson.build', 'sub1/after.txt')
self.assertEqual(top, self.read_contents('meson.build'))
self.assertEqual(s2, self.read_contents('sub2/meson.build'))
class NativeFileTests(BasePlatformTests):
def setUp(self):
super().setUp()
self.testcase = os.path.join(self.unit_test_dir, '46 native file binary')
self.current_config = 0
self.current_wrapper = 0
def helper_create_native_file(self, values):
"""Create a config file as a temporary file.
values should be a nested dictionary structure of {section: {key:
value}}
"""
filename = os.path.join(self.builddir, 'generated{}.config'.format(self.current_config))
self.current_config += 1
with open(filename, 'wt') as f:
for section, entries in values.items():
f.write('[{}]\n'.format(section))
for k, v in entries.items():
f.write("{}='{}'\n".format(k, v))
return filename
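    # Illustrative only (not executed as part of the tests): given the writer
    # above, a call such as
    #   helper_create_native_file({'binaries': {'bash': '/tmp/wrap.py'}})
    # would emit a file of the form
    #   [binaries]
    #   bash='/tmp/wrap.py'
    # which is the syntax the tests below feed to meson via --native-file.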
def helper_create_binary_wrapper(self, binary, **kwargs):
"""Creates a wrapper around a binary that overrides specific values."""
filename = os.path.join(self.builddir, 'binary_wrapper{}.py'.format(self.current_wrapper))
self.current_wrapper += 1
if is_haiku():
chbang = '#!/bin/env python3'
else:
chbang = '#!/usr/bin/env python3'
with open(filename, 'wt') as f:
f.write(textwrap.dedent('''\
{}
import argparse
import subprocess
import sys
def main():
parser = argparse.ArgumentParser()
'''.format(chbang)))
for name in kwargs:
f.write(' parser.add_argument("-{0}", "--{0}", action="store_true")\n'.format(name))
f.write(' args, extra_args = parser.parse_known_args()\n')
for name, value in kwargs.items():
f.write(' if args.{}:\n'.format(name))
f.write(' print("{}", file=sys.{})\n'.format(value, kwargs.get('outfile', 'stdout')))
f.write(' sys.exit(0)\n')
f.write(textwrap.dedent('''
ret = subprocess.run(
["{}"] + extra_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding='utf-8')
print(ret.stdout)
print(ret.stderr, file=sys.stderr)
sys.exit(ret.returncode)
if __name__ == '__main__':
main()
'''.format(binary)))
if not is_windows():
os.chmod(filename, 0o755)
return filename
# On windows we need yet another level of indirection, as cmd cannot
# invoke python files itself, so instead we generate a .bat file, which
# invokes our python wrapper
batfile = os.path.join(self.builddir, 'binary_wrapper{}.bat'.format(self.current_wrapper))
with open(batfile, 'wt') as f:
if mesonbuild.environment.detect_msys2_arch():
f.write(r'@python3 {} %*'.format(filename))
else:
f.write('@py -3 {} %*'.format(filename))
return batfile
def helper_for_compiler(self, lang, cb):
"""Helper for generating tests for overriding compilers for langaugages
with more than one implementation, such as C, C++, ObjC, ObjC++, and D.
"""
env = get_fake_env('', '', '')
getter = getattr(env, 'detect_{}_compiler'.format(lang))
if lang not in ['cs']:
getter = functools.partial(getter, False)
cc = getter()
binary, newid = cb(cc)
env.binaries.host.binaries[lang] = binary
compiler = getter()
self.assertEqual(compiler.id, newid)
def test_multiple_native_files_override(self):
wrapper = self.helper_create_binary_wrapper('bash', version='foo')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config2 = self.helper_create_native_file({'binaries': {'bash': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def test_multiple_native_files(self):
wrapper = self.helper_create_binary_wrapper('bash', version='12345')
config = self.helper_create_native_file({'binaries': {'bash': wrapper}})
wrapper = self.helper_create_binary_wrapper('python')
config2 = self.helper_create_native_file({'binaries': {'python': wrapper}})
self.init(self.testcase, extra_args=[
'--native-file', config, '--native-file', config2,
'-Dcase=find_program'])
def _simple_test(self, case, binary):
wrapper = self.helper_create_binary_wrapper(binary, version='12345')
config = self.helper_create_native_file({'binaries': {binary: wrapper}})
self.init(self.testcase, extra_args=['--native-file', config, '-Dcase={}'.format(case)])
def test_find_program(self):
self._simple_test('find_program', 'bash')
def test_config_tool_dep(self):
# Do the skip at this level to avoid screwing up the cache
if mesonbuild.environment.detect_msys2_arch():
raise unittest.SkipTest('Skipped due to problems with LLVM on MSYS2')
if not shutil.which('llvm-config'):
            raise unittest.SkipTest('llvm-config not installed, cannot test')
self._simple_test('config_dep', 'llvm-config')
def test_python3_module(self):
self._simple_test('python3', 'python3')
def test_python_module(self):
if is_windows():
# Bat adds extra crap to stdout, so the version check logic in the
# python module breaks. This is fine on other OSes because they
# don't need the extra indirection.
raise unittest.SkipTest('bat indirection breaks internal sanity checks.')
self._simple_test('python', 'python')
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_value('CC')
def test_c_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not shutil.which('gcc'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('c', cb)
@unittest.skipIf(is_windows(), 'Setting up multiple compilers on windows is hard')
@skip_if_env_value('CXX')
def test_cpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not shutil.which('g++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('cpp', cb)
@skip_if_not_language('objc')
@skip_if_env_value('OBJC')
def test_objc_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang', 'clang'
if not shutil.which('gcc'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'gcc', 'gcc'
self.helper_for_compiler('objc', cb)
@skip_if_not_language('objcpp')
@skip_if_env_value('OBJCXX')
def test_objcpp_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if not shutil.which('clang++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'clang++', 'clang'
if not shutil.which('g++'):
raise unittest.SkipTest('Only one compiler found, cannot test.')
return 'g++', 'gcc'
self.helper_for_compiler('objcpp', cb)
@skip_if_not_language('d')
@skip_if_env_value('DC')
def test_d_compiler(self):
def cb(comp):
if comp.id == 'dmd':
if shutil.which('ldc'):
return 'ldc', 'ldc'
elif shutil.which('gdc'):
return 'gdc', 'gdc'
else:
raise unittest.SkipTest('No alternative dlang compiler found.')
if shutil.which('dmd'):
return 'dmd', 'dmd'
raise unittest.SkipTest('No alternative dlang compiler found.')
self.helper_for_compiler('d', cb)
@skip_if_not_language('cs')
@skip_if_env_value('CSC')
def test_cs_compiler(self):
def cb(comp):
if comp.id == 'csc':
if not shutil.which('mcs'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'mcs', 'mcs'
if not shutil.which('csc'):
raise unittest.SkipTest('No alternate C# implementation.')
return 'csc', 'csc'
self.helper_for_compiler('cs', cb)
@skip_if_not_language('fortran')
@skip_if_env_value('FC')
def test_fortran_compiler(self):
def cb(comp):
if comp.id == 'gcc':
if shutil.which('ifort'):
return 'ifort', 'intel'
# XXX: there are several other fortran compilers meson
# supports, but I don't have any of them to test with
raise unittest.SkipTest('No alternate Fortran implementation.')
if not shutil.which('gfortran'):
                raise unittest.SkipTest('No alternate Fortran implementation.')
return 'gfortran', 'gcc'
self.helper_for_compiler('fortran', cb)
def _single_implementation_compiler(self, lang, binary, version_str, version):
"""Helper for languages with a single (supported) implementation.
Builds a wrapper around the compiler to override the version.
"""
wrapper = self.helper_create_binary_wrapper(binary, version=version_str)
env = get_fake_env('', '', '')
getter = getattr(env, 'detect_{}_compiler'.format(lang))
if lang in ['rust']:
getter = functools.partial(getter, False)
env.binaries.host.binaries[lang] = wrapper
compiler = getter()
self.assertEqual(compiler.version, version)
@skip_if_not_language('vala')
@skip_if_env_value('VALAC')
def test_vala_compiler(self):
self._single_implementation_compiler(
'vala', 'valac', 'Vala 1.2345', '1.2345')
@skip_if_not_language('rust')
@skip_if_env_value('RUSTC')
def test_rust_compiler(self):
self._single_implementation_compiler(
'rust', 'rustc', 'rustc 1.2345', '1.2345')
@skip_if_not_language('java')
def test_java_compiler(self):
self._single_implementation_compiler(
'java', 'javac', 'javac 9.99.77', '9.99.77')
@skip_if_not_language('swift')
def test_swift_compiler(self):
wrapper = self.helper_create_binary_wrapper(
'swiftc', version='Swift 1.2345', outfile='stderr')
env = get_fake_env('', '', '')
env.binaries.host.binaries['swift'] = wrapper
compiler = env.detect_swift_compiler()
self.assertEqual(compiler.version, '1.2345')
def unset_envs():
# For unit tests we must fully control all command lines
# so that there are no unexpected changes coming from the
# environment, for example when doing a package build.
varnames = ['CPPFLAGS', 'LDFLAGS'] + list(mesonbuild.compilers.compilers.cflags_mapping.values())
for v in varnames:
if v in os.environ:
del os.environ[v]
def should_run_cross_arm_tests():
return shutil.which('arm-linux-gnueabihf-gcc') and not platform.machine().lower().startswith('arm')
def should_run_cross_mingw_tests():
return shutil.which('x86_64-w64-mingw32-gcc') and not (is_windows() or is_cygwin())
def main():
unset_envs()
cases = ['InternalTests', 'DataTests', 'AllPlatformTests', 'FailureTests',
'PythonTests', 'NativeFileTests']
if not is_windows():
cases += ['LinuxlikeTests']
if should_run_cross_arm_tests():
cases += ['LinuxCrossArmTests']
if should_run_cross_mingw_tests():
cases += ['LinuxCrossMingwTests']
if is_windows() or is_cygwin():
cases += ['WindowsTests']
if is_osx():
cases += ['DarwinTests']
return unittest.main(defaultTest=cases, buffer=True)
if __name__ == '__main__':
sys.exit(main())
| 45.482602
| 199
| 0.585875
|
7f29599ade318ec5f44f34edfceb4539635dac58
| 518
|
py
|
Python
|
RecoEgamma/ElectronIdentification/python/VIDElectronSelector.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2021-11-30T16:24:46.000Z
|
2021-11-30T16:24:46.000Z
|
RecoEgamma/ElectronIdentification/python/VIDElectronSelector.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 4
|
2021-11-29T13:57:56.000Z
|
2022-03-29T06:28:36.000Z
|
RecoEgamma/ElectronIdentification/python/VIDElectronSelector.py
|
PKUfudawei/cmssw
|
8fbb5ce74398269c8a32956d7c7943766770c093
|
[
"Apache-2.0"
] | 1
|
2022-02-27T06:12:26.000Z
|
2022-02-27T06:12:26.000Z
|
import ROOT
from PhysicsTools.SelectorUtils.VIDSelectorBase import VIDSelectorBase
class VIDElectronSelector(VIDSelectorBase):
def __init__(self,pythonpset = None):
builder = ROOT.MakeVersionedSelector(ROOT.reco.GsfElectron)
ptrmaker = ROOT.MakePtrFromCollection(ROOT.vector(ROOT.pat.Electron),
ROOT.pat.Electron,
ROOT.reco.GsfElectron)
VIDSelectorBase.__init__(self,builder,ptrmaker,pythonpset)
| 47.090909
| 77
| 0.65251
|
658066032eedceca2ea1f42e08874cfe58c653b2
| 2,799
|
py
|
Python
|
algo/distributions.py
|
yunshengtian/ppo-mujoco
|
1989bc5491d2abc3d015d0ec81d34ea166c3352b
|
[
"MIT"
] | 1
|
2021-01-27T08:59:31.000Z
|
2021-01-27T08:59:31.000Z
|
algo/distributions.py
|
yunshengtian/ppo-mujoco
|
1989bc5491d2abc3d015d0ec81d34ea166c3352b
|
[
"MIT"
] | null | null | null |
algo/distributions.py
|
yunshengtian/ppo-mujoco
|
1989bc5491d2abc3d015d0ec81d34ea166c3352b
|
[
"MIT"
] | 1
|
2021-01-20T07:56:54.000Z
|
2021-01-20T07:56:54.000Z
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from algo.utils import AddBias, init
"""
Modify standard PyTorch distributions so they are compatible with this code.
"""
#
# Standardize distribution interfaces
#
# Categorical
class FixedCategorical(torch.distributions.Categorical):
def sample(self):
return super().sample().unsqueeze(-1)
def log_probs(self, actions):
return (
super()
.log_prob(actions.squeeze(-1))
.view(actions.size(0), -1)
.sum(-1)
.unsqueeze(-1)
)
def mode(self):
return self.probs.argmax(dim=-1, keepdim=True)
# Normal
class FixedNormal(torch.distributions.Normal):
def log_probs(self, actions):
return super().log_prob(actions).sum(-1, keepdim=True)
    def entropy(self):
        return super().entropy().sum(-1)
def mode(self):
return self.mean
# Bernoulli
class FixedBernoulli(torch.distributions.Bernoulli):
def log_probs(self, actions):
        return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)
def entropy(self):
return super().entropy().sum(-1)
def mode(self):
return torch.gt(self.probs, 0.5).float()
class Categorical(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Categorical, self).__init__()
init_ = lambda m: init(
m,
nn.init.orthogonal_,
lambda x: nn.init.constant_(x, 0),
gain=0.01)
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedCategorical(logits=x)
class DiagGaussian(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(DiagGaussian, self).__init__()
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
self.fc_mean = init_(nn.Linear(num_inputs, num_outputs))
self.logstd = AddBias(torch.zeros(num_outputs))
def forward(self, x):
action_mean = self.fc_mean(x)
# An ugly hack for my KFAC implementation.
zeros = torch.zeros(action_mean.size())
if x.is_cuda:
zeros = zeros.cuda()
action_logstd = self.logstd(zeros)
return FixedNormal(action_mean, action_logstd.exp())
class Bernoulli(nn.Module):
def __init__(self, num_inputs, num_outputs):
super(Bernoulli, self).__init__()
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
self.linear = init_(nn.Linear(num_inputs, num_outputs))
def forward(self, x):
x = self.linear(x)
return FixedBernoulli(logits=x)
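if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module): feed
    # a batch of features through a DiagGaussian head and query the standardized
    # distribution interface defined above.
    feats = torch.randn(3, 8)
    head = DiagGaussian(num_inputs=8, num_outputs=2)
    dist = head(feats)
    actions = dist.sample()            # (3, 2) sampled actions
    print(dist.log_probs(actions))     # (3, 1) log-probs summed over action dims
    print(dist.entropy())              # (3,) entropy summed over action dims
    print(dist.mode())                 # deterministic action = distribution mean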
| 25.445455
| 86
| 0.618078
|
bf65f35ed0ef2ef97a8d9512285e050e9b548821
| 2,204
|
py
|
Python
|
tests/functional/test_scoping/test_metamodel_provider.py
|
goto40/textX-gui-experiment2
|
bcacc33f6e6cd49468fe3a4746b01505af2b4862
|
[
"MIT"
] | null | null | null |
tests/functional/test_scoping/test_metamodel_provider.py
|
goto40/textX-gui-experiment2
|
bcacc33f6e6cd49468fe3a4746b01505af2b4862
|
[
"MIT"
] | null | null | null |
tests/functional/test_scoping/test_metamodel_provider.py
|
goto40/textX-gui-experiment2
|
bcacc33f6e6cd49468fe3a4746b01505af2b4862
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from os.path import dirname, abspath
from pytest import raises
import textx.scoping as scoping
import textx.scoping.providers as scoping_providers
from textx import metamodel_from_file
from textx.scoping.tools import get_unique_named_object_in_all_models
def test_metamodel_provider_basic_test():
"""
This test checks that the global MetaModel Provider
works (basic function): It is checked that no filename patterns
are used twice. It is checked that the correct metamodel
is used to load a model (by loading a model constellation using
two metamodels).
"""
#################################
# META MODEL DEF
#################################
mm_components = metamodel_from_file(
abspath(dirname(__file__)) + '/metamodel_provider/Components.tx')
mm_components.register_scope_providers({
"*.*": scoping_providers.FQNImportURI(),
"Connection.from_port":
scoping_providers.RelativeName("from_inst.component.slots"),
"Connection.to_port":
scoping_providers.RelativeName("to_inst.component.slots"),
})
mm_users = metamodel_from_file(
abspath(dirname(__file__)) + '/metamodel_provider/Users.tx')
mm_users.register_scope_providers({
"*.*": scoping_providers.FQNImportURI(),
})
scoping.MetaModelProvider.add_metamodel("*.components", mm_components)
scoping.MetaModelProvider.add_metamodel("*.users", mm_users)
with raises(Exception, match=r'.*pattern.*already registered.*'):
scoping.MetaModelProvider.add_metamodel("*.users", mm_users)
#################################
# MODEL PARSING
#################################
my_model = mm_users.model_from_file(
abspath(dirname(__file__)) + "/metamodel_provider/example.users")
#################################
# TEST MODEL
#################################
user = get_unique_named_object_in_all_models(my_model, "pi")
action1 = get_unique_named_object_in_all_models(my_model, "action1")
assert user.instance is action1
#################################
# END
#################################
| 33.907692
| 74
| 0.630218
|
e17d36494fefb55cddf03a24e59c690bbdc0c1a7
| 233
|
py
|
Python
|
pl_bolts/loggers/__init__.py
|
dkmiller/pytorch-lightning-bolts
|
1373f3843770a05a9f3b19752da7c095367702ed
|
[
"Apache-2.0"
] | null | null | null |
pl_bolts/loggers/__init__.py
|
dkmiller/pytorch-lightning-bolts
|
1373f3843770a05a9f3b19752da7c095367702ed
|
[
"Apache-2.0"
] | null | null | null |
pl_bolts/loggers/__init__.py
|
dkmiller/pytorch-lightning-bolts
|
1373f3843770a05a9f3b19752da7c095367702ed
|
[
"Apache-2.0"
] | null | null | null |
"""
Collection of PyTorchLightning loggers
"""
__all__ = []
try:
from pl_bolts.loggers.azureml import AzureMlLogger
except ImportError: # pragma: no-cover
pass # pragma: no-cover
else:
__all__.append('AzureMlLogger')
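# Illustrative extension of the optional-import pattern above (comments only;
# "SomeOtherLogger" is a placeholder, not an actual pl_bolts logger):
#
#   try:
#       from pl_bolts.loggers.some_other import SomeOtherLogger
#   except ImportError:  # pragma: no-cover
#       pass  # pragma: no-cover
#   else:
#       __all__.append('SomeOtherLogger')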
| 17.923077
| 54
| 0.716738
|
6ce501b160dda321ec6b8a40eae3f1c0aa28c311
| 732
|
py
|
Python
|
test/test_delete.py
|
100loto/Mantis
|
e4d4a4627354f77c7a1e1e33a42a1aede3a2f505
|
[
"Apache-2.0"
] | null | null | null |
test/test_delete.py
|
100loto/Mantis
|
e4d4a4627354f77c7a1e1e33a42a1aede3a2f505
|
[
"Apache-2.0"
] | null | null | null |
test/test_delete.py
|
100loto/Mantis
|
e4d4a4627354f77c7a1e1e33a42a1aede3a2f505
|
[
"Apache-2.0"
] | null | null | null |
import random
import string
from model.project import Project
def test_delete_project(app, soap):
if len(app.project.get_list()) == 0:
app.project.create(Project(name=random_string("project_of", 10), description=random_string("description:", 50)))
old_projects = soap.get_list()
project = random.choice(old_projects)
app.project.delete_project_by_name(project)
new_projects = soap.get_list()
assert len(old_projects) - 1 == len(new_projects)
old_projects.remove(project)
assert old_projects == new_projects
def random_string(prefix, maxlen):
symbols = string.ascii_letters + string.digits
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
| 34.857143
| 120
| 0.734973
|
11f581b59822f74735d3a59c6f10a27becf4245f
| 254
|
py
|
Python
|
predavanje3/uvjetna_naredba_if_elif_else1.py
|
Miillky/uvod_u_programiranje
|
209611e38c8fe84c727649df4b868a4278eb77c3
|
[
"MIT"
] | null | null | null |
predavanje3/uvjetna_naredba_if_elif_else1.py
|
Miillky/uvod_u_programiranje
|
209611e38c8fe84c727649df4b868a4278eb77c3
|
[
"MIT"
] | null | null | null |
predavanje3/uvjetna_naredba_if_elif_else1.py
|
Miillky/uvod_u_programiranje
|
209611e38c8fe84c727649df4b868a4278eb77c3
|
[
"MIT"
] | null | null | null |
print('Program za provjeru unesenog broj:')
broj = int(input("Unesi priroda broj:"))
if broj<0:
print('Broj {0} je negativan'.format(broj))
elif broj==0:
print('Broj {0} je nula'.format(broj))
else:
print('Broj {0} je pozitivan'.format(broj))
| 31.75
| 47
| 0.669291
|
63800c7bae35b9e962faf9082ff0257c67e78ef3
| 5,877
|
py
|
Python
|
neolib/http/Page.py
|
kemcbride/Neolib
|
a3e61cef74328aa10a757155eb930b18ccad7bae
|
[
"MIT"
] | null | null | null |
neolib/http/Page.py
|
kemcbride/Neolib
|
a3e61cef74328aa10a757155eb930b18ccad7bae
|
[
"MIT"
] | 10
|
2017-08-30T18:34:33.000Z
|
2020-06-25T11:17:17.000Z
|
neolib/http/Page.py
|
kemcbride/Neolib
|
a3e61cef74328aa10a757155eb930b18ccad7bae
|
[
"MIT"
] | null | null | null |
""":mod:`Page` -- Provides an interface for HTTP communicating and HTML parsing
.. module:: Page
:synopsis: Provides an interface for HTTP communicating and HTML parsing
.. moduleauthor:: Joshua Gilman <joshuagilman@gmail.com>
"""
from neolib.http.HTTPForm import HTTPForm
from bs4 import BeautifulSoup
import requests
class Page(BeautifulSoup):
"""Represents an HTML web page
Provides an interface for handling an HTTP web page by subclassing the popular
HTML parsing library, BeautifulSoup, to allow for easy page exploration and utilizing
the Requests library for handling HTTP requests. This class aims to intertwine both
popular libraries to create one accessible class.
Attributes
resp (Response) -- A Requests Response object representing the HTTP Response
request (Request) -- A Requests Request object representing the HTTP Request
header (dict) -- All HTTP Response Header Variables
content (str) -- Page content
url (str) -- Page URL
postData(dict) -- POST data Page was initialized with
        vars (dict) -- Additional HTTP Request Header variables Page was initialized with
usr (User) -- User that initialized the page
Initialization
Page(url, usr=None, session=None, postData=None, vars=None, proxy=None)
Requests a remote document and initializes Page with the data
Either uses the supplied Requests session or uses a new Requests session to request
a remote document using the given parameters. Uses the received content to load basic
page details and initiate the parent class, BeautifulSoup.
Parameters
url (str) -- Remote URL address of the page
usr (User) -- Optional user to initiate the page with
session (request-client) -- Requests session to use with making the request
postData (dict) -- POST data {name: 'value'} sent with HTTP Request
vars (dict) -- HTTP Request variables {name: 'value'} sent with HTTP Request
proxy (tuple) -- Proxy host and port to connect with
Example
>>> pg = Page("http://www.neopets.com/index.phtml")
>>> pg.content
<!DOCTYPE HTML PUBLIC> .....
>>> pg.a
<a href="http://www.petpetpark.com/">petpet park</a>
"""
_wrapper = None
resp = None
request = None
header = None
content = ""
url = ""
postData = None
vars = None
usr = None
_defaultVars = {"USER-AGENT": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:14.0) Gecko/20100101 Firefox/14.0.1",
"ACCEPT": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"ACCEPT-LANGUAGE": "en-us,en;q=0.5"}
def __init__(self, url, usr=None, session=None, postData=None, vars=None, proxy=None):
self.url = url
self.postData = postData
self.vars = vars
if not session and not usr:
if postData:
r = requests.post(url, data=postData, headers=vars, proxies=proxy)
else:
r = requests.get(url, headers=vars, proxies=proxy)
elif usr:
if postData:
r = usr.session.post(url, data=postData, headers=vars, proxies=proxy)
else:
r = usr.session.get(url, headers=vars, proxies=proxy)
elif session:
if postData:
r = session.post(url, data=postData, headers=vars, proxies=proxy)
else:
r = session.get(url, headers=vars, proxies=proxy)
self.resp = r
self.request = r.request
self.header = r.headers
self.content = r.text
self.usr = usr
if "text/html" in r.headers['content-type']:
BeautifulSoup.__init__(self, r.content, "html.parser")
else:
self.content = r.content
def form(self, usr=None, **kwargs):
""" Returns an HTTPForm that matches the given criteria
Searches for an HTML form in the page's content matching the criteria
specified in kwargs. If a form is found, returns an HTTPForm instance
that represents the form.
Parameters:
usr (User) -- User to associate with the form (used in HTTPForm.submit()
**kwargs (dict) -- Data to search for the form with (I.E action='blah.phtml')
Returns
HTTPForm - Represents the HTML form
"""
if self.usr: usr = self.usr
if self.find("form", kwargs):
return HTTPForm(usr, self.url, self.find("form", kwargs))
@staticmethod
def newSession():
""" Returns a new Requests session with pre-loaded default HTTP Headers
Generates a new Requests session and consults with the Configuration class
to determine if a Configuration exists and attempts to use the configured
HTTP Request headers first. If this fails, it attempts to create a new
default configuration and use those values. Finally, if a configuration
        cannot be initialized it uses the hard-coded Mozilla headers.
Returns
request-client - The configured Requests session
Raises
HTTPException
"""
from neolib.config.Configuration import Configuration
s = requests.session()
if not Configuration.loaded():
if not Configuration.initialize():
s.headers.update(Page._defaultVars)
else:
s.headers.update(Configuration.getConfig().core.HTTPHeaders.toDict())
else:
s.headers.update(Configuration.getConfig().core.HTTPHeaders.toDict())
        return s
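# Illustrative usage sketch (comments only; the URL and the form criteria are
# placeholders, not guaranteed endpoints):
#
#   session = Page.newSession()
#   pg = Page("http://www.neopets.com/index.phtml", session=session)
#   form = pg.form(action="login.phtml")   # returns an HTTPForm if one matches
#   if form:
#       form.submit()                      # submit() as referenced in form()'s docstring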
| 38.92053
| 111
| 0.61494
|
0ef52cb7a1410bb3356fde70997ea8b57eba4913
| 2,435
|
py
|
Python
|
src/hsd/eventhandler.py
|
aradi/hsd-python
|
6d9fae3536da99aafff928d96c6b36b9a9c05847
|
[
"BSD-2-Clause"
] | null | null | null |
src/hsd/eventhandler.py
|
aradi/hsd-python
|
6d9fae3536da99aafff928d96c6b36b9a9c05847
|
[
"BSD-2-Clause"
] | null | null | null |
src/hsd/eventhandler.py
|
aradi/hsd-python
|
6d9fae3536da99aafff928d96c6b36b9a9c05847
|
[
"BSD-2-Clause"
] | null | null | null |
#--------------------------------------------------------------------------------------------------#
# hsd-python: package for manipulating HSD-formatted data in Python
# Copyright (C) 2011 - 2021 DFTB+ developers group
# BSD 2-clause license.
#
#--------------------------------------------------------------------------------------------------#
#
"""
Contains an event handler base class.
"""
from abc import ABC, abstractmethod
from typing import Optional
class HsdEventHandler(ABC):
"""Abstract base class for handling HSD events."""
@abstractmethod
def open_tag(self, tagname: str, attrib: Optional[str],
hsdattrib: Optional[dict]):
"""Opens a tag.
Args:
tagname: Name of the tag which had been opened.
attrib: String containing the attribute of the tag or None.
hsdattrib: Dictionary of the options created during the processing
in the hsd-parser.
"""
@abstractmethod
def close_tag(self, tagname: str):
"""Closes a tag.
Args:
tagname: Name of the tag which had been closed.
"""
@abstractmethod
def add_text(self, text: str):
"""Adds text (data) to the current tag.
Args:
text: Text in the current tag.
"""
class HsdEventPrinter(HsdEventHandler):
"""Minimal demonstration class for event handlers.
This specifc implemenation prints the events. Subclassing instances
should override the public methods to customize its behavior.
"""
def __init__(self):
"""Initializes the default event printer."""
self._indentlevel = 0
self._indentstr = " "
def open_tag(self, tagname: str, attrib: str, hsdattrib: dict):
indentstr = self._indentlevel * self._indentstr
print(f"{indentstr}OPENING TAG: {tagname}")
print(f"{indentstr}ATTRIBUTE: {attrib}")
print(f"{indentstr}HSD ATTRIBUTE: {str(hsdattrib)}")
self._indentlevel += 1
def close_tag(self, tagname: str):
self._indentlevel -= 1
indentstr = self._indentlevel * self._indentstr
print(f"{indentstr}CLOSING TAG: {tagname}")
def add_text(self, text: str):
indentstr = self._indentlevel * self._indentstr
print(f"{indentstr}Received text: {text}")
| 30.822785
| 100
| 0.55729
|
1eb1188ae412c42a97546704a770be85c53e6331
| 3,755
|
py
|
Python
|
manim/config.py
|
mykytarudenko/manim
|
08678f7b3edc376932493f53e416d310666ed3b7
|
[
"MIT"
] | 3
|
2020-10-19T18:16:22.000Z
|
2021-05-09T14:49:58.000Z
|
manim/config.py
|
Zoravar-python/News-API
|
08678f7b3edc376932493f53e416d310666ed3b7
|
[
"MIT"
] | null | null | null |
manim/config.py
|
Zoravar-python/News-API
|
08678f7b3edc376932493f53e416d310666ed3b7
|
[
"MIT"
] | null | null | null |
"""
config.py
---------
Process the manim.cfg file and the command line arguments into a single
config object.
"""
import os
import sys
import colour
from . import constants
from .utils.config_utils import _run_config, _init_dirs, _from_command_line
from .logger import logger
from .utils.tex import TexTemplate, TexTemplateFromFile
__all__ = ["file_writer_config", "config", "camera_config"]
def _parse_config(config_parser, args):
"""Parse config files and CLI arguments into a single dictionary."""
# By default, use the CLI section of the digested .cfg files
default = config_parser["CLI"]
# Handle the *_quality flags. These determine the section to read
# and are stored in 'camera_config'. Note the highest resolution
# passed as argument will be used.
for flag in ["fourk_quality", "high_quality", "medium_quality", "low_quality"]:
if getattr(args, flag):
section = config_parser[flag]
break
else:
section = config_parser["CLI"]
config = {opt: section.getint(opt) for opt in config_parser[flag]}
config["default_pixel_height"] = default.getint("pixel_height")
config["default_pixel_width"] = default.getint("pixel_width")
# The -r, --resolution flag overrides the *_quality flags
if args.resolution is not None:
if "," in args.resolution:
height_str, width_str = args.resolution.split(",")
height, width = int(height_str), int(width_str)
else:
            height = int(args.resolution)
            width = int(16 * height / 9)
config["camera_config"].update({"pixel_height": height, "pixel_width": width})
# Handle the -c (--background_color) flag
if args.background_color is not None:
try:
background_color = colour.Color(args.background_color)
except AttributeError as err:
logger.warning("Please use a valid color.")
logger.error(err)
sys.exit(2)
else:
background_color = colour.Color(default["background_color"])
config["background_color"] = background_color
# Set the rest of the frame properties
config["frame_height"] = 8.0
config["frame_width"] = (
config["frame_height"] * config["pixel_width"] / config["pixel_height"]
)
config["frame_y_radius"] = config["frame_height"] / 2
config["frame_x_radius"] = config["frame_width"] / 2
config["top"] = config["frame_y_radius"] * constants.UP
config["bottom"] = config["frame_y_radius"] * constants.DOWN
config["left_side"] = config["frame_x_radius"] * constants.LEFT
config["right_side"] = config["frame_x_radius"] * constants.RIGHT
# Handle the --tex_template flag. Note we accept None if the flag is absent
tex_fn = os.path.expanduser(args.tex_template) if args.tex_template else None
if tex_fn is not None and not os.access(tex_fn, os.R_OK):
# custom template not available, fallback to default
logger.warning(
f"Custom TeX template {tex_fn} not found or not readable. "
"Falling back to the default template."
)
tex_fn = None
config["tex_template_file"] = tex_fn
config["tex_template"] = (
TexTemplateFromFile(filename=tex_fn) if tex_fn is not None else TexTemplate()
)
return config
args, config_parser, file_writer_config, successfully_read_files = _run_config()
logger.setLevel(file_writer_config["verbosity"])
if _from_command_line():
logger.debug(
f"Read configuration files: {[os.path.abspath(cfgfile) for cfgfile in successfully_read_files]}"
)
if not (hasattr(args, "subcommands")):
_init_dirs(file_writer_config)
config = _parse_config(config_parser, args)
camera_config = config
| 37.55
| 104
| 0.681225
|
e5dd01dbafa8744f64e6c13663974d624442c04c
| 356
|
py
|
Python
|
Chapter02/Exercise2.08/exercise2.08.py
|
lmoshood/The-Django-Workshop
|
52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62
|
[
"MIT"
] | null | null | null |
Chapter02/Exercise2.08/exercise2.08.py
|
lmoshood/The-Django-Workshop
|
52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62
|
[
"MIT"
] | null | null | null |
Chapter02/Exercise2.08/exercise2.08.py
|
lmoshood/The-Django-Workshop
|
52e86a8f93cb38bf70d50e9b8d2c6d7dac416f62
|
[
"MIT"
] | 1
|
2020-05-27T13:41:58.000Z
|
2020-05-27T13:41:58.000Z
|
#!/usr/bin/env python3
from reviews.models import Contributor
Contributor.objects.create(first_names='Peter', last_names='Wharton', email='PeterWharton@example.com')
Contributor.objects.create(first_names='Peter', last_names='Tyrrell', email='PeterTyrrell@example.com')
contributors = Contributor.objects.filter(first_names='Peter')
print(contributors)
| 35.6
| 103
| 0.80618
|
9ab777829a90618cdaec9dcb974f04bd79f16e52
| 1,037
|
py
|
Python
|
peeringdb/tests/test_get_asn.py
|
mihalysz/peeringdb-py
|
941902dd6829121134f56f20123ea16b1718ed3a
|
[
"Apache-2.0"
] | 2
|
2015-08-25T13:46:41.000Z
|
2020-03-22T19:19:13.000Z
|
peeringdb/tests/test_get_asn.py
|
plajjan/peeringdb-py
|
941902dd6829121134f56f20123ea16b1718ed3a
|
[
"Apache-2.0"
] | null | null | null |
peeringdb/tests/test_get_asn.py
|
plajjan/peeringdb-py
|
941902dd6829121134f56f20123ea16b1718ed3a
|
[
"Apache-2.0"
] | 2
|
2016-11-09T23:13:50.000Z
|
2020-12-10T06:06:29.000Z
|
#!/usr/bin/env python
import unittest
from peeringdb.PeeringDB import PeeringDB
class TestASN(unittest.TestCase):
PDB = None
def setUp(self):
self.PDB = PeeringDB(cache=False)
# Real networks that exist and have PeeringDB entries
    def test_exists_786(self):
        # JANET UK
        asn = self.PDB.asn(786)
self.assertIsNotNone(asn)
def test_exists_2906(self):
# Netflix
asn = self.PDB.asn(2906)
self.assertIsNotNone(asn)
# Reserved, private or documentation ASNs
def test_shouldnt_exist_0(self):
asn = self.PDB.asn(0)
self.assertIsNone(asn)
def test_shouldnt_exist_64496(self):
asn = self.PDB.asn(64496)
self.assertIsNone(asn)
# Check the returned object is as expected
def test_check_returned_asn_2906(self):
# Netflix, make sure the ASN we get back in the object is the same
asn = self.PDB.asn(2906)
self.assertEqual(asn["asn"], 2906)
if __name__ == '__main__':
unittest.main()
| 23.044444
| 74
| 0.650916
|
b3c99c5c2cf8b22dfeab8f8ae85299bca0beb91f
| 648
|
py
|
Python
|
django_meetups/meetups/migrations/0007_auto_20211125_0658.py
|
yaswanthsaivendra/django_projects
|
9c4b3511d32eb3bfd0825480a5dd021bd865b16b
|
[
"MIT"
] | 3
|
2021-11-08T18:26:26.000Z
|
2021-12-11T18:30:50.000Z
|
django_meetups/meetups/migrations/0007_auto_20211125_0658.py
|
yaswanthsaivendra/django_projects
|
9c4b3511d32eb3bfd0825480a5dd021bd865b16b
|
[
"MIT"
] | null | null | null |
django_meetups/meetups/migrations/0007_auto_20211125_0658.py
|
yaswanthsaivendra/django_projects
|
9c4b3511d32eb3bfd0825480a5dd021bd865b16b
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-11-25 06:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('meetups', '0006_auto_20211125_0650'),
]
operations = [
migrations.AddField(
model_name='meetup',
name='date',
field=models.DateField(default='2021-04-12'),
preserve_default=False,
),
migrations.AddField(
model_name='meetup',
name='organizer_email',
field=models.EmailField(default='test@test.com', max_length=254),
preserve_default=False,
),
]
| 24.923077
| 77
| 0.58179
|
8d4f415f3a1ed5903db7b9834b83a78e8ba98f26
| 5,079
|
py
|
Python
|
model_compression_toolkit/hardware_models/keras_hardware_model/keras_tflite.py
|
isabella232/model_optimization
|
074d1dfd8b4d18e57c6186c0ec5e49eb17a0fc7a
|
[
"Apache-2.0"
] | null | null | null |
model_compression_toolkit/hardware_models/keras_hardware_model/keras_tflite.py
|
isabella232/model_optimization
|
074d1dfd8b4d18e57c6186c0ec5e49eb17a0fc7a
|
[
"Apache-2.0"
] | null | null | null |
model_compression_toolkit/hardware_models/keras_hardware_model/keras_tflite.py
|
isabella232/model_optimization
|
074d1dfd8b4d18e57c6186c0ec5e49eb17a0fc7a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from keras import layers
from keras.layers import Conv2D, Dense, Reshape, ZeroPadding2D, \
MaxPooling2D, ReLU, AveragePooling2D, Activation, DepthwiseConv2D
from tensorflow.python.ops.image_ops_impl import ResizeMethod
from model_compression_toolkit.common.hardware_representation import FrameworkHardwareModel
from model_compression_toolkit.common.hardware_representation.hardware2framework import OperationsSetToLayers, \
LayerFilterParams
from model_compression_toolkit.common.hardware_representation.hardware2framework.attribute_filter import Eq
from model_compression_toolkit.hardware_models.tflite import get_tflite_hw_model
def get_keras_hardware_model_tflite():
tflite_hm = get_tflite_hw_model()
tflite_keras = FrameworkHardwareModel(tflite_hm, name='tflite_keras')
with tflite_keras:
OperationsSetToLayers("PreserveQuantizationParams", [AveragePooling2D,
tf.nn.avg_pool2d,
layers.Concatenate,
tf.concat,
MaxPooling2D,
layers.Multiply,
tf.multiply,
Reshape,
tf.reshape,
LayerFilterParams(tf.image.resize,
method=ResizeMethod.BILINEAR),
tf.nn.space_to_depth,
ZeroPadding2D,
tf.gather,
tf.compat.v1.batch_to_space_nd,
tf.space_to_batch_nd,
tf.transpose,
tf.maximum,
layers.Maximum,
tf.minimum,
layers.Minimum,
tf.pad,
tf.slice,
layers.SlicingOpLambda])
OperationsSetToLayers("FullyConnected", [Dense])
OperationsSetToLayers("L2Normalization", [tf.math.l2_normalize])
OperationsSetToLayers("LogSoftmax", [tf.nn.log_softmax])
OperationsSetToLayers("Tanh", [tf.nn.tanh,
LayerFilterParams(Activation, activation="tanh")])
OperationsSetToLayers("Softmax", [tf.nn.softmax,
layers.Softmax,
LayerFilterParams(Activation, activation="softmax")])
OperationsSetToLayers("Logistic", [tf.sigmoid,
LayerFilterParams(Activation, activation="sigmoid")])
OperationsSetToLayers("Conv2d", [Conv2D])
OperationsSetToLayers("DepthwiseConv2D", [DepthwiseConv2D])
OperationsSetToLayers("Relu", [tf.nn.relu,
tf.nn.relu6,
LayerFilterParams(ReLU, Eq("max_value", None) | Eq("max_value", 6)),
LayerFilterParams(Activation, activation="relu")])
OperationsSetToLayers("Elu", [tf.nn.elu,
LayerFilterParams(Activation, activation="elu")])
OperationsSetToLayers("BatchNorm", [layers.BatchNormalization,
tf.nn.batch_normalization])
OperationsSetToLayers("Squeeze", [tf.squeeze])
OperationsSetToLayers("BiasAdd", [tf.nn.bias_add])
OperationsSetToLayers("Add", [tf.add,
layers.Add])
return tflite_keras
| 54.031915
| 112
| 0.485726
|
2e1a84f4ee69b6f1ae3d393c5584cc4c09735966
| 2,215
|
py
|
Python
|
Chapter08/10_init_hooks/my_library/models/library_book.py
|
PacktPublishing/-Odoo-13-Development-Cookbook-Fouth-Edition
|
fb75412e1ff13e1a1b3233f06de425c9df18aca7
|
[
"MIT"
] | 125
|
2020-11-28T18:00:34.000Z
|
2022-03-07T17:53:22.000Z
|
Chapter08/10_init_hooks/my_library/models/library_book.py
|
PacktPublishing/-Odoo-13-Development-Cookbook-Fouth-Edition
|
fb75412e1ff13e1a1b3233f06de425c9df18aca7
|
[
"MIT"
] | 5
|
2021-02-02T10:03:29.000Z
|
2022-03-16T07:32:28.000Z
|
Chapter08/10_init_hooks/my_library/models/library_book.py
|
PacktPublishing/-Odoo-13-Development-Cookbook-Fouth-Edition
|
fb75412e1ff13e1a1b3233f06de425c9df18aca7
|
[
"MIT"
] | 182
|
2020-11-29T12:07:07.000Z
|
2022-03-22T04:27:51.000Z
|
# -*- coding: utf-8 -*-
from odoo import models, fields, _
from odoo.exceptions import UserError
from odoo.tests.common import Form
import logging
_logger = logging.getLogger(__name__)
class LibraryBook(models.Model):
_name = 'library.book'
_description = 'Library Book'
name = fields.Char('Title', required=True)
date_release = fields.Date('Release Date')
active = fields.Boolean(default=True)
author_ids = fields.Many2many('res.partner', string='Authors')
state = fields.Selection(
[('available', 'Available'),
('borrowed', 'Borrowed'),
('lost', 'Lost')],
'State', default="available")
cost_price = fields.Float('Book Cost')
category_id = fields.Many2one('library.book.category')
def make_available(self):
self.ensure_one()
self.state = 'available'
def make_borrowed(self):
self.ensure_one()
self.state = 'borrowed'
def make_lost(self):
self.ensure_one()
self.state = 'lost'
if not self.env.context.get('avoid_deactivate'):
self.active = False
def book_rent(self):
self.ensure_one()
if self.state != 'available':
raise UserError(_('Book is not available for renting'))
rent_as_superuser = self.env['library.book.rent'].sudo()
rent_as_superuser.create({
'book_id': self.id,
'borrower_id': self.env.user.partner_id.id,
})
def average_book_occupation(self):
self.flush()
sql_query = """
SELECT
lb.name,
avg((EXTRACT(epoch from age(return_date, rent_date)) / 86400))::int
FROM
library_book_rent AS lbr
JOIN
library_book as lb ON lb.id = lbr.book_id
WHERE lbr.state = 'returned'
GROUP BY lb.name;"""
self.env.cr.execute(sql_query)
result = self.env.cr.fetchall()
_logger.info("Average book occupation: %s", result)
def return_all_books(self):
self.ensure_one()
wizard = self.env['library.return.wizard']
wizard.create({'borrower_id': self.env.user.partner_id.id}).books_returns()
| 30.763889
| 83
| 0.602257
|
23b4ff256f39df8a1117c01039dda05f07632397
| 4,870
|
py
|
Python
|
sentinelhub/testing_utils.py
|
HenryKobin/sentinelhub-py
|
d3b74201cbb613f6866e75e19527339967b9a2e1
|
[
"MIT"
] | null | null | null |
sentinelhub/testing_utils.py
|
HenryKobin/sentinelhub-py
|
d3b74201cbb613f6866e75e19527339967b9a2e1
|
[
"MIT"
] | null | null | null |
sentinelhub/testing_utils.py
|
HenryKobin/sentinelhub-py
|
d3b74201cbb613f6866e75e19527339967b9a2e1
|
[
"MIT"
] | null | null | null |
"""
Utility tools for writing unit tests for packages which rely on `sentinelhub-py`
"""
import unittest
import os
import shutil
import logging
import inspect
import numpy as np
from .config import SHConfig
class TestSentinelHub(unittest.TestCase):
""" Class implementing common functionalities of unit tests for working with `sentinelhub-py` package:
- reading configuration parameters from environmental variables and saving them to `config.json`,
- setting logger,
- handling input and output data folders,
- method for testing statistics of a numpy data array.
"""
CONFIG = None
LOGGER = None
INPUT_FOLDER = None
OUTPUT_FOLDER = None
CLEAR_OUTPUTS = True
@classmethod
def setUpClass(cls):
""" A general set up class
Use ``super().setUpClass()`` in every class which inherits ``TestSentinelHub``
"""
if cls.__name__ == TestSentinelHub.__name__:
return
cls.INPUT_FOLDER = os.path.join(os.path.dirname(inspect.getsourcefile(cls)), 'TestInputs')
cls.OUTPUT_FOLDER = os.path.join(os.path.dirname(inspect.getsourcefile(cls)), 'TestOutputs')
if cls.CONFIG is None:
cls.CONFIG = cls._config_with_environment_variables()
if cls.LOGGER is None:
logging.basicConfig(level=logging.INFO,
format='%(asctime)-15s %(module)s:%(lineno)d [%(levelname)s] %(funcName)s %(message)s')
cls.LOGGER = logging.getLogger(__name__)
@staticmethod
def _config_with_environment_variables():
""" Reads configuration parameters from environmental variables
"""
config = SHConfig()
for param in config.get_params():
env_variable = param.upper()
if os.environ.get(env_variable):
setattr(config, param, os.environ.get(env_variable))
config.save()
return config
@classmethod
def tearDownClass(cls):
if cls.CLEAR_OUTPUTS and cls.OUTPUT_FOLDER:
shutil.rmtree(cls.OUTPUT_FOLDER, ignore_errors=True)
def test_numpy_data(self, data=None, exp_shape=None, exp_dtype=None, exp_min=None, exp_max=None, exp_mean=None,
exp_median=None, delta=None, test_name=''):
""" Validates basic statistics of data array
:param data: Data array
:type data: numpy.ndarray
:param exp_shape: Expected shape
:type exp_shape: tuple(int)
:param exp_dtype: Expected dtype
:type exp_dtype: numpy.dtype
:param exp_min: Expected minimal value
:type exp_min: float
:param exp_max: Expected maximal value
:type exp_max: float
:param exp_mean: Expected mean value
:type exp_mean: float
:param exp_median: Expected median value
:type exp_median: float
:param delta: Precision of validation. If not set, it will be set automatically
:type delta: float
:param test_name: Name of the test case
:type test_name: str
"""
if data is None:
return
if delta is None:
delta = 1e-1 if np.issubdtype(data.dtype, np.integer) else 1e-4
for exp_stat, stat_val, stat_name in [(exp_shape, data.shape, 'shape'), (exp_dtype, data.dtype, 'dtype')]:
if exp_stat is not None:
with self.subTest(msg='Test case {}'.format(test_name)):
self.assertEqual(stat_val, exp_stat,
msg='Expected {} {}, got {}'.format(stat_name, exp_stat, stat_val))
data = data[~np.isnan(data)]
for exp_stat, stat_func, stat_name in [(exp_min, np.amin, 'min'), (exp_max, np.amax, 'max'),
(exp_mean, np.mean, 'mean'), (exp_median, np.median, 'median')]:
if exp_stat is not None:
stat_val = stat_func(data)
with self.subTest(msg='Test case {}'.format(test_name)):
self.assertAlmostEqual(stat_val, exp_stat, delta=delta,
msg='Expected {} {}, got {}'.format(stat_name, exp_stat, stat_val))
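    # Illustrative subclass sketch (comments only; the expected statistics below
    # are placeholders chosen for the example, not values from this package):
    #
    #   class TestMyData(TestSentinelHub):
    #       def test_stats(self):
    #           data = np.zeros((10, 10), dtype=np.uint8)
    #           self.test_numpy_data(data, exp_shape=(10, 10), exp_dtype=np.uint8,
    #                                exp_min=0, exp_max=0, exp_mean=0, exp_median=0)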
class TestCaseContainer:
""" Class for storing expected statistics for a single test case
:param name: Name of a test case
:type name: str
:param request: A class which provides the data for testing
:type request: object
:param stats: Any other parameters
"""
def __init__(self, name, request, **stats):
self.name = name
self.request = request
for stat_name, stat_value in stats.items():
setattr(self, stat_name, stat_value)
def __getattr__(self, key):
""" Fallback if the attribute is missing - in that case `None` is returned
"""
| 36.343284
| 120
| 0.619918
|
6086db778782715034bf17e4bff4dc8115c696f8
| 1,201
|
py
|
Python
|
custom/ilsgateway/tests/test_product_sync.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | 1
|
2015-02-10T23:26:39.000Z
|
2015-02-10T23:26:39.000Z
|
custom/ilsgateway/tests/test_product_sync.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
custom/ilsgateway/tests/test_product_sync.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import os
from django.test import TestCase
from corehq.apps.commtrack.models import Product as Prod
from corehq.apps.commtrack.tests.util import bootstrap_domain as initial_bootstrap
from custom.ilsgateway.api import Product
from custom.ilsgateway.commtrack import sync_ilsgateway_product
TEST_DOMAIN = 'ilsgateway-commtrack-product-test'
class ProductSyncTest(TestCase):
def setUp(self):
self.datapath = os.path.join(os.path.dirname(__file__), 'data')
initial_bootstrap(TEST_DOMAIN)
for product in Prod.by_domain(TEST_DOMAIN):
product.delete()
def test_create_product(self):
with open(os.path.join(self.datapath, 'sample_product.json')) as f:
product = Product.from_json(json.loads(f.read()))
self.assertEqual(0, len(Prod.by_domain(TEST_DOMAIN)))
ilsgateway_product = sync_ilsgateway_product(TEST_DOMAIN, product)
self.assertEqual(product.sms_code, ilsgateway_product.code.lower())
self.assertEqual(product.name, ilsgateway_product.name)
self.assertEqual(product.description, ilsgateway_product.description)
self.assertEqual(product.units, str(ilsgateway_product.unit))
| 40.033333
| 82
| 0.750208
|
9d38a9639676a7a5b69ece3acff461f2998e76f8
| 2,525
|
py
|
Python
|
generic/scrapper.py
|
tushortz/scrapper
|
8b150f55d5c4e13264de7a03cbb7953e19645f3c
|
[
"MIT"
] | 2
|
2018-06-10T21:02:43.000Z
|
2021-11-06T11:51:12.000Z
|
generic/scrapper.py
|
tushortz/simple-scrapper
|
8b150f55d5c4e13264de7a03cbb7953e19645f3c
|
[
"MIT"
] | null | null | null |
generic/scrapper.py
|
tushortz/simple-scrapper
|
8b150f55d5c4e13264de7a03cbb7953e19645f3c
|
[
"MIT"
] | null | null | null |
# Taiwo Kareem
# 05/04/2018 04:09 AM
import sys
import requests
import re
import time
import os
import json
from urllib.parse import urljoin # Python3
if len(sys.argv) != 2:
sys.exit('Usage: python %s "<config_filename>"' % __file__)
config_file = sys.argv[1]
config = {}
if os.path.isfile(config_file):
with open(config_file) as f:
try:
config = json.load(f)
except json.decoder.JSONDecodeError as err:
sys.exit("ERROR: Invalid json data in file. %s" % err)
else:
sys.exit("Config file: '%s' not found" % config_file)
URL = config.get("domain")
filename = config.get("output_filename")
URL_REGEX = config.get("path_regex")
KEYWORD_REGEX = config.get("keyword_regex")
AUTH = config.get("login")
s = requests.Session()
s.headers.update({
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:27.0) Gecko/20100101 Firefox/27.0',
})
if AUTH and AUTH != "username:password":
AUTH = tuple(AUTH.split(":"))
s.auth = AUTH
s.post(URL, auth=AUTH)
opened = []
visited = []
hits = 0
mode = "w"
MATCHES = []
if os.path.isfile(filename):
with open(filename) as f:
visited = f.read().split("\n")
mode = "a"
with open(filename, mode) as f:
def process(url, visited=visited, hits=hits, s=s):
LINKS = []
page_crawled = False
for pages in opened:
if pages == url:
page_crawled = True
        if not page_crawled:
opened.append(url)
text = s.get(url).text
for link in re.findall(r'href="(.*?)"', text):
link = urljoin(url, link).split("#")[0]#.split("?")[0]
LINKS.append(link.lower())
for link in list(set(LINKS)):
if link.startswith(URL):
if link not in visited:
if re.search(URL_REGEX, link, re.I):
source = s.get(link).text
# ([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)
matches = set(re.findall(r"{0}".format(KEYWORD_REGEX), source, re.I))
if matches:
hits += 1
print("\n[%s] (%s/%s) -> %s" % (len(matches), hits, len(visited), link))
else:
matches = []
for email in matches:
if email not in MATCHES:
print(email.lower())
f.write(email.lower() + "\n")
f.flush()
MATCHES.append(email)
try:
visited.append(link)
except:
time.sleep(3)
else:
print(".", end="", flush=True)
try:
process(link, hits=hits)
except Exception as e:
time.sleep(3)
print("\n--", e)
try:
process(URL, hits=hits)
except Exception as e:
print(e)
time.sleep(3)
| 20.696721
| 102
| 0.601584
|
de9e0954979762eb743aedccddb1bc784505ff21
| 1,457
|
py
|
Python
|
napari/layers/_tests/test_source.py
|
MaksHess/napari
|
64a144607342c02177fc62fa83a3442ace0a98e7
|
[
"BSD-3-Clause"
] | 1,345
|
2019-03-03T21:14:14.000Z
|
2022-03-31T19:46:39.000Z
|
napari/layers/_tests/test_source.py
|
MaksHess/napari
|
64a144607342c02177fc62fa83a3442ace0a98e7
|
[
"BSD-3-Clause"
] | 3,904
|
2019-03-02T01:30:24.000Z
|
2022-03-31T20:17:27.000Z
|
napari/layers/_tests/test_source.py
|
MaksHess/napari
|
64a144607342c02177fc62fa83a3442ace0a98e7
|
[
"BSD-3-Clause"
] | 306
|
2019-03-29T17:09:10.000Z
|
2022-03-30T09:54:11.000Z
|
from napari.layers import Points
from napari.layers._source import Source, current_source, layer_source
def test_layer_source():
"""Test basic layer source assignment mechanism"""
with layer_source(path='some_path', reader_plugin='builtins'):
points = Points()
assert points.source == Source(path='some_path', reader_plugin='builtins')
def test_source_context():
"""Test nested contexts, overrides, and resets."""
assert current_source() == Source()
# everything created within this context will have this sample source
with layer_source(sample=('samp', 'name')):
assert current_source() == Source(sample=('samp', 'name'))
# nested contexts override previous ones
with layer_source(path='a', reader_plugin='plug'):
assert current_source() == Source(
path='a', reader_plugin='plug', sample=('samp', 'name')
)
# note the new path now...
with layer_source(path='b'):
assert current_source() == Source(
path='b', reader_plugin='plug', sample=('samp', 'name')
)
# as we exit the contexts, they should undo their assignments
assert current_source() == Source(
path='a', reader_plugin='plug', sample=('samp', 'name')
)
assert current_source() == Source(sample=('samp', 'name'))
assert current_source() == Source()
| 40.472222
| 78
| 0.611531
|
7263e9e3bb8d77827f94f14b5e3044a61eae6e09
| 619
|
py
|
Python
|
Hereditariedade_e_Polimorfismo/18-Inheritance-Examples.py
|
nnsdtr/OOP-Python
|
3b739966c9b35c32a2bd934574f6421b1470eb23
|
[
"MIT"
] | null | null | null |
Hereditariedade_e_Polimorfismo/18-Inheritance-Examples.py
|
nnsdtr/OOP-Python
|
3b739966c9b35c32a2bd934574f6421b1470eb23
|
[
"MIT"
] | null | null | null |
Hereditariedade_e_Polimorfismo/18-Inheritance-Examples.py
|
nnsdtr/OOP-Python
|
3b739966c9b35c32a2bd934574f6421b1470eb23
|
[
"MIT"
] | null | null | null |
class Animal(object):
def __init__(self, name):
self.name = name
def eat(self, food):
print('{0} is eating {1}'.format(self.name, food))
class Dog(Animal):
def fetch(self, thing):
print('{0} goes after the {1}'.format(self.name, thing))
class Cat(Animal):
def swat_string(self):
print('{} shreds the string!'.format(self.name))
minigato = Cat('Minigato')
jujuba = Dog('Jujuba')
jujuba.fetch('ball')
minigato.swat_string()
minigato.eat('cat food')
jujuba.eat('dog food')
# jujuba.swat_string() # AttributeError: 'Dog' object has no attribute 'swat_string'
| 18.757576
| 86
| 0.641357
|
8418992ac5dea98b0ca98fb940e74b61ae4e8a65
| 4,533
|
py
|
Python
|
homeassistant/components/waterfurnace/sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/waterfurnace/sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/waterfurnace/sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Support for Waterfurnace."""
from __future__ import annotations
from homeassistant.components.sensor import (
ENTITY_ID_FORMAT,
SensorDeviceClass,
SensorEntity,
)
from homeassistant.const import PERCENTAGE, POWER_WATT, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import slugify
from . import DOMAIN as WF_DOMAIN, UPDATE_TOPIC
class WFSensorConfig:
"""Water Furnace Sensor configuration."""
def __init__(
self,
friendly_name,
field,
icon="mdi:gauge",
unit_of_measurement=None,
device_class=None,
):
"""Initialize configuration."""
self.device_class = device_class
self.friendly_name = friendly_name
self.field = field
self.icon = icon
self.unit_of_measurement = unit_of_measurement
SENSORS = [
WFSensorConfig("Furnace Mode", "mode"),
WFSensorConfig("Total Power", "totalunitpower", "mdi:flash", POWER_WATT),
WFSensorConfig(
"Active Setpoint",
"tstatactivesetpoint",
None,
TEMP_FAHRENHEIT,
SensorDeviceClass.TEMPERATURE,
),
WFSensorConfig(
"Leaving Air",
"leavingairtemp",
None,
TEMP_FAHRENHEIT,
SensorDeviceClass.TEMPERATURE,
),
WFSensorConfig(
"Room Temp",
"tstatroomtemp",
None,
TEMP_FAHRENHEIT,
SensorDeviceClass.TEMPERATURE,
),
WFSensorConfig("Loop Temp", "enteringwatertemp", None, TEMP_FAHRENHEIT),
WFSensorConfig(
"Humidity Set Point", "tstathumidsetpoint", "mdi:water-percent", PERCENTAGE
),
WFSensorConfig(
"Humidity", "tstatrelativehumidity", "mdi:water-percent", PERCENTAGE
),
WFSensorConfig("Compressor Power", "compressorpower", "mdi:flash", POWER_WATT),
WFSensorConfig("Fan Power", "fanpower", "mdi:flash", POWER_WATT),
WFSensorConfig("Aux Power", "auxpower", "mdi:flash", POWER_WATT),
WFSensorConfig("Loop Pump Power", "looppumppower", "mdi:flash", POWER_WATT),
WFSensorConfig("Compressor Speed", "actualcompressorspeed", "mdi:speedometer"),
WFSensorConfig("Fan Speed", "airflowcurrentspeed", "mdi:fan"),
]
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Waterfurnace sensor."""
if discovery_info is None:
return
sensors = []
client = hass.data[WF_DOMAIN]
for sconfig in SENSORS:
sensors.append(WaterFurnaceSensor(client, sconfig))
add_entities(sensors)
class WaterFurnaceSensor(SensorEntity):
"""Implementing the Waterfurnace sensor."""
def __init__(self, client, config):
"""Initialize the sensor."""
self.client = client
self._name = config.friendly_name
self._attr = config.field
self._state = None
self._icon = config.icon
self._unit_of_measurement = config.unit_of_measurement
self._attr_device_class = config.device_class
# This ensures that the sensors are isolated per waterfurnace unit
self.entity_id = ENTITY_ID_FORMAT.format(
f"wf_{slugify(self.client.unit)}_{slugify(self._attr)}"
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return icon."""
return self._icon
@property
def native_unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit_of_measurement
@property
def should_poll(self):
"""Return the polling state."""
return False
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, UPDATE_TOPIC, self.async_update_callback
)
)
@callback
def async_update_callback(self):
"""Update state."""
if self.client.data is not None:
self._state = getattr(self.client.data, self._attr, None)
self.async_write_ha_state()
| 29.627451
| 83
| 0.661372
|
d14ff0d35b1c7bc31f6ce170582bf57963b70c16
| 3,958
|
py
|
Python
|
projects/objectnav_baselines/experiments/robothor/objectnav_robothor_depth_resnetgru_ddppo.py
|
prithv1/allenact
|
ee736e6a3aeed29b3661ee18fa0dc0a68a40201e
|
[
"MIT"
] | 1
|
2020-09-10T13:09:14.000Z
|
2020-09-10T13:09:14.000Z
|
projects/objectnav_baselines/experiments/robothor/objectnav_robothor_depth_resnetgru_ddppo.py
|
andrlima/allenact
|
f29dd6f0ec62425b02ca07fee815b1a82627a28e
|
[
"MIT"
] | null | null | null |
projects/objectnav_baselines/experiments/robothor/objectnav_robothor_depth_resnetgru_ddppo.py
|
andrlima/allenact
|
f29dd6f0ec62425b02ca07fee815b1a82627a28e
|
[
"MIT"
] | null | null | null |
import gym
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torchvision import models
from core.algorithms.onpolicy_sync.losses import PPO
from core.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from projects.objectnav_baselines.experiments.robothor.objectnav_robothor_base import (
ObjectNavRoboThorBaseConfig,
)
from projects.objectnav_baselines.models.object_nav_models import (
ResnetTensorObjectNavActorCritic,
)
from plugins.ithor_plugin.ithor_sensors import GoalObjectTypeThorSensor
from plugins.habitat_plugin.habitat_preprocessors import ResnetPreProcessorHabitat
from plugins.robothor_plugin.robothor_sensors import DepthSensorRoboThor
from plugins.robothor_plugin.robothor_tasks import ObjectNavTask
from utils.experiment_utils import Builder, PipelineStage, TrainingPipeline, LinearDecay
class ObjectNavRoboThorRGBPPOExperimentConfig(ObjectNavRoboThorBaseConfig):
"""An Object Navigation experiment configuration in RoboThor with Depth
input."""
def __init__(self):
super().__init__()
self.ENV_ARGS["renderDepthImage"] = True
        self.SENSORS = [
            DepthSensorRoboThor(
                height=self.SCREEN_SIZE,
                width=self.SCREEN_SIZE,
                use_resnet_normalization=True,
                uuid="depth_lowres",
            ),
            GoalObjectTypeThorSensor(object_types=self.TARGET_TYPES,),
        ]
self.PREPROCESSORS = [
Builder(
ResnetPreProcessorHabitat,
{
"input_height": self.SCREEN_SIZE,
"input_width": self.SCREEN_SIZE,
"output_width": 7,
"output_height": 7,
"output_dims": 512,
"pool": False,
"torchvision_resnet_model": models.resnet18,
"input_uuids": ["depth_lowres"],
"output_uuid": "depth_resnet",
"parallel": False, # TODO False for debugging
},
),
]
self.OBSERVATIONS = [
"depth_resnet",
"goal_object_type_ind",
]
@classmethod
def tag(cls):
return "Objectnav-RoboTHOR-Depth-ResNetGRU-DDPPO"
def training_pipeline(self, **kwargs):
ppo_steps = int(300000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 3
num_steps = 30
save_interval = 5000000
log_interval = 10000
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={"ppo_loss": Builder(PPO, kwargs={}, default=PPOConfig,)},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=self.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
return ResnetTensorObjectNavActorCritic(
action_space=gym.spaces.Discrete(len(ObjectNavTask.class_action_names())),
observation_space=kwargs["observation_set"].observation_spaces,
goal_sensor_uuid="goal_object_type_ind",
rgb_resnet_preprocessor_uuid="rgb_resnet",
depth_resnet_preprocessor_uuid="depth_resnet",
hidden_size=512,
goal_dims=32,
)
| 35.981818
| 88
| 0.641233
|
fa06b086876dda3e42060d0cdab7627f7c304f00
| 6,367
|
py
|
Python
|
airflow/contrib/operators/kubernetes_pod_operator.py
|
abhishek-ch/incubator-airflow
|
3358551c8e73d9019900f7a85f18ebfd88591450
|
[
"Apache-2.0"
] | 1
|
2021-08-25T14:24:23.000Z
|
2021-08-25T14:24:23.000Z
|
airflow/contrib/operators/kubernetes_pod_operator.py
|
abhishek-ch/incubator-airflow
|
3358551c8e73d9019900f7a85f18ebfd88591450
|
[
"Apache-2.0"
] | null | null | null |
airflow/contrib/operators/kubernetes_pod_operator.py
|
abhishek-ch/incubator-airflow
|
3358551c8e73d9019900f7a85f18ebfd88591450
|
[
"Apache-2.0"
] | 1
|
2021-08-25T14:24:27.000Z
|
2021-08-25T14:24:27.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.kubernetes import kube_client, pod_generator, pod_launcher
from airflow.contrib.kubernetes.pod import Resources
from airflow.utils.state import State
from airflow.contrib.kubernetes.volume_mount import VolumeMount # noqa
from airflow.contrib.kubernetes.volume import Volume # noqa
from airflow.contrib.kubernetes.secret import Secret # noqa
template_fields = ('templates_dict',)
template_ext = tuple()
ui_color = '#ffefeb'
class KubernetesPodOperator(BaseOperator):
"""
Execute a task in a Kubernetes Pod
:param image: Docker image you wish to launch. Defaults to dockerhub.io,
but fully qualified URLS will point to custom repositories
:type image: str
    :param namespace: the namespace to run within kubernetes
    :type namespace: str
    :param cmds: entrypoint of the container. (templated)
        The docker image's entrypoint is used if this is not provided.
    :type cmds: list of str
    :param arguments: arguments to the entrypoint. (templated)
The docker image's CMD is used if this is not provided.
:type arguments: list of str
:param volume_mounts: volumeMounts for launched pod
:type volume_mounts: list of VolumeMount
:param volumes: volumes for launched pod. Includes ConfigMaps and PersistentVolumes
:type volumes: list of Volume
:param labels: labels to apply to the Pod
:type labels: dict
:param startup_timeout_seconds: timeout in seconds to startup the pod
:type startup_timeout_seconds: int
:param name: name of the task you want to run,
will be used to generate a pod id
:type name: str
:param env_vars: Environment variables initialized in the container. (templated)
:type env_vars: dict
:param secrets: Kubernetes secrets to inject in the container,
They can be exposed as environment vars or files in a volume.
:type secrets: list of Secret
:param in_cluster: run kubernetes client with in_cluster configuration
:type in_cluster: bool
:param cluster_context: context that points to kubernetes cluster.
Ignored when in_cluster is True. If None, current-context is used.
:type cluster_context: string
:param get_logs: get the stdout of the container as logs of the tasks
:type get_logs: bool
:param affinity: A dict containing a group of affinity scheduling rules
:type affinity: dict
"""
template_fields = ('cmds', 'arguments', 'env_vars')
def execute(self, context):
try:
client = kube_client.get_kube_client(in_cluster=self.in_cluster,
cluster_context=self.cluster_context)
gen = pod_generator.PodGenerator()
for mount in self.volume_mounts:
gen.add_mount(mount)
for volume in self.volumes:
gen.add_volume(volume)
pod = gen.make_pod(
namespace=self.namespace,
image=self.image,
pod_id=self.name,
cmds=self.cmds,
arguments=self.arguments,
labels=self.labels,
)
pod.secrets = self.secrets
pod.envs = self.env_vars
pod.image_pull_policy = self.image_pull_policy
pod.annotations = self.annotations
pod.resources = self.resources
pod.affinity = self.affinity
launcher = pod_launcher.PodLauncher(kube_client=client)
final_state = launcher.run_pod(
pod,
startup_timeout=self.startup_timeout_seconds,
get_logs=self.get_logs)
if final_state != State.SUCCESS:
raise AirflowException(
'Pod returned a failure: {state}'.format(state=final_state)
)
except AirflowException as ex:
raise AirflowException('Pod Launching failed: {error}'.format(error=ex))
@apply_defaults
def __init__(self,
namespace,
image,
name,
cmds=None,
arguments=None,
volume_mounts=None,
volumes=None,
env_vars=None,
secrets=None,
in_cluster=False,
cluster_context=None,
labels=None,
startup_timeout_seconds=120,
get_logs=True,
image_pull_policy='IfNotPresent',
annotations=None,
resources=None,
affinity=None,
*args,
**kwargs):
super(KubernetesPodOperator, self).__init__(*args, **kwargs)
self.image = image
self.namespace = namespace
self.cmds = cmds or []
self.arguments = arguments or []
self.labels = labels or {}
self.startup_timeout_seconds = startup_timeout_seconds
self.name = name
self.env_vars = env_vars or {}
self.volume_mounts = volume_mounts or []
self.volumes = volumes or []
self.secrets = secrets or []
self.in_cluster = in_cluster
self.cluster_context = cluster_context
self.get_logs = get_logs
self.image_pull_policy = image_pull_policy
self.annotations = annotations or {}
self.affinity = affinity or {}
self.resources = resources or Resources()
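A minimal usage sketch, assuming Airflow 1.x and this contrib module; the DAG name, dates, image and commands are illustrative placeholders, not values from the source:
from datetime import datetime
from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
# placeholder DAG so the operator has something to attach to
dag = DAG('k8s_pod_example', start_date=datetime(2018, 1, 1), schedule_interval=None)
run_job = KubernetesPodOperator(
    task_id='run-my-job',                 # standard BaseOperator argument
    namespace='default',
    image='python:3.6-slim',
    name='my-job',
    cmds=['python', '-c'],
    arguments=['print("hello from the pod")'],
    labels={'app': 'example'},
    in_cluster=False,
    get_logs=True,
    dag=dag,
)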
| 40.814103
| 87
| 0.649914
|
11003e16d0eade068f9aaed5b7d1d8679372d74b
| 1,724
|
py
|
Python
|
scout/commands/view/individuals.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | null | null | null |
scout/commands/view/individuals.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | null | null | null |
scout/commands/view/individuals.py
|
mhkc/scout
|
a7162f28c0f3490c3f3376268118fa8e6072a9db
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
import click
from flask.cli import with_appcontext
from scout.server.extensions import store
from scout.constants import SEX_MAP, PHENOTYPE_MAP
LOG = logging.getLogger(__name__)
@click.command("individuals", short_help="Display individuals")
@click.option("-i", "--institute", help="institute id of related cases")
@click.option("--causatives", is_flag=True, help="Has causative variants")
@click.option("-c", "--case-id")
@with_appcontext
def individuals(institute, causatives, case_id):
"""Show all individuals from all cases in the database"""
LOG.info("Running scout view individuals")
adapter = store
individuals = []
if case_id:
case = adapter.case(case_id=case_id)
if case:
cases = [case]
else:
LOG.info("Could not find case %s", case_id)
return
else:
cases = [
case_obj
for case_obj in adapter.cases(collaborator=institute, has_causatives=causatives)
]
if len(cases) == 0:
LOG.info("Could not find cases that match criteria")
return
individuals = (ind_obj for case_obj in cases for ind_obj in case_obj["individuals"])
click.echo("#case_id\tind_id\tdisplay_name\tsex\tphenotype\tmother\tfather")
for case in cases:
for ind_obj in case["individuals"]:
ind_info = [
case["_id"],
ind_obj["individual_id"],
ind_obj["display_name"],
SEX_MAP[int(ind_obj["sex"])],
PHENOTYPE_MAP[ind_obj["phenotype"]],
ind_obj["mother"],
ind_obj["father"],
]
click.echo("\t".join(ind_info))
| 32.528302
| 92
| 0.612529
|
24063ae4bec035ad2dff8b21eff2dceba125b4af
| 2,111
|
py
|
Python
|
src/OTLMOW/OTLModel/Classes/ProefZichtbaarheidBijNacht.py
|
davidvlaminck/OTLClassPython
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | 2
|
2022-02-01T08:58:11.000Z
|
2022-02-08T13:35:17.000Z
|
src/OTLMOW/OTLModel/Classes/ProefZichtbaarheidBijNacht.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
src/OTLMOW/OTLModel/Classes/ProefZichtbaarheidBijNacht.py
|
davidvlaminck/OTLMOW
|
71330afeb37c3ea6d9981f521ff8f4a3f8b946fc
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.Proef import Proef
from OTLMOW.OTLModel.Datatypes.FloatOrDecimalField import FloatOrDecimalField
from OTLMOW.GeometrieArtefact.PuntGeometrie import PuntGeometrie
from OTLMOW.GeometrieArtefact.LijnGeometrie import LijnGeometrie
from OTLMOW.GeometrieArtefact.VlakGeometrie import VlakGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class ProefZichtbaarheidBijNacht(Proef, PuntGeometrie, LijnGeometrie, VlakGeometrie):
"""Bepaling van het retroreflecterend vermogen van een markering bij nacht."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/proefenmeting#ProefZichtbaarheidBijNacht'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
Proef.__init__(self)
LijnGeometrie.__init__(self)
PuntGeometrie.__init__(self)
VlakGeometrie.__init__(self)
self._retrotreflectiecoëfficiënt = OTLAttribuut(field=FloatOrDecimalField,
naam='retrotreflectiecoëfficiënt',
label='retrotreflectiecoëfficiënt',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/proefenmeting#ProefZichtbaarheidBijNacht.retrotreflectiecoëfficiënt',
usagenote='uitgedrukt in mcd. m-2.lux-1',
definition='De maat voor het retroreflecterend vermogen van een markering bij nacht.',
owner=self)
@property
def retrotreflectiecoëfficiënt(self):
"""De maat voor het retroreflecterend vermogen van een markering bij nacht."""
return self._retrotreflectiecoëfficiënt.get_waarde()
@retrotreflectiecoëfficiënt.setter
def retrotreflectiecoëfficiënt(self, value):
self._retrotreflectiecoëfficiënt.set_waarde(value, owner=self)
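A small sketch of using the generated class above; the value is arbitrary and only demonstrates the property getter/setter pair:
proef = ProefZichtbaarheidBijNacht()
proef.retrotreflectiecoëfficiënt = 150.0   # arbitrary value, in mcd. m-2.lux-1
print(proef.retrotreflectiecoëfficiënt)    # reads back 150.0 via the property
print(proef.typeURI)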
| 54.128205
| 181
| 0.670298
|
d16b65fbd4fa18a9b16a7cf25fd69f0380df6792
| 1,209
|
py
|
Python
|
airbyte-integrations/connectors/source-webflow/source_webflow/webflow_to_airbyte_mapping.py
|
faros-ai/airbyte
|
2ebafe1817a71b5c0a8f8b6f448dbef9db708668
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-webflow/source_webflow/webflow_to_airbyte_mapping.py
|
faros-ai/airbyte
|
2ebafe1817a71b5c0a8f8b6f448dbef9db708668
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-webflow/source_webflow/webflow_to_airbyte_mapping.py
|
faros-ai/airbyte
|
2ebafe1817a71b5c0a8f8b6f448dbef9db708668
|
[
"MIT"
] | 1
|
2021-11-12T14:27:56.000Z
|
2021-11-12T14:27:56.000Z
|
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
class WebflowToAirbyteMapping:
"""
    The following dictionary is used for dynamically pulling the schema from Webflow and mapping it to an Airbyte-compatible json-schema
Webflow: https://developers.webflow.com/#get-collection-with-full-schema
Airbyte/json-schema: https://docs.airbyte.com/understanding-airbyte/supported-data-types/
"""
webflow_to_airbyte_mapping = {
"Bool": {"type": ["null", "boolean"]},
"Date": {
"type": ["null", "string"],
"format": "date-time",
},
"Email": {
"type": ["null", "string"],
},
"ImageRef": {"type": ["null", "object"], "additionalProperties": True},
"ItemRef": {"type": ["null", "string"]},
"ItemRefSet": {"type": ["null", "array"]},
"Link": {"type": ["null", "string"]},
"Number": {"type": ["null", "number"]},
"Option": {"type": ["null", "string"]},
"PlainText": {"type": ["null", "string"]},
"RichText": {"type": ["null", "string"]},
"User": {"type": ["null", "string"]},
"Video": {"type": ["null", "string"]},
}
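A hedged sketch of how the mapping above could be applied to build json-schema properties; the {'slug': ..., 'type': ...} field layout is an assumption about the Webflow collection schema, not something defined in this file:
def build_json_schema_properties(webflow_fields):
    # map each Webflow field type onto its Airbyte/json-schema entry
    mapping = WebflowToAirbyteMapping.webflow_to_airbyte_mapping
    default = {"type": ["null", "string"]}   # fallback for unknown field types
    return {field["slug"]: mapping.get(field["type"], default) for field in webflow_fields}
example_fields = [{"slug": "name", "type": "PlainText"}, {"slug": "published-on", "type": "Date"}]
print(build_json_schema_properties(example_fields))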
| 35.558824
| 138
| 0.53019
|
e4adfdc6488e393bd2cc5c3bdc0583ef11b6018a
| 4,408
|
py
|
Python
|
data/templates/account/signin.mako.py
|
sumukh210991/Cyberweb
|
297bd54c9e223d38818b802087055e397c403f1c
|
[
"Apache-2.0"
] | null | null | null |
data/templates/account/signin.mako.py
|
sumukh210991/Cyberweb
|
297bd54c9e223d38818b802087055e397c403f1c
|
[
"Apache-2.0"
] | null | null | null |
data/templates/account/signin.mako.py
|
sumukh210991/Cyberweb
|
297bd54c9e223d38818b802087055e397c403f1c
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1467226821.321253
_enable_loop = True
_template_filename = '/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/account/signin.mako'
_template_uri = '/account/signin.mako'
_source_encoding = 'utf-8'
from webhelpers.html import escape
_exports = ['header', 'footer', 'col2right', 'col2main', 'headtags']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
pass
def _mako_inherit(template, context):
_mako_generate_namespaces(context)
return runtime._inherit_from(context, u'/account/account.layout.mako', _template_uri)
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n\n')
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_header(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_footer(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_col2right(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer(u'\n<h3> </h3>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_col2main(context):
__M_caller = context.caller_stack._push_frame()
try:
c = context.get('c', UNDEFINED)
config = context.get('config', UNDEFINED)
__M_writer = context.writer()
__M_writer(u'\n<blockquote>\n<h3> Sign into ')
__M_writer(escape(config.get('project.fullname','CyberWeb')))
__M_writer(u'</h3> <p>\n<form action="/signin" method="post">\n<blockquote>\n <table width=200>\n')
if c.message:
__M_writer(u' <tr>\n <font color=red>')
__M_writer(escape(c.message))
__M_writer(u'</font>\n <p>\n </tr>\n')
__M_writer(u' <tr>\n <td align=right>\n Username: \n </td>\n <td align=right>\n <input type="text" name="username" value="')
__M_writer(escape(c.username))
__M_writer(u'">\n </td>\n </tr>\n <tr>\n <td align=right>\n Password: \n </td>\n <td align=right>\n <input type="password" name="password">\n </td>\n </tr>\n <tr>\n <td align=center colspan=2>\n <input type="submit" value="Signin" name="authform" />\n </td>\n </tr>\n </table>\n <blockquote>\n <b><a href="/signup">Request New Account.</a></b>\n <br><b>Forgot your password?</b> [request password here]\n <br><b>Having problems?</b> [contact us here]\n </blockquote>\n</blockquote>\n</form>\n\n\t\n</blockquote>\n\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_headtags(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer(u'\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"source_encoding": "utf-8", "line_map": {"28": 0, "33": 1, "34": 4, "35": 7, "36": 10, "37": 57, "38": 61, "44": 3, "48": 3, "54": 9, "58": 9, "64": 59, "68": 59, "74": 12, "80": 12, "81": 14, "82": 14, "83": 18, "84": 19, "85": 20, "86": 20, "87": 24, "88": 29, "89": 29, "95": 6, "99": 6, "105": 99}, "uri": "/account/signin.mako", "filename": "/home/sumukh/Documents/thesis/Cyberweb/cyberweb/cyberweb/templates/account/signin.mako"}
__M_END_METADATA
"""
| 40.072727
| 644
| 0.616833
|
17aa8c96506e60a79ca88d25a831b6d7e528ca5c
| 547
|
py
|
Python
|
manage.py
|
ryankibayhan/ryb-ecommerce
|
15fa3bcb624be528926458b466ad7fe7fef5158e
|
[
"MIT"
] | null | null | null |
manage.py
|
ryankibayhan/ryb-ecommerce
|
15fa3bcb624be528926458b466ad7fe7fef5158e
|
[
"MIT"
] | 12
|
2019-12-04T23:48:45.000Z
|
2022-03-11T23:53:30.000Z
|
manage.py
|
ryankibayhan/ryb-ecommerce
|
15fa3bcb624be528926458b466ad7fe7fef5158e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ryb.settings.development')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.1875
| 79
| 0.689214
|
0f8c46d159227775a491d6beb5ae6f6089b949f2
| 1,943
|
py
|
Python
|
write_file.py
|
jayagascon/pos-chair
|
a5eefdb23df97e21bbc72e3f84711c2ab6377357
|
[
"CC0-1.0"
] | null | null | null |
write_file.py
|
jayagascon/pos-chair
|
a5eefdb23df97e21bbc72e3f84711c2ab6377357
|
[
"CC0-1.0"
] | null | null | null |
write_file.py
|
jayagascon/pos-chair
|
a5eefdb23df97e21bbc72e3f84711c2ab6377357
|
[
"CC0-1.0"
] | null | null | null |
# adapted from https://learn.adafruit.com/mcp3008-spi-adc/python-circuitpython
import busio
import digitalio
import board
import time
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
import csv
from datetime import datetime
# create the spi bus
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select)
cs = digitalio.DigitalInOut(board.D22)
# create the mcp object
mcp = MCP.MCP3008(spi, cs)
# create analog input channels on pins 0-4 (channel 5 is commented out below)
channel_0 = AnalogIn(mcp, MCP.P0)
channel_1 = AnalogIn(mcp, MCP.P1)
channel_2 = AnalogIn(mcp, MCP.P2)
channel_3 = AnalogIn(mcp, MCP.P3)
channel_4 = AnalogIn(mcp, MCP.P4)
#channel_5 = AnalogIn(mcp, MCP.P5)
i = 10376
#writing data to csv file
while True:
with open('archive/poschair.csv', mode='a') as poschair_file:
poschair_writer = csv.writer(poschair_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
datetime_obj = datetime.now()
poschair_writer.writerow([i, datetime_obj,"Ch0", channel_0.value, channel_0.voltage, "Ch1", channel_1.value, channel_1.voltage,
"Ch2", channel_2.value, channel_2.voltage, "Ch3", channel_3.value, channel_3.voltage, "Ch4", channel_4.value, channel_4.voltage, "correct"])
print(i, datetime_obj, channel_0.value, channel_0.voltage, channel_1.value, channel_1.voltage, channel_2.value, channel_2.voltage, channel_3.value, channel_3.voltage, channel_4.value, channel_4.voltage)
#print('Written row ' + str(i) + ' on ' + str(datetime_obj))
time.sleep(1)
i += 1
#print values from each channel every 10 seconds
#while True:
# for i in range(6):
# print('Channel ' + str(i) + ' Raw Value: ', eval("channel_" + str(i) +".value"))
# print('Channel ' + str(i) + ' ADC Voltage: ' + str(eval("channel_" + str(i) +".voltage")) + 'V')
# time.sleep(10)
# print('------------------')
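A small read-back sketch, assuming the row layout produced by the writerow call above; it is not part of the original logger:
import csv
# print the row counter, timestamp and channel 0 readings from the logged file
with open('archive/poschair.csv') as poschair_file:
    for row in csv.reader(poschair_file):
        # assumed layout: [i, timestamp, "Ch0", value, voltage, "Ch1", ...]
        print(row[0], row[1], row[3], row[4])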
| 33.5
| 210
| 0.689655
|
d0aa4eb1042c9b42b82401adf9d84009fcf90f68
| 11,263
|
py
|
Python
|
exifread/__init__.py
|
basnijholt/exif-py
|
053eb84744cb138f8ad3a77cfa03cca735874369
|
[
"BSD-3-Clause"
] | null | null | null |
exifread/__init__.py
|
basnijholt/exif-py
|
053eb84744cb138f8ad3a77cfa03cca735874369
|
[
"BSD-3-Clause"
] | null | null | null |
exifread/__init__.py
|
basnijholt/exif-py
|
053eb84744cb138f8ad3a77cfa03cca735874369
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Read Exif metadata from tiff and jpeg files.
"""
from .exif_log import get_logger
from .classes import *
from .tags import *
from .utils import ord_
from .heic import HEICExifFinder
__version__ = '2.1.2'
logger = get_logger()
def increment_base(data, base):
return ord_(data[base + 2]) * 256 + ord_(data[base + 3]) + 2
def process_file(f, stop_tag=DEFAULT_STOP_TAG, details=True, strict=False, debug=False, truncate_tags=True, auto_seek=True):
"""
Process an image file (expects an open file object).
This is the function that has to deal with all the arbitrary nasty bits
of the EXIF standard.
"""
if auto_seek:
f.seek(0)
# by default do not fake an EXIF beginning
fake_exif = 0
# determine whether it's a JPEG or TIFF
data = f.read(12)
if data[0:2] in [b'II', b'MM']:
# it's a TIFF file
logger.debug("TIFF format recognized in data[0:2]")
f.seek(0)
endian = f.read(1)
f.read(1)
offset = 0
elif data[4:12] == b'ftypheic':
f.seek(0)
heic = HEICExifFinder (f)
offset, endian = heic.find_exif()
elif data[0:2] == b'\xFF\xD8':
# it's a JPEG file
logger.debug("JPEG format recognized data[0:2]=0x%X%X", ord_(data[0]), ord_(data[1]))
base = 2
logger.debug("data[2]=0x%X data[3]=0x%X data[6:10]=%s",
ord_(data[2]), ord_(data[3]), data[6:10])
while ord_(data[2]) == 0xFF and data[6:10] in (b'JFIF', b'JFXX', b'OLYM', b'Phot'):
length = ord_(data[4]) * 256 + ord_(data[5])
logger.debug(" Length offset is %s", length)
f.read(length - 8)
# fake an EXIF beginning of file
# I don't think this is used. --gd
data = b'\xFF\x00' + f.read(10)
fake_exif = 1
if base > 2:
logger.debug(" Added to base")
base = base + length + 4 - 2
else:
logger.debug(" Added to zero")
base = length + 4
logger.debug(" Set segment base to 0x%X", base)
# Big ugly patch to deal with APP2 (or other) data coming before APP1
f.seek(0)
# in theory, this could be insufficient since 64K is the maximum size--gd
data = f.read(base + 4000)
# base = 2
while 1:
logger.debug(" Segment base 0x%X", base)
if data[base:base + 2] == b'\xFF\xE1':
# APP1
logger.debug(" APP1 at base 0x%X", base)
logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]),
ord_(data[base + 3]))
logger.debug(" Code: %s", data[base + 4:base + 8])
if data[base + 4:base + 8] == b"Exif":
logger.debug(" Decrement base by 2 to get to pre-segment header (for compatibility with later code)")
base -= 2
break
increment = increment_base(data, base)
logger.debug(" Increment base by %s", increment)
base += increment
elif data[base:base + 2] == b'\xFF\xE0':
# APP0
logger.debug(" APP0 at base 0x%X", base)
logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]),
ord_(data[base + 3]))
logger.debug(" Code: %s", data[base + 4:base + 8])
increment = increment_base(data, base)
logger.debug(" Increment base by %s", increment)
base += increment
elif data[base:base + 2] == b'\xFF\xE2':
# APP2
logger.debug(" APP2 at base 0x%X", base)
logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]),
ord_(data[base + 3]))
logger.debug(" Code: %s", data[base + 4:base + 8])
increment = increment_base(data, base)
logger.debug(" Increment base by %s", increment)
base += increment
elif data[base:base + 2] == b'\xFF\xEE':
# APP14
logger.debug(" APP14 Adobe segment at base 0x%X", base)
logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]),
ord_(data[base + 3]))
logger.debug(" Code: %s", data[base + 4:base + 8])
increment = increment_base(data, base)
logger.debug(" Increment base by %s", increment)
base += increment
logger.debug(" There is useful EXIF-like data here, but we have no parser for it.")
elif data[base:base + 2] == b'\xFF\xDB':
logger.debug(" JPEG image data at base 0x%X No more segments are expected.",
base)
break
elif data[base:base + 2] == b'\xFF\xD8':
# APP12
logger.debug(" FFD8 segment at base 0x%X", base)
logger.debug(" Got 0x%X 0x%X and %s instead",
ord_(data[base]),
ord_(data[base + 1]),
data[4 + base:10 + base])
logger.debug(" Length: 0x%X 0x%X", ord_(data[base + 2]),
ord_(data[base + 3]))
logger.debug(" Code: %s", data[base + 4:base + 8])
increment = increment_base(data, base)
logger.debug(" Increment base by %s", increment)
base += increment
elif data[base:base + 2] == b'\xFF\xEC':
# APP12
logger.debug(" APP12 XMP (Ducky) or Pictureinfo segment at base 0x%X",
base)
logger.debug(" Got 0x%X and 0x%X instead", ord_(data[base]),
ord_(data[base + 1]))
logger.debug(" Length: 0x%X 0x%X",
ord_(data[base + 2]),
ord_(data[base + 3]))
logger.debug("Code: %s", data[base + 4:base + 8])
increment = increment_base(data, base)
logger.debug(" Increment base by %s", increment)
base += increment
logger.debug(
" There is useful EXIF-like data here (quality, comment, copyright), but we have no parser for it.")
else:
try:
increment = increment_base(data, base)
logger.debug(" Got 0x%X and 0x%X instead",
ord_(data[base]),
ord_(data[base + 1]))
except IndexError:
logger.debug(" Unexpected/unhandled segment type or file content.")
return {}
else:
logger.debug(" Increment base by %s", increment)
base += increment
f.seek(base + 12)
if ord_(data[2 + base]) == 0xFF and data[6 + base:10 + base] == b'Exif':
# detected EXIF header
offset = f.tell()
endian = f.read(1)
#HACK TEST: endian = 'M'
elif ord_(data[2 + base]) == 0xFF and data[6 + base:10 + base + 1] == b'Ducky':
# detected Ducky header.
logger.debug("EXIF-like header (normally 0xFF and code): 0x%X and %s",
ord_(data[2 + base]), data[6 + base:10 + base + 1])
offset = f.tell()
endian = f.read(1)
elif ord_(data[2 + base]) == 0xFF and data[6 + base:10 + base + 1] == b'Adobe':
# detected APP14 (Adobe)
logger.debug("EXIF-like header (normally 0xFF and code): 0x%X and %s",
ord_(data[2 + base]), data[6 + base:10 + base + 1])
offset = f.tell()
endian = f.read(1)
else:
# no EXIF information
logger.debug("No EXIF header expected data[2+base]==0xFF and data[6+base:10+base]===Exif (or Duck)")
logger.debug("Did get 0x%X and %s",
ord_(data[2 + base]), data[6 + base:10 + base + 1])
return {}
else:
# file format not recognized
logger.debug("File format not recognized.")
return {}
endian = chr(ord_(endian[0]))
# deal with the EXIF info we found
logger.debug("Endian format is %s (%s)", endian, {
'I': 'Intel',
'M': 'Motorola',
'\x01': 'Adobe Ducky',
'd': 'XMP/Adobe unknown'
}[endian])
hdr = ExifHeader(f, endian, offset, fake_exif, strict, debug, details, truncate_tags)
ifd_list = hdr.list_ifd()
thumb_ifd = False
ctr = 0
for ifd in ifd_list:
if ctr == 0:
ifd_name = 'Image'
elif ctr == 1:
ifd_name = 'Thumbnail'
thumb_ifd = ifd
else:
ifd_name = 'IFD %d' % ctr
logger.debug('IFD %d (%s) at offset %s:', ctr, ifd_name, ifd)
hdr.dump_ifd(ifd, ifd_name, stop_tag=stop_tag)
ctr += 1
# EXIF IFD
exif_off = hdr.tags.get('Image ExifOffset')
if exif_off:
logger.debug('Exif SubIFD at offset %s:', exif_off.values[0])
hdr.dump_ifd(exif_off.values[0], 'EXIF', stop_tag=stop_tag)
# deal with MakerNote contained in EXIF IFD
# (Some apps use MakerNote tags but do not use a format for which we
# have a description, do not process these).
if details and 'EXIF MakerNote' in hdr.tags and 'Image Make' in hdr.tags:
hdr.decode_maker_note()
# extract thumbnails
if details and thumb_ifd:
hdr.extract_tiff_thumbnail(thumb_ifd)
hdr.extract_jpeg_thumbnail()
# parse XMP tags (experimental)
if debug and details:
xmp_string = b''
# Easy we already have them
if 'Image ApplicationNotes' in hdr.tags:
logger.debug('XMP present in Exif')
xmp_string = make_string(hdr.tags['Image ApplicationNotes'].values)
# We need to look in the entire file for the XML
else:
logger.debug('XMP not in Exif, searching file for XMP info...')
xml_started = False
xml_finished = False
for line in f:
open_tag = line.find(b'<x:xmpmeta')
close_tag = line.find(b'</x:xmpmeta>')
if open_tag != -1:
xml_started = True
line = line[open_tag:]
logger.debug('XMP found opening tag at line position %s' % open_tag)
if close_tag != -1:
logger.debug('XMP found closing tag at line position %s' % close_tag)
line_offset = 0
if open_tag != -1:
line_offset = open_tag
line = line[:(close_tag - line_offset) + 12]
xml_finished = True
if xml_started:
xmp_string += line
if xml_finished:
break
logger.debug('XMP Finished searching for info')
if xmp_string:
hdr.parse_xmp(xmp_string)
return hdr.tags
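A minimal usage sketch of the function above; the file path is a placeholder:
import exifread
# process_file expects an already-opened binary file object
with open('photo.jpg', 'rb') as f:
    tags = exifread.process_file(f, details=False)
for name, value in tags.items():
    print(name, value)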
| 41.560886
| 124
| 0.500666
|
b334b84e25a98f663b9646b24d691c358329c2b8
| 1,913
|
py
|
Python
|
deliver/functions_will/rotateDTI.py
|
mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso
|
f094c706db815f91cf61d1d501c2a9030b9b54d3
|
[
"MIT"
] | null | null | null |
deliver/functions_will/rotateDTI.py
|
mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso
|
f094c706db815f91cf61d1d501c2a9030b9b54d3
|
[
"MIT"
] | null | null | null |
deliver/functions_will/rotateDTI.py
|
mariecpereira/Extracao-de-Caracteristicas-Corpo-Caloso
|
f094c706db815f91cf61d1d501c2a9030b9b54d3
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# In[ ]:
import numpy as np
def rotateDTI(evl, evt, R):
s,m,n = evl[0].shape
# ====== DETERMINE TARGET DOMAIN SIZE AND A TRANSLATION TO FIT THE ROTATED IMAGE =======
# VERTICES FROM THE CUBE DEFINING THE ORIGINAL VOLUME
cube = np.array([[0,0,0,1],
[0,0,n,1],
[0,m,n,1],
[0,m,0,1],
[s,m,0,1],
[s,0,0,1],
[s,0,n,1],
[s,m,n,1]]).transpose()
# COMPUTE THE FIT TRANSLATION AND COMBINE WITH THE ROTATION
cube = np.dot(R,cube)
t = -cube.min(axis=1)
Tr = np.diag(np.ones(4, dtype='float'))
Tr[:3,3] = t[:3]
T = np.dot(Tr,R)
# DEFINE THE TARGET DOMAIN
cube = cube + t.reshape(4,1)
domain = np.ceil(cube.max(axis=1))[:3].astype('int')
# === TRANSFORMATION ===
invT = np.linalg.inv(T)
N = domain.prod()
# GET INDICES IN TARGET SPACE
points = np.array(np.indices(domain)).reshape(3,N)
points = np.vstack((points, np.ones(N)))
# COMPUTE POINT COORDINATES WITH NEAREST NEIGHBOR INTERPOLATION
points = np.dot(invT, points)[:3]
points = np.round(points).astype('int')
out_of_space = np.logical_or(points<0, points>=np.array([s,m,n]).reshape(3,1)).max(axis=0)
points[:,out_of_space] = 0
z,y,x = points
# APPLY TRANSFORMATION TO THE EIGENVALUES VOLUME
eigenvals = evl[:,z,y,x].copy()
eigenvals[:,out_of_space] = 0
eigenvals.shape = (3,) + tuple(domain)
# APPLY ROTATION TO THE EIGENVECTORS
evt = evt.copy()
evt.shape = (3,3,s*m*n)
    for i in range(3):
evt[i] = np.dot(R[:3,:3],evt[i])
evt.shape = (3,3,s,m,n)
# APPLY TRANSFORMATION TO THE EIGENVECTORS VOLUME
eigenvects = evt[:,:,z,y,x]
eigenvects[:,:,out_of_space] = 0
eigenvects.shape = (3,3) + tuple(domain)
return (eigenvals, eigenvects, T)
# In[ ]:
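A usage sketch under stated assumptions: randomly generated eigenvalue/eigenvector volumes with the shapes the function expects, and a 90-degree rotation of the first two volume axes expressed as a 4x4 homogeneous matrix:
import numpy as np
s, m, n = 4, 8, 8
evl = np.random.rand(3, s, m, n)        # three eigenvalues per voxel
evt = np.random.rand(3, 3, s, m, n)     # three eigenvectors (3 components each) per voxel
theta = np.pi / 2                       # 90-degree rotation, homogeneous coordinates
R = np.array([[np.cos(theta), -np.sin(theta), 0, 0],
              [np.sin(theta),  np.cos(theta), 0, 0],
              [0,              0,             1, 0],
              [0,              0,             0, 1]])
eigenvals, eigenvects, T = rotateDTI(evl, evt, R)
print(eigenvals.shape, eigenvects.shape)   # (3, *domain) and (3, 3, *domain)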
| 26.569444
| 94
| 0.550444
|
42c52b62ca36d09d1970509163f811a1b512954e
| 2,376
|
py
|
Python
|
services/classification-api/manage.py
|
conlamon/satellite-classification-flask-api
|
0caf5e91d8545da07ef65704dfbdb63d21fa8e76
|
[
"MIT"
] | 5
|
2018-05-24T18:46:39.000Z
|
2020-03-23T20:37:46.000Z
|
services/classification-api/manage.py
|
conlamon/satellite-classification-flask-api
|
0caf5e91d8545da07ef65704dfbdb63d21fa8e76
|
[
"MIT"
] | 1
|
2019-08-31T23:31:41.000Z
|
2019-09-01T19:29:13.000Z
|
services/classification-api/manage.py
|
conlamon/satellite-classification-flask-api
|
0caf5e91d8545da07ef65704dfbdb63d21fa8e76
|
[
"MIT"
] | 1
|
2019-08-27T18:20:15.000Z
|
2019-08-27T18:20:15.000Z
|
import unittest
from flask.cli import FlaskGroup
from project import create_app, db
from project.api.models import ImageTile
from project.api.inference_helper import load_model
import coverage
# ------- Setup code coverage reporting
# Configure the code coverage report
COV = coverage.coverage(
branch=True,
include='project/*',
omit=[
'project/tests/*',
'project/config.py'
]
)
COV.start()
# ------- Start Flask App
app = create_app()
cli = FlaskGroup(create_app=create_app)
# Load the TensorFlow model before finishing app load
print("Loading the model")
load_model()
# Handle CORS
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Headers', 'Content-Type, Authorization')
response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
return response
# ------- Setup CLI commands for docker deployment
# Command to recreate the database
@cli.command()
def recreate_db():
db.drop_all()
db.create_all()
db.session.commit()
# Command to run unit tests
@cli.command()
def test():
"""Runs tests without code coverage report"""
tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1
# Command to seed the database with information provided in a .csv file
# Note: Use psycopg2 copy_from command to perform this operation efficiently
@cli.command()
def seed_db():
"""Seeds the database with provided csv data of tile location"""
cursor = db.session.connection().connection.cursor()
with open("project/static/dbload/tiles-db-init.csv", 'r') as data_file:
# Skip the header row
next(data_file)
cursor.copy_from(data_file, 'tiles', sep=',')
db.session.commit()
# Run code coverage report
@cli.command()
def cov():
"""Runs the unt tests with coverage"""
tests = unittest.TestLoader().discover('project/tests')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
COV.stop()
COV.save()
print('Coverage Summary:')
COV.report()
COV.html_report()
COV.erase()
return 0
return 1
if __name__ == '__main__':
cli()
| 26.696629
| 85
| 0.688552
|
4246541b3c37b31cef87bf2680e15f5c59c57894
| 10,171
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/network/v20181201/virtual_network_tap.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20181201/virtual_network_tap.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/network/v20181201/virtual_network_tap.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['VirtualNetworkTap']
class VirtualNetworkTap(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
destination_load_balancer_front_end_ip_configuration: Optional[pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']]] = None,
destination_network_interface_ip_configuration: Optional[pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']]] = None,
destination_port: Optional[pulumi.Input[int]] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tap_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Virtual Network Tap resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['FrontendIPConfigurationArgs']] destination_load_balancer_front_end_ip_configuration: The reference to the private IP address on the internal Load Balancer that will receive the tap
:param pulumi.Input[pulumi.InputType['NetworkInterfaceIPConfigurationArgs']] destination_network_interface_ip_configuration: The reference to the private IP Address of the collector nic that will receive the tap
:param pulumi.Input[int] destination_port: The VXLAN destination port that will receive the tapped traffic.
:param pulumi.Input[str] etag: Gets a unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] tap_name: The name of the virtual network tap.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['destination_load_balancer_front_end_ip_configuration'] = destination_load_balancer_front_end_ip_configuration
__props__['destination_network_interface_ip_configuration'] = destination_network_interface_ip_configuration
__props__['destination_port'] = destination_port
__props__['etag'] = etag
__props__['id'] = id
__props__['location'] = location
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
if tap_name is None:
raise TypeError("Missing required property 'tap_name'")
__props__['tap_name'] = tap_name
__props__['name'] = None
__props__['network_interface_tap_configurations'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200301:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VirtualNetworkTap"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VirtualNetworkTap")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VirtualNetworkTap, __self__).__init__(
'azure-nextgen:network/v20181201:VirtualNetworkTap',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VirtualNetworkTap':
"""
Get an existing VirtualNetworkTap resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return VirtualNetworkTap(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="destinationLoadBalancerFrontEndIPConfiguration")
def destination_load_balancer_front_end_ip_configuration(self) -> pulumi.Output[Optional['outputs.FrontendIPConfigurationResponse']]:
"""
The reference to the private IP address on the internal Load Balancer that will receive the tap
"""
return pulumi.get(self, "destination_load_balancer_front_end_ip_configuration")
@property
@pulumi.getter(name="destinationNetworkInterfaceIPConfiguration")
def destination_network_interface_ip_configuration(self) -> pulumi.Output[Optional['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
The reference to the private IP Address of the collector nic that will receive the tap
"""
return pulumi.get(self, "destination_network_interface_ip_configuration")
@property
@pulumi.getter(name="destinationPort")
def destination_port(self) -> pulumi.Output[Optional[int]]:
"""
The VXLAN destination port that will receive the tapped traffic.
"""
return pulumi.get(self, "destination_port")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInterfaceTapConfigurations")
def network_interface_tap_configurations(self) -> pulumi.Output[Sequence['outputs.NetworkInterfaceTapConfigurationResponse']]:
"""
Specifies the list of resource IDs for the network interface IP configuration that needs to be tapped.
"""
return pulumi.get(self, "network_interface_tap_configurations")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the virtual network tap. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resourceGuid property of the virtual network tap.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
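A hedged sketch of declaring this resource inside a Pulumi program; the resource name, group, tap name and location are placeholders, and the optional destination configuration is omitted for brevity:
import pulumi
tap = VirtualNetworkTap(
    "example-tap",
    resource_group_name="my-resource-group",   # placeholder
    tap_name="my-tap",                         # placeholder
    location="westeurope",
    tags={"env": "dev"},
)
pulumi.export("tap_name", tap.name)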
| 50.351485
| 1,291
| 0.687543
|
297933311d117125483d95d5faaa2fe1891aaf7d
| 600
|
py
|
Python
|
fastreid/data/prepare_data.py
|
huang-ju-git/fast-reid
|
ef55d8e3ac2995a7969468ea165e3decb2b3f212
|
[
"Apache-2.0"
] | null | null | null |
fastreid/data/prepare_data.py
|
huang-ju-git/fast-reid
|
ef55d8e3ac2995a7969468ea165e3decb2b3f212
|
[
"Apache-2.0"
] | null | null | null |
fastreid/data/prepare_data.py
|
huang-ju-git/fast-reid
|
ef55d8e3ac2995a7969468ea165e3decb2b3f212
|
[
"Apache-2.0"
] | null | null | null |
import pickle
import numpy as np
from torch import nn
import torch
# with open("msmt17-train-face.pkl","rb") as f:
# msmt_dict=pickle.load(f)
# keys=list(msmt_dict.keys())
# print(len(keys))
# keys_np=np.array(keys)
# np.save('msmt17-test-face-keys',keys_np)
# features=[]
# for term in keys:
# features.append(msmt_dict[term])
# features_np=np.array(features)
# np.save('msmt17-test-face-features',features_np)
pool_layer = nn.AdaptiveAvgPool2d(1)
features=torch.randn(32,2560).reshape(32,2560,1)
global_feat = pool_layer(features)
print(global_feat.shape)
torch.nn.AdaptiveAvgPool2d
| 24
| 51
| 0.738333
|
ec0b17f56b83cafcd8924af732b81f74468b0ea7
| 432
|
py
|
Python
|
numbas_lti/migrations/0038_lticonsumer_url.py
|
oscarsiles/numbas-lti-provider
|
ef7080a2593a800a1b9630c746e4f8667e2ec42d
|
[
"Apache-2.0"
] | 6
|
2016-12-12T14:41:33.000Z
|
2021-04-18T01:04:23.000Z
|
numbas_lti/migrations/0038_lticonsumer_url.py
|
oscarsiles/numbas-lti-provider
|
ef7080a2593a800a1b9630c746e4f8667e2ec42d
|
[
"Apache-2.0"
] | 206
|
2016-08-24T13:53:07.000Z
|
2022-03-31T09:14:43.000Z
|
numbas_lti/migrations/0038_lticonsumer_url.py
|
oscarsiles/numbas-lti-provider
|
ef7080a2593a800a1b9630c746e4f8667e2ec42d
|
[
"Apache-2.0"
] | 13
|
2016-10-23T04:53:30.000Z
|
2022-02-17T09:25:00.000Z
|
# Generated by Django 2.0 on 2018-05-03 11:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('numbas_lti', '0037_auto_20171024_1400'),
]
operations = [
migrations.AddField(
model_name='lticonsumer',
name='url',
field=models.URLField(blank=True, default='', verbose_name='Home URL of consumer'),
),
]
| 22.736842
| 95
| 0.613426
|
dd5647a317a31b25337113a97429268d1ca193ac
| 471
|
py
|
Python
|
thread_Mutex.py
|
matiji66/python-concurrent-study
|
dcee1b2573bae75efecc578b5461dc6767f6e570
|
[
"Apache-2.0"
] | null | null | null |
thread_Mutex.py
|
matiji66/python-concurrent-study
|
dcee1b2573bae75efecc578b5461dc6767f6e570
|
[
"Apache-2.0"
] | null | null | null |
thread_Mutex.py
|
matiji66/python-concurrent-study
|
dcee1b2573bae75efecc578b5461dc6767f6e570
|
[
"Apache-2.0"
] | null | null | null |
import time
import threading
# 3.4 Thread locks / mutex (Mutex)
# A process can start multiple threads, and those threads share the parent process's memory space,
# which means every thread can access the same data. So what happens if two threads try to modify
# the same piece of data at the same time?
def addNum():
    global num  # each thread reads this global variable
print('--get num:', num)
time.sleep(1)
    num -= 1  # decrement the shared variable by 1
num = 100  # define a shared variable
thread_list = []
for i in range(100):
t = threading.Thread(target=addNum)
t.start()
thread_list.append(t)
for t in thread_list:  # wait for all threads to finish
t.join()
print('final num:', num)
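The script above deliberately leaves the counter unprotected, which is the race the opening comment asks about. A minimal sketch of the mutex variant, using threading.Lock, mirrors the original structure:
import threading
import time
lock = threading.Lock()    # mutex protecting the shared counter
num_locked = 100
def add_num_locked():
    global num_locked
    time.sleep(1)
    with lock:             # only one thread may decrement at a time
        num_locked -= 1
threads = [threading.Thread(target=add_num_locked) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print('final num with lock:', num_locked)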
| 18.84
| 78
| 0.677282
|
477d797944709f8b9db3f86ca2ada0619949b66a
| 12,170
|
py
|
Python
|
unittests/taptests.py
|
annacrombie/meson
|
3e2dba5b7cd107f60474b3cb01c307a9e2354868
|
[
"Apache-2.0"
] | 64
|
2015-01-09T13:45:23.000Z
|
2015-06-13T20:16:01.000Z
|
unittests/taptests.py
|
annacrombie/meson
|
3e2dba5b7cd107f60474b3cb01c307a9e2354868
|
[
"Apache-2.0"
] | 110
|
2015-01-09T01:35:56.000Z
|
2015-06-14T11:26:04.000Z
|
unittests/taptests.py
|
annacrombie/meson
|
3e2dba5b7cd107f60474b3cb01c307a9e2354868
|
[
"Apache-2.0"
] | 13
|
2015-01-05T09:08:37.000Z
|
2015-06-04T08:34:45.000Z
|
# Copyright 2016-2021 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import io
from mesonbuild.mtest import TAPParser, TestResult
class TAPParserTests(unittest.TestCase):
def assert_test(self, events, **kwargs):
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Test(**kwargs))
def assert_plan(self, events, **kwargs):
if 'skipped' not in kwargs:
kwargs['skipped'] = False
if 'explanation' not in kwargs:
kwargs['explanation'] = None
self.assertEqual(next(events), TAPParser.Plan(**kwargs))
def assert_version(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Version(**kwargs))
def assert_error(self, events):
self.assertEqual(type(next(events)), TAPParser.Error)
def assert_bailout(self, events, **kwargs):
self.assertEqual(next(events), TAPParser.Bailout(**kwargs))
def assert_last(self, events):
with self.assertRaises(StopIteration):
next(events)
def parse_tap(self, s):
parser = TAPParser()
return iter(parser.parse(io.StringIO(s)))
def parse_tap_v13(self, s):
events = self.parse_tap('TAP version 13\n' + s)
self.assert_version(events, version=13)
return events
def test_empty(self):
events = self.parse_tap('')
self.assert_last(events)
def test_empty_plan(self):
events = self.parse_tap('1..0')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_last(events)
def test_plan_directive(self):
events = self.parse_tap('1..0 # skipped for some reason')
self.assert_plan(events, num_tests=0, late=False, skipped=True,
explanation='for some reason')
self.assert_last(events)
events = self.parse_tap('1..1 # skipped for some reason\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=True,
explanation='for some reason')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('1..1 # todo not supported here\nok 1')
self.assert_error(events)
self.assert_plan(events, num_tests=1, late=False, skipped=False,
explanation='not supported here')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_ok(self):
events = self.parse_tap('ok')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_number(self):
events = self.parse_tap('ok 1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_with_name(self):
events = self.parse_tap('ok 1 abc')
self.assert_test(events, number=1, name='abc', result=TestResult.OK)
self.assert_last(events)
def test_one_test_not_ok(self):
events = self.parse_tap('not ok')
self.assert_test(events, number=1, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_one_test_todo(self):
events = self.parse_tap('not ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.EXPECTEDFAIL)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # TODO')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_one_test_skip(self):
events = self.parse_tap('ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
def test_one_test_skip_failure(self):
events = self.parse_tap('not ok 1 abc # SKIP')
self.assert_test(events, number=1, name='abc', result=TestResult.FAIL)
self.assert_last(events)
def test_many_early_plan(self):
events = self.parse_tap('1..4\nok 1\nnot ok 2\nok 3\nnot ok 4')
self.assert_plan(events, num_tests=4, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_last(events)
def test_many_late_plan(self):
events = self.parse_tap('ok 1\nnot ok 2\nok 3\nnot ok 4\n1..4')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_test(events, number=3, name='', result=TestResult.OK)
self.assert_test(events, number=4, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=4, late=True)
self.assert_last(events)
def test_directive_case(self):
events = self.parse_tap('ok 1 abc # skip')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP)
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS)
self.assert_last(events)
def test_directive_explanation(self):
events = self.parse_tap('ok 1 abc # skip why')
self.assert_test(events, number=1, name='abc', result=TestResult.SKIP,
explanation='why')
self.assert_last(events)
events = self.parse_tap('ok 1 abc # ToDo Because')
self.assert_test(events, number=1, name='abc', result=TestResult.UNEXPECTEDPASS,
explanation='Because')
self.assert_last(events)
def test_one_test_early_plan(self):
events = self.parse_tap('1..1\nok')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_one_test_late_plan(self):
events = self.parse_tap('ok\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_out_of_order(self):
events = self.parse_tap('ok 2')
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_middle_plan(self):
events = self.parse_tap('ok 1\n1..2\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=2, late=True)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many_plans(self):
events = self.parse_tap('1..1\n1..2\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_error(events)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_too_many(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..1')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=1, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..1\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few(self):
events = self.parse_tap('ok 1\nnot ok 2\n1..3')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_plan(events, num_tests=3, late=True)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..3\nok 1\nnot ok 2')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_error(events)
self.assert_last(events)
def test_too_few_bailout(self):
events = self.parse_tap('1..3\nok 1\nnot ok 2\nBail out! no third test')
self.assert_plan(events, num_tests=3, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_bailout(events, message='no third test')
self.assert_last(events)
def test_diagnostics(self):
events = self.parse_tap('1..1\n# ignored\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\n1..1\nok 1\n# ignored too')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap('# ignored\nok 1\n1..1\n# ignored too')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_plan(events, num_tests=1, late=True)
self.assert_last(events)
def test_empty_line(self):
events = self.parse_tap('1..1\n\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_unexpected(self):
events = self.parse_tap('1..1\ninvalid\nok 1')
self.assert_plan(events, num_tests=1, late=False)
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_last(events)
def test_version(self):
events = self.parse_tap('TAP version 13\n')
self.assert_version(events, version=13)
self.assert_last(events)
events = self.parse_tap('TAP version 12\n')
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap('1..0\nTAP version 13\n')
self.assert_plan(events, num_tests=0, late=False, skipped=True)
self.assert_error(events)
self.assert_last(events)
def test_yaml(self):
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def\n ...\nok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_test(events, number=2, name='', result=TestResult.OK)
self.assert_last(events)
events = self.parse_tap_v13('ok\n ---\n foo: abc\n bar: def')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_last(events)
events = self.parse_tap_v13('ok 1\n ---\n foo: abc\n bar: def\nnot ok 2')
self.assert_test(events, number=1, name='', result=TestResult.OK)
self.assert_error(events)
self.assert_test(events, number=2, name='', result=TestResult.FAIL)
self.assert_last(events)
| 41.821306
| 88
| 0.655793
|
0a2d658018f7e2bd03365830f952e33493fd8803
| 16,459
|
py
|
Python
|
custom/intrahealth/reports/taux_de_satisfaction_dashboard.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 1
|
2020-07-14T13:00:23.000Z
|
2020-07-14T13:00:23.000Z
|
custom/intrahealth/reports/taux_de_satisfaction_dashboard.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | 94
|
2020-12-11T06:57:31.000Z
|
2022-03-15T10:24:06.000Z
|
custom/intrahealth/reports/taux_de_satisfaction_dashboard.py
|
dimagilg/commcare-hq
|
ea1786238eae556bb7f1cbd8d2460171af1b619c
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from django.utils.functional import cached_property
from memoized import memoized
from corehq.apps.hqwebapp.decorators import use_nvd3
from corehq.apps.locations.models import SQLLocation
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.graph_models import Axis
from corehq.apps.reports.standard import ProjectReportParametersMixin, CustomProjectReport, DatespanMixin
from custom.intrahealth.filters import DateRangeFilter, ProgramsAndProductsFilter, YeksiNaaLocationFilter
from custom.intrahealth.reports.utils import change_id_keys_to_names
from custom.intrahealth.sqldata import SatisfactionRateAfterDeliveryPerProductData
from dimagi.utils.dates import force_to_date
from custom.intrahealth.utils import PNAMultiBarChart
class TauxDeSatisfactionReport(CustomProjectReport, DatespanMixin, ProjectReportParametersMixin):
slug = 'taux_de_satisfaction_par_produit_report'
name = 'Taux de Satisfaction par Produit'
comment = 'Quantité de produits livrés / Quantité de produits commandés'
default_rows = 10
exportable = True
report_template_path = 'yeksi_naa/tabular_report.html'
@property
def export_table(self):
report = [
[
self.name,
[],
]
]
headers = [x.html for x in self.headers]
rows = self.calculate_rows()
report[0][1].append(headers)
for row in rows:
location_name = row[0]
location_name = location_name.replace('<b>', '')
location_name = location_name.replace('</b>', '')
row_to_return = [location_name]
rows_length = len(row)
for r in range(1, rows_length):
value = row[r]['html']
value = value.replace('<b>', '')
value = value.replace('</b>', '')
row_to_return.append(value)
report[0][1].append(row_to_return)
return report
@use_nvd3
def decorator_dispatcher(self, request, *args, **kwargs):
super(TauxDeSatisfactionReport, self).decorator_dispatcher(request, *args, **kwargs)
@property
def fields(self):
return [DateRangeFilter, ProgramsAndProductsFilter, YeksiNaaLocationFilter]
@cached_property
def rendered_report_title(self):
return self.name
@property
def report_context(self):
if not self.needs_filters:
return {
'report': self.get_report_context(),
'charts': self.charts,
'title': self.name
}
return {}
@property
def selected_location(self):
try:
return SQLLocation.objects.get(location_id=self.request.GET.get('location_id'))
except SQLLocation.DoesNotExist:
return None
@property
def selected_location_type(self):
if self.selected_location:
location_type = self.selected_location.location_type.code
if location_type == 'region':
return 'District'
else:
return 'PPS'
else:
return 'Region'
@property
def products(self):
products_names = []
for row in self.clean_rows:
for product_info in row['products']:
product_name = product_info['product_name']
if product_name not in products_names:
products_names.append(product_name)
products_names = sorted(products_names)
return products_names
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn(self.selected_location_type),
)
products = self.products
for product in products:
headers.add_column(DataTablesColumn(product))
headers.add_column(DataTablesColumn('SYNTHESE'))
return headers
def get_report_context(self):
if self.needs_filters:
headers = []
rows = []
else:
rows = self.calculate_rows()
headers = self.headers
context = {
'report_table': {
'title': self.name,
'slug': self.slug,
'comment': self.comment,
'headers': headers,
'rows': rows,
'default_rows': self.default_rows,
}
}
return context
@property
def clean_rows(self):
return SatisfactionRateAfterDeliveryPerProductData(config=self.config).rows
def calculate_rows(self):
def data_to_rows(quantities_list):
quantities_to_return = []
added_locations = []
locations_with_products = {}
all_products = self.products
for quantity in quantities_list:
location_id = quantity['location_id']
location_name = quantity['location_name']
products = sorted(quantity['products'], key=lambda x: x['product_name'])
if location_id in added_locations:
length = len(locations_with_products[location_id])
product_ids = [p['product_id'] for p in locations_with_products[location_id]]
for product in products:
if product['product_id'] not in product_ids:
locations_with_products[location_id].append({
'product_name': product['product_name'],
'product_id': product['product_id'],
'amt_delivered_convenience': product['amt_delivered_convenience'],
'ideal_topup': product['ideal_topup'],
})
for r in range(0, length):
product_for_location = locations_with_products[location_id][r]
for product in products:
if product_for_location['product_id'] == product['product_id']:
amt_delivered_convenience = product['amt_delivered_convenience']
ideal_topup = product['ideal_topup']
locations_with_products[location_id][r]['amt_delivered_convenience'] += \
amt_delivered_convenience
locations_with_products[location_id][r]['ideal_topup'] += ideal_topup
else:
added_locations.append(location_id)
locations_with_products[location_id] = []
unique_products_for_location = []
products_to_add = []
for product in products:
product_name = product['product_name']
if product_name not in unique_products_for_location and product_name in all_products:
unique_products_for_location.append(product_name)
products_to_add.append(product)
else:
index = unique_products_for_location.index(product_name)
amt_delivered_convenience = product['amt_delivered_convenience']
ideal_topup = product['ideal_topup']
products_to_add[index]['amt_delivered_convenience'] += amt_delivered_convenience
products_to_add[index]['ideal_topup'] += ideal_topup
for product in products_to_add:
locations_with_products[location_id].append(product)
locations_with_products = change_id_keys_to_names(self.config['domain'], locations_with_products)
for location, products in locations_with_products.items():
products_names = [x['product_name'] for x in products]
for product_name in all_products:
if product_name not in products_names:
locations_with_products[location].append({
'product_id': None,
'product_name': product_name,
'amt_delivered_convenience': 0,
'ideal_topup': 0,
})
for location, products in locations_with_products.items():
quantities_to_return.append([
location,
])
products_list = sorted(products, key=lambda x: x['product_name'])
for product_info in products_list:
amt_delivered_convenience = product_info['amt_delivered_convenience']
ideal_topup = product_info['ideal_topup']
percent = (amt_delivered_convenience / float(ideal_topup) * 100) \
if ideal_topup != 0 else 'pas de données'
if percent != 'pas de données':
percent = '{:.2f} %'.format(percent)
quantities_to_return[-1].append({
'html': '{}'.format(percent),
'sort_key': percent
})
total_row = calculate_total_row(locations_with_products)
quantities_to_return.append(total_row)
quantities_to_return = add_total_column(locations_with_products, quantities_to_return)
return quantities_to_return
def add_total_column(locations_with_products, quantities_to_return):
length = len(quantities_to_return)
for location, products in locations_with_products.items():
locations_amt_delivered_convenience = 0
locations_ideal_topup = 0
for product in products:
locations_amt_delivered_convenience += product['amt_delivered_convenience']
locations_ideal_topup += product['ideal_topup']
locations_percent = (locations_amt_delivered_convenience / float(locations_ideal_topup) * 100) \
if locations_ideal_topup != 0 else 0
for r in range(0, length):
current_location = quantities_to_return[r][0]
if current_location == location:
quantities_to_return[r].append({
'html': '<b>{:.2f} %</b>'.format(locations_percent),
'sort_key': locations_percent
})
return quantities_to_return
def calculate_total_row(locations_with_products):
total_row_to_return = ['<b>SYNTHESE</b>']
locations_with_products['<b>SYNTHESE</b>'] = []
data_for_total_row = []
for location, products in locations_with_products.items():
products_list = sorted(products, key=lambda x: x['product_name'])
if not data_for_total_row:
for product_info in products_list:
amt_delivered_convenience = product_info['amt_delivered_convenience']
ideal_topup = product_info['ideal_topup']
product_name = product_info['product_name']
data_for_total_row.append([amt_delivered_convenience, ideal_topup, product_name])
else:
for r in range(0, len(products_list)):
product_info = products_list[r]
amt_delivered_convenience = product_info['amt_delivered_convenience']
ideal_topup = product_info['ideal_topup']
data_for_total_row[r][0] += amt_delivered_convenience
data_for_total_row[r][1] += ideal_topup
for data in data_for_total_row:
amt_delivered_convenience = data[0]
ideal_topup = data[1]
product_name = data[2]
locations_with_products['<b>SYNTHESE</b>'].append({
'amt_delivered_convenience': amt_delivered_convenience,
'ideal_topup': ideal_topup,
'product_name': product_name,
})
percent = (amt_delivered_convenience / float(ideal_topup) * 100) \
if ideal_topup != 0 else 0
total_row_to_return.append({
'html': '<b>{:.2f} %</b>'.format(percent),
'sort_key': percent,
})
return total_row_to_return
rows = data_to_rows(self.clean_rows)
return rows
@property
def charts(self):
chart = PNAMultiBarChart(None, Axis('Location'), Axis('Percent', format='.2f'))
chart.height = 550
chart.marginBottom = 150
chart.rotateLabels = -45
chart.showControls = False
chart.forceY = [0, 100]
def data_to_chart(quantities_list):
quantities_to_return = []
products_data = []
added_products = []
for quantity in quantities_list:
location_id = quantity['location_id']
location_name = quantity['location_name']
for product in quantity['products']:
product_id = product['product_id']
product_name = product['product_name']
amt_delivered_convenience = product['amt_delivered_convenience']
ideal_topup = product['ideal_topup']
if product_id not in added_products:
added_products.append(product_id)
product_dict = {
'product_id': product_id,
'product_name': product_name,
'location_id': location_id,
'location_name': location_name,
'amt_delivered_convenience': amt_delivered_convenience,
'ideal_topup': ideal_topup,
}
products_data.append(product_dict)
else:
for product_data in products_data:
if product_data['product_id'] == product_id:
product_data['amt_delivered_convenience'] += amt_delivered_convenience
product_data['ideal_topup'] += ideal_topup
products = sorted(products_data, key=lambda x: x['product_name'])
for product in products:
product_name = product['product_name']
amt_delivered_convenience = product['amt_delivered_convenience']
ideal_topup = product['ideal_topup']
percent = (amt_delivered_convenience / float(ideal_topup)) * 100 if ideal_topup != 0 else 0
quantities_to_return.append([
product_name,
{
'html': '{}'.format(percent),
'sort_key': percent
}
])
return quantities_to_return
def get_data_for_graph():
com = []
rows = data_to_chart(self.clean_rows)
for row in rows:
com.append({"x": row[0], "y": row[1]['sort_key']})
return [
{
"key": 'Taux de Satisfaction des produits',
'values': com
},
]
chart.data = get_data_for_graph()
return [chart]
@property
def config(self):
config = {
'domain': self.domain,
}
if self.request.GET.get('startdate'):
startdate = force_to_date(self.request.GET.get('startdate'))
else:
startdate = datetime.datetime.now()
if self.request.GET.get('enddate'):
enddate = force_to_date(self.request.GET.get('enddate'))
else:
enddate = datetime.datetime.now()
config['startdate'] = startdate
config['enddate'] = enddate
config['product_program'] = self.request.GET.get('product_program')
config['product_product'] = self.request.GET.get('product_product')
config['location_id'] = self.request.GET.get('location_id')
return config
| 41.987245
| 112
| 0.559512
|
2b837b502e5f3cba1893c8594b7ec730f5414187
| 815
|
py
|
Python
|
setup.py
|
gaoyunzhi/readee
|
fc1be745869a52b9e6e48f9e00245dd15819e706
|
[
"MIT"
] | null | null | null |
setup.py
|
gaoyunzhi/readee
|
fc1be745869a52b9e6e48f9e00245dd15819e706
|
[
"MIT"
] | null | null | null |
setup.py
|
gaoyunzhi/readee
|
fc1be745869a52b9e6e48f9e00245dd15819e706
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="readee",
version="0.0.119",
author="Yunzhi Gao",
author_email="gaoyunzhi@gmail.com",
description="Library for export webpage to reader mode html.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/gaoyunzhi/readee",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
install_requires=[
'bs4',
'readability-lxml',
'telegram_util',
'opencc-python-reimplemented',
'cached_url',
],
python_requires='>=3.0',
)
| 28.103448
| 66
| 0.635583
|
74007d872b8f7d6245f299fae5245b5ee4c86aa9
| 84
|
py
|
Python
|
curves/curves/__init__.py
|
SweRavn/curve
|
44ad54800fae1f434e7845963ef8b556c147c327
|
[
"MIT"
] | null | null | null |
curves/curves/__init__.py
|
SweRavn/curve
|
44ad54800fae1f434e7845963ef8b556c147c327
|
[
"MIT"
] | 10
|
2020-12-13T17:25:11.000Z
|
2020-12-28T17:16:59.000Z
|
curves/curves/__init__.py
|
SweRavn/curve
|
44ad54800fae1f434e7845963ef8b556c147c327
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 23 07:50:15 2019
@author: rore
"""
| 10.5
| 35
| 0.559524
|
df21d3ec4ea153e9d1ce60f89c26a61ff72f2c85
| 2,340
|
py
|
Python
|
python/kfserving/kfserving/models/v1_time.py
|
quanjielin/kfserving
|
b3e99d1131d12b8da151954340df2aea3ab161cb
|
[
"Apache-2.0"
] | null | null | null |
python/kfserving/kfserving/models/v1_time.py
|
quanjielin/kfserving
|
b3e99d1131d12b8da151954340df2aea3ab161cb
|
[
"Apache-2.0"
] | null | null | null |
python/kfserving/kfserving/models/v1_time.py
|
quanjielin/kfserving
|
b3e99d1131d12b8da151954340df2aea3ab161cb
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1Time(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""V1Time - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1Time, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Time):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 26.590909
| 80
| 0.538889
|
49de5ab38557e3338b4681512b94a30a32e3e2f9
| 10,990
|
py
|
Python
|
com/vmware/vmc/orgs/sddcs/networks/cgws/l2vpn_client.py
|
adammillerio/vsphere-automation-sdk-python
|
c07e1be98615201139b26c28db3aa584c4254b66
|
[
"MIT"
] | null | null | null |
com/vmware/vmc/orgs/sddcs/networks/cgws/l2vpn_client.py
|
adammillerio/vsphere-automation-sdk-python
|
c07e1be98615201139b26c28db3aa584c4254b66
|
[
"MIT"
] | null | null | null |
com/vmware/vmc/orgs/sddcs/networks/cgws/l2vpn_client.py
|
adammillerio/vsphere-automation-sdk-python
|
c07e1be98615201139b26c28db3aa584c4254b66
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.vmc.orgs.sddcs.networks.cgws.l2vpn.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Config(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.vmc.orgs.sddcs.networks.cgws.l2vpn.config'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _ConfigStub)
self._VAPI_OPERATION_IDS = {}
def delete(self,
org,
sddc,
edge_id,
):
"""
Delete SDDC L2 VPN configuration.
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Compute Gateway Edge Identifier (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
"""
return self._invoke('delete',
{
'org': org,
'sddc': sddc,
'edge_id': edge_id,
})
def get(self,
org,
sddc,
edge_id,
show_sensitive_data=None,
):
"""
Retrieve SDDC L2 VPN configuration.
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Compute Gateway Edge Identifier (required)
:type show_sensitive_data: :class:`bool` or ``None``
:param show_sensitive_data: (optional)
:rtype: :class:`com.vmware.vmc.model_client.Nsxl2vpn`
:return: com.vmware.vmc.model.Nsxl2vpn
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
"""
return self._invoke('get',
{
'org': org,
'sddc': sddc,
'edge_id': edge_id,
'show_sensitive_data': show_sensitive_data,
})
def update(self,
org,
sddc,
edge_id,
nsxl2vpn,
):
"""
Modify SDDC L2 VPN configuration
:type org: :class:`str`
:param org: Organization identifier. (required)
:type sddc: :class:`str`
:param sddc: Sddc Identifier. (required)
:type edge_id: :class:`str`
:param edge_id: Compute Gateway Edge Identifier (required)
:type nsxl2vpn: :class:`com.vmware.vmc.model_client.Nsxl2vpn`
:param nsxl2vpn: (required)
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad request. Request object passed is invalid.
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden. Authorization header not provided.
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not found. Requested object not found.
"""
return self._invoke('update',
{
'org': org,
'sddc': sddc,
'edge_id': edge_id,
'nsxl2vpn': nsxl2vpn,
})
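    # Illustrative usage sketch (not part of the generated stub): the org, SDDC and
    # edge identifiers below are placeholders, and a real StubConfiguration with
    # valid VMC credentials must be created elsewhere before constructing Config.
    #
    #   l2vpn_config = Config(stub_config)
    #   current = l2vpn_config.get(org='my-org-id', sddc='my-sddc-id', edge_id='edge-2')
    #   l2vpn_config.update(org='my-org-id', sddc='my-sddc-id', edge_id='edge-2',
    #                       nsxl2vpn=current)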
class _ConfigStub(ApiInterfaceStub):
def __init__(self, config):
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'org': type.StringType(),
'sddc': type.StringType(),
'edge_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/sddc/cgws/{edgeId}/l2vpn/config',
path_variables={
'org': 'org',
'sddc': 'sddc',
'edge_id': 'edgeId',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'org': type.StringType(),
'sddc': type.StringType(),
'edge_id': type.StringType(),
'show_sensitive_data': type.OptionalType(type.BooleanType()),
})
get_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/sddc/cgws/{edgeId}/l2vpn/config',
path_variables={
'org': 'org',
'sddc': 'sddc',
'edge_id': 'edgeId',
},
query_parameters={
'show_sensitive_data': 'showSensitiveData',
},
content_type='application/json'
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'org': type.StringType(),
'sddc': type.StringType(),
'edge_id': type.StringType(),
'nsxl2vpn': type.ReferenceType('com.vmware.vmc.model_client', 'Nsxl2vpn'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/vmc/api/orgs/{org}/sddcs/{sddc}/networks/4.0/sddc/cgws/{edgeId}/l2vpn/config',
request_body_parameter='nsxl2vpn',
path_variables={
'org': 'org',
'sddc': 'sddc',
'edge_id': 'edgeId',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.vmc.model_client', 'Nsxl2vpn'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.VoidType(),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'update': update_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.vmc.orgs.sddcs.networks.cgws.l2vpn.config',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class StubFactory(StubFactoryBase):
_attrs = {
'Config': Config,
}
| 38.027682
| 105
| 0.559873
|
9375e7434ca9b44ec32c4671a836dd70dc6c92a9
| 431
|
py
|
Python
|
back-end/RawFishSheep/app_order/migrations/0007_auto_20190516_1354.py
|
Coldarra/RawFishSheep
|
266bd9d8d9832d5c692b63e7515d45fdc4f6acc4
|
[
"Apache-2.0"
] | null | null | null |
back-end/RawFishSheep/app_order/migrations/0007_auto_20190516_1354.py
|
Coldarra/RawFishSheep
|
266bd9d8d9832d5c692b63e7515d45fdc4f6acc4
|
[
"Apache-2.0"
] | 4
|
2021-10-06T22:49:52.000Z
|
2022-02-27T12:28:18.000Z
|
back-end/RawFishSheep/app_order/migrations/0007_auto_20190516_1354.py
|
Coldarra/RawFishSheep
|
266bd9d8d9832d5c692b63e7515d45fdc4f6acc4
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.7 on 2019-05-16 05:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app_order', '0006_auto_20190514_1659'),
]
operations = [
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(default='unprocessed', max_length=20, verbose_name='订单状态'),
),
]
| 22.684211
| 94
| 0.617169
|
fb65cd91fc9cfc8b6c0ffd1ac1767f72991b1ccd
| 6,462
|
py
|
Python
|
fhir/resources/STU3/triggerdefinition.py
|
ItayGoren/fhir.resources
|
3c45fddeb5a91ca0f02f9e28126b1f41a60360ba
|
[
"BSD-3-Clause"
] | 1
|
2020-08-31T09:52:25.000Z
|
2020-08-31T09:52:25.000Z
|
fhir/resources/STU3/triggerdefinition.py
|
arkhn/fhir.resources
|
122e89c8599c4034bb3075b31d1a1188e377db91
|
[
"BSD-3-Clause"
] | null | null | null |
fhir/resources/STU3/triggerdefinition.py
|
arkhn/fhir.resources
|
122e89c8599c4034bb3075b31d1a1188e377db91
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/TriggerDefinition
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
from typing import Any, Dict
from pydantic import Field, root_validator
from . import element, fhirtypes
class TriggerDefinition(element.Element):
"""Disclaimer: Any field name ends with ``__ext`` does't part of
Resource StructureDefinition, instead used to enable Extensibility feature
for FHIR Primitive Data Types.
Defines an expected trigger for a module.
A description of a triggering event.
"""
resource_type = Field("TriggerDefinition", const=True)
eventData: fhirtypes.DataRequirementType = Field(
None,
alias="eventData",
title="Triggering data of the event",
description="The triggering data of the event (if this is a data trigger).",
# if property is element of this resource.
element_property=True,
)
eventName: fhirtypes.String = Field(
None,
alias="eventName",
title="Triggering event name",
description="The name of the event (if this is a named-event trigger).",
# if property is element of this resource.
element_property=True,
)
eventName__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_eventName", title="Extension field for ``eventName``."
)
eventTimingDate: fhirtypes.Date = Field(
None,
alias="eventTimingDate",
title="Timing of the event",
description="The timing of the event (if this is a period trigger).",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e eventTiming[x]
one_of_many="eventTiming",
one_of_many_required=False,
)
eventTimingDate__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_eventTimingDate", title="Extension field for ``eventTimingDate``."
)
eventTimingDateTime: fhirtypes.DateTime = Field(
None,
alias="eventTimingDateTime",
title="Timing of the event",
description="The timing of the event (if this is a period trigger).",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e eventTiming[x]
one_of_many="eventTiming",
one_of_many_required=False,
)
eventTimingDateTime__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None,
alias="_eventTimingDateTime",
title="Extension field for ``eventTimingDateTime``.",
)
eventTimingReference: fhirtypes.ReferenceType = Field(
None,
alias="eventTimingReference",
title="Timing of the event",
description="The timing of the event (if this is a period trigger).",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e eventTiming[x]
one_of_many="eventTiming",
one_of_many_required=False,
# note: Listed Resource Type(s) should be allowed as Reference.
enum_reference_types=["Schedule"],
)
eventTimingTiming: fhirtypes.TimingType = Field(
None,
alias="eventTimingTiming",
title="Timing of the event",
description="The timing of the event (if this is a period trigger).",
# if property is element of this resource.
element_property=True,
# Choice of Data Types. i.e eventTiming[x]
one_of_many="eventTiming",
one_of_many_required=False,
)
type: fhirtypes.Code = Field(
...,
alias="type",
title=(
"named-event | periodic | data-added | data-modified | data-removed | "
"data-accessed | data-access-ended"
),
description="The type of triggering event.",
# if property is element of this resource.
element_property=True,
        # note: Enum values can be used in validation,
        # but use them at your own responsibility; read the official FHIR documentation.
enum_values=[
"named-event",
"periodic",
"data-added",
"data-modified",
"data-removed",
"data-accessed",
"data-access-ended",
],
)
type__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
None, alias="_type", title="Extension field for ``type``."
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"eventTiming": [
"eventTimingDate",
"eventTimingDateTime",
"eventTimingReference",
"eventTimingTiming",
]
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
| 37.137931
| 88
| 0.616063
|
d8ee5480a6cc314e88a834bf7042c479b2be3e89
| 6,259
|
py
|
Python
|
docs/source/01-AWS/12-Application-Integration/01-AWS-Simple-Queue-Service-Root/99-POC/FIFO-Q-Event-Source-for-Lambda-Concurrency/consumer.py
|
MacHu-GWU/Dev-Exp-Share
|
4215d3872e5b2b26c3a37301d0dbe39c2bfecaea
|
[
"MIT"
] | 2
|
2021-07-23T03:03:43.000Z
|
2021-10-04T12:03:54.000Z
|
docs/source/01-AWS/12-Application-Integration/01-AWS-Simple-Queue-Service-Root/99-POC/FIFO-Q-Event-Source-for-Lambda-Concurrency/consumer.py
|
MacHu-GWU/Dev-Exp-Share
|
4215d3872e5b2b26c3a37301d0dbe39c2bfecaea
|
[
"MIT"
] | 3
|
2021-09-23T23:32:14.000Z
|
2022-03-30T16:35:27.000Z
|
docs/source/01-AWS/12-Application-Integration/01-AWS-Simple-Queue-Service-Root/99-POC/FIFO-Q-Event-Source-for-Lambda-Concurrency/consumer.py
|
MacHu-GWU/Dev-Exp-Share
|
4215d3872e5b2b26c3a37301d0dbe39c2bfecaea
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import boto3
import pynamodb
import pynamodb.exceptions as exc
from pynamodb.attributes import (
UnicodeAttribute, BooleanAttribute, ListAttribute, NumberAttribute,
)
from pynamodb.models import Model
# --- Resovle Constant Value ---
def is_lambda_runtime():
return "AWS_LAMBDA_FUNCTION_NAME" in os.environ
if is_lambda_runtime():
aws_profile = None
else:
aws_profile = "eq_sanhe"
os.environ["AWS_PROFILE"] = "eq_sanhe"
ses = boto3.session.Session(profile_name=aws_profile)
sqs = boto3.resource("sqs")
q_url = "https://sqs.us-east-1.amazonaws.com/110330507156/poc-fifo-q.fifo"
q = sqs.Queue(q_url)
q_url = "https://sqs.us-east-1.amazonaws.com/110330507156/poc-fifo-q-dlq.fifo"
dlq = sqs.Queue(q_url)
# --- Define DynamoDB Model ---
class SQSTrackerModel(Model):
"""
    Tracks which SQS messages have already been successfully consumed.
"""
class Meta:
table_name = "poc-fifo-q-sqs-tracker"
region = "us-east-1"
billing_mode = pynamodb.models.PAY_PER_REQUEST_BILLING_MODE
id = UnicodeAttribute(hash_key=True)
flag = BooleanAttribute()
class OutputModel(Model):
"""
    This model can store up to 82500 members. For example, this object is approximately
    256KB in DynamoDB:
{
gid: "1",
processed: [1, 2, ..., 82500]
}
"""
class Meta:
table_name = "poc-fifo-q-output"
region = "us-east-1"
billing_mode = pynamodb.models.PAY_PER_REQUEST_BILLING_MODE
gid = UnicodeAttribute(hash_key=True)
processed = ListAttribute(of=NumberAttribute, default=list)
if not is_lambda_runtime():
SQSTrackerModel.create_table(wait=True)
OutputModel.create_table(wait=True)
def debug_consumer():
attempt_id = "7b3a46183cfedf9a72d87ca5cff1ec8b"
res = q.receive_messages(
MaxNumberOfMessages=1,
AttributeNames=["All", ],
ReceiveRequestAttemptId=attempt_id,
)
for msg in res:
print(msg.message_id)
print(msg.body)
print(msg.md5_of_body)
print(msg.receipt_handle)
print(msg.attributes)
def debug_output():
def strict_ascend(l):
previous = 0
for i in l:
if i > previous:
previous = i
continue
else:
raise ValueError
def validate_output(gid, processed):
if len(processed) != len(set(processed)):
print(f"{gid}: duplicate item found!")
else:
print(f"{gid}: no duplicate item.")
try:
strict_ascend(processed)
print(f"{gid}: is strictly in order.")
except ValueError:
print(f"{gid}: is not in order!")
total = 0
for o in OutputModel.scan():
print(o.gid, o.processed)
validate_output(o.gid, o.processed)
total += len(o.processed)
print(f"total processed item = {total}")
def clear_q_and_db():
"""
    Purge all messages in both queues, clear all data in DynamoDB, and reset the
    test environment.
"""
q.purge()
dlq.purge()
for model in [SQSTrackerModel, OutputModel]:
with model.batch_write() as batch:
item_list = list(model.scan())
for item in item_list:
batch.delete(item)
def main_good(event, context):
for record_dct in event["Records"]:
message_id = record_dct["messageId"]
        # make sure this message was never successfully consumed before
try:
q_tracker = SQSTrackerModel.get(message_id)
if q_tracker.flag:
continue # skip this record
except SQSTrackerModel.DoesNotExist:
pass
except Exception as e:
            # since if one fails, all messages in the batch go back to the queue anyway,
            # so we should stop early
raise e
# consume this message
gid, nth = record_dct["body"].split("-")
nth: int = int(nth)
try:
OutputModel(gid=gid).update(
actions=[
OutputModel.processed.set(OutputModel.processed.append([nth, ]))
],
condition=OutputModel.gid.exists(),
)
except exc.UpdateError as e:
if "The conditional request failed" in str(e):
OutputModel(gid=gid, processed=[nth, ]).save(
condition=OutputModel.gid.does_not_exist(),
)
else:
raise e
except Exception as e:
raise e
# mark this message "processed" in tracker
SQSTrackerModel(id=message_id, flag=True).save()
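    # Note (comment only): the tracker table makes this handler idempotent under
    # SQS at-least-once delivery -- if the batch is retried, records whose
    # messageId is already flagged in SQSTrackerModel are skipped.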
def main_with_error(event, context):
for record_dct in event["Records"]:
message_id = record_dct["messageId"]
        # make sure this message was never successfully consumed before
try:
q_tracker = SQSTrackerModel.get(message_id)
if q_tracker.flag:
continue # skip this record
except SQSTrackerModel.DoesNotExist:
pass
except Exception as e:
            # since if one fails, all messages in the batch go back to the queue anyway,
            # so we should stop early
raise e
# consume this message
gid, nth = record_dct["body"].split("-")
nth: int = int(nth)
if gid == "g1" and nth == 11: # g1 should only good up to some value before 11, could be random value from 1 - 11
raise ValueError
try:
OutputModel(gid=gid).update(
actions=[
OutputModel.processed.set(OutputModel.processed.append([nth, ]))
],
condition=OutputModel.gid.exists(),
)
except exc.UpdateError as e:
if "The conditional request failed" in str(e):
OutputModel(gid=gid, processed=[nth, ]).save(
condition=OutputModel.gid.does_not_exist(),
)
else:
raise e
except Exception as e:
raise e
# mark this message "processed" in tracker
SQSTrackerModel(id=message_id, flag=True).save()
main = main_with_error
if __name__ == "__main__":
pass
# debug_consumer()
# debug_output()
# clear_q_and_db()
| 27.69469
| 122
| 0.590989
|
76227904d46dc9a5113bd05a18ee9c05d7b410a1
| 276
|
py
|
Python
|
grampanchayat/grampanchayat/doctype/village_setting/village_setting.py
|
ramjanalilal/grampanchayat
|
de837d98974b0f9eda559a7ff20890edd814fe29
|
[
"MIT"
] | 1
|
2020-10-17T05:39:13.000Z
|
2020-10-17T05:39:13.000Z
|
grampanchayat/grampanchayat/doctype/village_setting/village_setting.py
|
ramjanalilal/grampanchayat
|
de837d98974b0f9eda559a7ff20890edd814fe29
|
[
"MIT"
] | null | null | null |
grampanchayat/grampanchayat/doctype/village_setting/village_setting.py
|
ramjanalilal/grampanchayat
|
de837d98974b0f9eda559a7ff20890edd814fe29
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, FinForce Consulting LLP and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class VillageSetting(Document):
pass
| 25.090909
| 62
| 0.782609
|
2142eb932c771e9f00f1f1bc8133bde8659c146a
| 18
|
py
|
Python
|
python/taichi/image/__init__.py
|
gonnavis/taichi
|
ba1898643e4548a23ecae340e963614b28b8a103
|
[
"MIT"
] | 2
|
2019-06-25T02:12:37.000Z
|
2019-06-25T02:12:48.000Z
|
python/taichi/image/__init__.py
|
gonnavis/taichi
|
ba1898643e4548a23ecae340e963614b28b8a103
|
[
"MIT"
] | null | null | null |
python/taichi/image/__init__.py
|
gonnavis/taichi
|
ba1898643e4548a23ecae340e963614b28b8a103
|
[
"MIT"
] | 1
|
2021-11-29T22:47:24.000Z
|
2021-11-29T22:47:24.000Z
|
from .hdr import *
| 18
| 18
| 0.722222
|
9e6c2b88b0e74795448071ee6c4ce5806b5d5377
| 907
|
py
|
Python
|
applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/conditionals3.py
|
schinmayee/nimbus
|
170cd15e24a7a88243a6ea80aabadc0fc0e6e177
|
[
"BSD-3-Clause"
] | 20
|
2017-07-03T19:09:09.000Z
|
2021-09-10T02:53:56.000Z
|
applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/conditionals3.py
|
schinmayee/nimbus
|
170cd15e24a7a88243a6ea80aabadc0fc0e6e177
|
[
"BSD-3-Clause"
] | null | null | null |
applications/physbam/physbam-lib/External_Libraries/Archives/boost/tools/build/v2/test/conditionals3.py
|
schinmayee/nimbus
|
170cd15e24a7a88243a6ea80aabadc0fc0e6e177
|
[
"BSD-3-Clause"
] | 9
|
2017-09-17T02:05:06.000Z
|
2020-01-31T00:12:01.000Z
|
#!/usr/bin/python
# Copyright (C) Vladimir Prus 2003. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
# Test that conditional properties work, even if the property is free and the
# value includes a colon.
from BoostBuild import Tester, List
t = Tester()
# Create the needed files
t.write("project-root.jam", "")
t.write("Jamfile", """
exe hello : hello.cpp : <variant>debug:<define>CLASS=Foo::Bar ;
""")
t.write("hello.cpp", """
namespace Foo { class Bar { } ; }
int main()
{
CLASS c;
return 0;
}
""")
# Don't check stderr, which can include warning about unused 'c'.
t.run_build_system(stdout=None, stderr=None)
t.expect_addition("bin/$toolset/debug/hello.exe")
t.cleanup()
| 25.914286
| 80
| 0.710033
|
3f7b4af7ddda5e355be2d9d9c4c589c892f1a719
| 3,559
|
py
|
Python
|
Battleships/Battleships.py
|
Dorthion/Python-Minigames
|
91ba20d42ac7376ccaad60cd948a576800085623
|
[
"MIT"
] | null | null | null |
Battleships/Battleships.py
|
Dorthion/Python-Minigames
|
91ba20d42ac7376ccaad60cd948a576800085623
|
[
"MIT"
] | null | null | null |
Battleships/Battleships.py
|
Dorthion/Python-Minigames
|
91ba20d42ac7376ccaad60cd948a576800085623
|
[
"MIT"
] | null | null | null |
import pygame
import os.path
import sys
from configparser import ConfigParser
from Source import Config
from Source import UI_functions as UI #UI - User Interface
from Source import Set_Player_ai_game as spa #spa - set player ai
from Source import Play_Player_ai_game as ppa #ppa - play player ai
from Source import Set_Ai_ai_game as saa #saa - set ai ai
from Source import Play_ai_ai_game as paa #paa - play ai ai
from Source import Options_game as opt #opt - options
#Init config file
global cfg
cfg = ConfigParser()
if os.path.isfile("./cfg.ini") == False:
cfg = Config.create_new_config()
else:
cfg.read("./cfg.ini")
#Init
pygame.init()
UI.load_config_file(cfg)
screen = pygame.display.set_mode((cfg["Basic"].getint("WIDTH"),cfg["Basic"].getint("HEIGHT")))
pygame.display.set_caption(cfg["Basic"]["TITLE"])
pygame.time.Clock().tick(cfg["Basic"].getint("FPS"))
#Resources (Images, Icons, Fonts)
icon = pygame.image.load("Assets/Images/ship.png")
bg = pygame.image.load("Assets/Images/CleanBackground.png")
menubg = pygame.image.load("Assets/Images/MenuGame.png")
font = pygame.font.Font("Assets/Font/impact.ttf", 24)
pygame.display.set_icon(icon)
#Initial values
CLICK = False
RUNNING = True
RUN_BTN = False
rects, text_pos = UI.Rect_Main_Menu()
texts = [font.render("PLAY PLAYER", True, (52, 52, 54)),
font.render("PLAY AI", True, (52, 52, 54)),
font.render("OPTIONS", True, (52, 52, 54)),
font.render("EXIT", True, (52, 52, 54))]
#InGame
while RUNNING:
#Screen properties per update and Draw Buttons
mx, my = pygame.mouse.get_pos()
screen.blit(menubg,(0,0))
UI.Draw_Pos(screen, texts, text_pos)
#Buttons functions
if CLICK:
Temp_width = cfg["Basic"].getint("WIDTH")
Temp_height = cfg["Basic"].getint("HEIGHT")
#Play_Player_vs_AI
if rects[0].collidepoint((mx,my)):
play = True
RUN_BTN = True
while play == True:
pmab, bmap, play = spa.Play_Game(screen, bg, cfg)
if play == True:
ppa.Play_Game(screen, bg, pmab, bmap, cfg)
#Play_AI_vs_AI
if rects[1].collidepoint((mx,my)):
play = True
RUN_BTN = True
while play == True:
bmap1, bmap2, play = saa.Play_Game(screen, bg, cfg)
if play == True:
paa.Play_Game(screen, bg, bmap1, bmap2, cfg)
#Options
if rects[2].collidepoint((mx,my)):
RUN_BTN = True
config, changed_game_opt = opt.run(screen, bg, cfg)
if changed_game_opt:
cfg = opt.save_new_conf(config)
UI.load_config_file(cfg)
#Exit_Game
if rects[3].collidepoint((mx,my)):
RUNNING = False
break
if RUN_BTN == True:
cfg.set("Basic","WIDTH", str(Temp_width))
cfg.set("Basic","HEIGHT", str(Temp_height))
screen = pygame.display.set_mode((Temp_width, Temp_height))
RUN_BTN = False
#Events and update
CLICK = False
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
RUNNING = False
break
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
CLICK = True
#Quit game
pygame.display.quit()
pygame.quit()
| 32.354545
| 94
| 0.588648
|
0cd6e7f7557a8945490ab383eee75a6fa7d4189a
| 10,429
|
py
|
Python
|
src/metrics/coref.py
|
klimzaporojets/e2e-knowledge-ie
|
284ff2ba61b32659618735ba09ee06a8745af850
|
[
"Apache-2.0"
] | 5
|
2021-07-07T11:50:31.000Z
|
2021-09-17T00:54:19.000Z
|
src/metrics/coref.py
|
klimzaporojets/e2e-knowledge-ie
|
284ff2ba61b32659618735ba09ee06a8745af850
|
[
"Apache-2.0"
] | 2
|
2021-07-19T05:21:52.000Z
|
2022-01-22T21:47:52.000Z
|
src/metrics/coref.py
|
klimzaporojets/e2e-knowledge-ie
|
284ff2ba61b32659618735ba09ee06a8745af850
|
[
"Apache-2.0"
] | null | null | null |
from collections import Counter
import numpy as np
import torch
from scipy.optimize import linear_sum_assignment
## MOSTLY COPIED FROM ALLENNLP
def decode_m2i(scores, lengths):
output = []
for b, length in enumerate(lengths.tolist()):
m2i = list(range(length))
if length > 0:
# print('scores:', length, scores[b, 0:length, :])
# _, indices = torch.max(scores[b, 0:length, 0:length], -1)
_, indices = torch.max(scores[b, 0:length, :], -1)
for src, dst in enumerate(indices.tolist()):
if src < len(m2i) and dst < len(m2i):
m2i[src] = m2i[dst]
else:
# sanity check: this should never ever happen !!!
print("ERROR: invalid index")
print("length:", length)
print("scores:", scores[b, 0:length, :])
print("scores:", scores.min().item(), scores.max().item())
print("indices:", indices)
print("LENGTHS:", lengths)
exit(1)
output.append(m2i)
return output
def m2i_to_clusters(m2i):
clusters = {}
m2c = {}
for m, c in enumerate(m2i):
if c not in clusters:
clusters[c] = []
clusters[c].append(m)
m2c[m] = clusters[c]
return list(clusters.values()), m2c
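# Illustrative example (comment only): m2i maps each mention index to a
# representative mention index that acts as the cluster key, e.g.
#   m2i_to_clusters([0, 0, 2, 2, 4]) -> ([[0, 1], [2, 3], [4]], m2c)
# where m2c maps every mention to the (shared) list holding its cluster.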
def mention2cluster(clusters):
clusters = [tuple(tuple(m) for m in gc) for gc in clusters]
mention_to_cluster = {}
for cluster in clusters:
for mention in cluster:
mention_to_cluster[mention] = cluster
return mention_to_cluster
class MetricCoref:
def __init__(self, task, name, m, verbose=False):
self.task = task
self.name = name
self.m = m
self.iter = 0
self.clear()
self.max_iter = 0
self.max_f1 = 0
self.verbose = verbose
def clear(self):
self.precision_numerator = 0
self.precision_denominator = 0
self.recall_numerator = 0
self.recall_denominator = 0
def step(self):
self.clear()
self.iter += 1
def update(self, scores, targets, args, metadata):
gold_m2is, lengths, mentions = args['gold_m2is'], args['lengths'], args['mentions']
pred_m2is = decode_m2i(scores, lengths)
gold_m2is = [x.tolist() for x in gold_m2is]
for pred_m2i, gold_m2i, identifier, ms in zip(pred_m2is, gold_m2is, metadata['identifiers'], mentions):
pred_clusters, _ = m2i_to_clusters(pred_m2i)
gold_clusters, _ = m2i_to_clusters(gold_m2i)
p_num, p_den = self.m(pred_clusters, {k: (v, v) for k, v in enumerate(gold_m2i)})
r_num, r_den = self.m(gold_clusters, {k: (v, v) for k, v in enumerate(pred_m2i)})
if self.verbose:
print("ID", identifier)
print('pred_clusters:', pred_clusters)
print('gold_clusters:', gold_clusters)
print('precision: {} / {}'.format(p_num, p_den))
print('recall: {} / {}'.format(r_num, r_den))
print()
self.precision_numerator += p_num
self.precision_denominator += p_den
self.recall_numerator += r_num
self.recall_denominator += r_den
def add(self, pred, gold):
if self.m == self.ceafe:
p_num, p_den, r_num, r_den = self.m(pred, gold)
else:
p_num, p_den = self.m(pred, mention2cluster(gold))
r_num, r_den = self.m(gold, mention2cluster(pred))
self.precision_numerator += p_num
self.precision_denominator += p_den
self.recall_numerator += r_num
self.recall_denominator += r_den
def update2(self, output_dict, metadata):
# print("UPDATE2")
# print("pred:", output_dict["pred"])
# print("gold:", output_dict["gold"])
# print()
# print("clusters:", output_dict["clusters"])
# print("coref_gold:", output_dict["coref_gold"])
for pred, gold, identifier, tokens in zip(output_dict["pred"], output_dict["gold"], metadata['identifiers'],
metadata['tokens']):
if self.m == self.ceafe:
p_num, p_den, r_num, r_den = self.m(pred, gold)
else:
p_num, p_den = self.m(pred, mention2cluster(gold))
r_num, r_den = self.m(gold, mention2cluster(pred))
if self.verbose:
print("ID", identifier)
print("pred:", pred)
print("gold:", gold)
print("pred:", [[' '.join(tokens[begin:(end + 1)]) for begin, end in cluster] for cluster in pred])
print("gold:", [[' '.join(tokens[begin:(end + 1)]) for begin, end in cluster] for cluster in gold])
print('precision: {} / {}'.format(p_num, p_den))
print('recall: {} / {}'.format(r_num, r_den))
print()
self.precision_numerator += p_num
self.precision_denominator += p_den
self.recall_numerator += r_num
self.recall_denominator += r_den
def get_f1(self):
precision = 0 if self.precision_denominator == 0 else \
self.precision_numerator / float(self.precision_denominator)
recall = 0 if self.recall_denominator == 0 else \
self.recall_numerator / float(self.recall_denominator)
return 0 if precision + recall == 0 else 2 * precision * recall / (precision + recall)
def print(self, dataset_name, details=False):
f1 = self.get_f1()
if f1 > self.max_f1:
self.max_f1 = f1
self.max_iter = self.iter
print('EVAL-COREF\t{}-{}\tcurr-iter: {}\t{}-f1: {}\tmax-iter: {}\tmax-{}-f1: {}'
'\tstall: {}'.format(dataset_name,
self.task,
self.iter,
self.name,
f1,
self.max_iter,
self.name,
self.max_f1,
self.iter - self.max_iter))
def log(self, tb_logger, dataset_name):
tb_logger.log_value('{}/{}-f1'.format(dataset_name, self.name), self.get_f1(), self.iter)
@staticmethod
def b_cubed(clusters, mention_to_gold):
numerator, denominator = 0, 0
for cluster in clusters:
if len(cluster) == 1:
continue
gold_counts = Counter()
correct = 0
for mention in cluster:
if mention in mention_to_gold:
gold_counts[tuple(mention_to_gold[mention])] += 1
for cluster2, count in gold_counts.items():
if len(cluster2) != 1:
correct += count * count
numerator += correct / float(len(cluster))
denominator += len(cluster)
return numerator, denominator
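    # Reading of the counts above (sketch, derived from the loop itself): for each
    # predicted cluster, `correct` sums count^2 over the gold clusters it touches,
    # so the cluster adds correct / |cluster| to the precision numerator and
    # |cluster| to the denominator; singleton clusters are skipped entirely.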
@staticmethod
def muc(clusters, mention_to_gold):
true_p, all_p = 0, 0
for cluster in clusters:
all_p += len(cluster) - 1
true_p += len(cluster)
linked = set()
for mention in cluster:
if mention in mention_to_gold:
linked.add(mention_to_gold[mention])
else:
true_p -= 1
true_p -= len(linked)
return true_p, all_p
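    # MUC sketch (derived from the loop above): a predicted cluster of size n
    # contributes n - 1 possible links; its correct links are n minus the mentions
    # with no gold cluster minus the number of distinct gold clusters it touches.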
@staticmethod
def phi4(gold_clustering, predicted_clustering):
"""
Subroutine for ceafe. Computes the mention F measure between gold and
predicted mentions in a cluster.
"""
return (
2
* len([mention for mention in gold_clustering if mention in predicted_clustering])
/ float(len(gold_clustering) + len(predicted_clustering))
)
@staticmethod
def ceafe(clusters, gold_clusters):
"""
Computes the Constrained EntityAlignment F-Measure (CEAF) for evaluating coreference.
Gold and predicted mentions are aligned into clusterings which maximise a metric - in
this case, the F measure between gold and predicted clusters.
<https://www.semanticscholar.org/paper/On-Coreference-Resolution-Performance-Metrics-Luo/de133c1f22d0dfe12539e25dda70f28672459b99>
"""
clusters = [cluster for cluster in clusters if len(cluster) != 1]
gold_clusters = [cluster for cluster in gold_clusters if len(cluster) != 1] # is this really correct?
scores = np.zeros((len(gold_clusters), len(clusters)))
for i, gold_cluster in enumerate(gold_clusters):
for j, cluster in enumerate(clusters):
scores[i, j] = MetricCoref.phi4(gold_cluster, cluster)
row, col = linear_sum_assignment(-scores)
similarity = sum(scores[row, col])
return similarity, len(clusters), similarity, len(gold_clusters)
class MetricCorefAverage:
def __init__(self, task, name, metrics):
self.task = task
self.name = name
self.metrics = metrics
self.iter = 0
self.max_f1 = 0
self.max_iter = 0
def step(self):
self.iter += 1
def update2(self, output_dict, metadata):
return
def get_f1(self):
scores = [x.get_f1() for x in self.metrics]
return sum(scores) / len(scores) if len(scores) > 0 else 0.0
def print(self, dataset_name, details=False):
f1 = self.get_f1()
if f1 > self.max_f1:
self.max_f1 = f1
self.max_iter = self.iter
print('EVAL-COREF\t{}-{}\tcurr-iter: {}\t{}-f1: {}\tmax-iter: {}\tmax-{}-f1: {}'
'\tstall: {}'.format(dataset_name,
self.task,
self.iter,
self.name,
f1,
self.max_iter,
self.name,
self.max_f1,
self.iter - self.max_iter))
def log(self, tb_logger, dataset_name):
tb_logger.log_value('{}/{}-f1'.format(dataset_name, self.name), self.get_f1(), self.iter)
| 38.062044
| 138
| 0.542046
|
413fdbb205e160f70281d3c8d86d36ceca46ebef
| 607
|
py
|
Python
|
portfolio/migrations/0013_auto_20210127_2330.py
|
HerbertRamirez/inmo_web
|
3c48911d5e3ee7a75580534664874ba5e8281e55
|
[
"MIT"
] | null | null | null |
portfolio/migrations/0013_auto_20210127_2330.py
|
HerbertRamirez/inmo_web
|
3c48911d5e3ee7a75580534664874ba5e8281e55
|
[
"MIT"
] | null | null | null |
portfolio/migrations/0013_auto_20210127_2330.py
|
HerbertRamirez/inmo_web
|
3c48911d5e3ee7a75580534664874ba5e8281e55
|
[
"MIT"
] | 3
|
2021-02-18T15:11:53.000Z
|
2021-02-20T17:34:01.000Z
|
# Generated by Django 3.1.2 on 2021-01-27 23:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0012_auto_20210127_0722'),
]
operations = [
migrations.AddField(
model_name='inmueble',
name='map',
field=models.URLField(default='sdasad'),
),
migrations.AlterField(
model_name='inmueble',
name='cerc3',
field=models.CharField(blank=True, default="A x calles del mcdonald's de la avenida x", max_length=200),
),
]
| 25.291667
| 116
| 0.589786
|
c8707fa7c89c821c560b9051a443a2114c1fadd4
| 3,701
|
py
|
Python
|
mmocr/models/textrecog/recognizer/master.py
|
zezeze97/image2latex
|
c745667cd1af91dbff2385dcf2f2b80b9a40adb6
|
[
"Apache-2.0"
] | 4
|
2022-01-03T06:52:30.000Z
|
2022-01-17T02:30:25.000Z
|
mmocr/models/textrecog/recognizer/master.py
|
zezeze97/image2latex
|
c745667cd1af91dbff2385dcf2f2b80b9a40adb6
|
[
"Apache-2.0"
] | null | null | null |
mmocr/models/textrecog/recognizer/master.py
|
zezeze97/image2latex
|
c745667cd1af91dbff2385dcf2f2b80b9a40adb6
|
[
"Apache-2.0"
] | 1
|
2021-12-31T04:47:16.000Z
|
2021-12-31T04:47:16.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.builder import DETECTORS, build_backbone, build_loss
from mmocr.models.builder import (build_convertor, build_decoder,
build_encoder, build_preprocessor)
from .encode_decode_recognizer import EncodeDecodeRecognizer
@DETECTORS.register_module()
class MASTER(EncodeDecodeRecognizer):
# need to inherit BaseRecognizer or EncodeDecodeRecognizer in mmocr
def __init__(self,
preprocessor=None,
backbone=None,
encoder=None,
decoder=None,
loss=None,
label_convertor=None,
train_cfg=None,
test_cfg=None,
max_seq_len=40,
pretrained=None):
super(MASTER, self).__init__(preprocessor,
backbone,
encoder,
decoder,
loss,
label_convertor,
train_cfg,
test_cfg,
max_seq_len,
pretrained)
def init_weights(self, pretrained=None):
for p in self.parameters():
if p.dim()>1:
nn.init.xavier_uniform_(p)
def forward_train(self, img, img_metas):
"""
Args:
img (tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A list of image info dict where each dict
contains: 'img_shape', 'filename', and may also contain
'ori_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
Returns:
dict[str, tensor]: A dictionary of loss components.
"""
feat = self.extract_feat(img)
feat = feat[-1]
gt_labels = [img_meta['text'] for img_meta in img_metas]
targets_dict = self.label_convertor.str2tensor(gt_labels)
out_enc = None
if self.encoder is not None:
out_enc = self.encoder(feat)
# print('out_enc size: ',out_enc.size())
out_dec = self.decoder(
feat, out_enc, targets_dict, img_metas, train_mode=True)
loss_inputs = (
out_dec,
targets_dict,
img_metas,
)
losses = self.loss(*loss_inputs)
return losses
def simple_test(self, img, img_metas, **kwargs):
"""Test function with test time augmentation.
Args:
imgs (torch.Tensor): Image input tensor.
img_metas (list[dict]): List of image information.
Returns:
list[str]: Text label result of each image.
"""
feat = self.extract_feat(img)
feat = feat[-1]
out_enc = None
if self.encoder is not None:
out_enc = self.encoder(feat)
out_dec = self.decoder(
feat, out_enc, None, img_metas, train_mode=False)
label_indexes, label_scores = self.label_convertor.tensor2idx(
out_dec, img_metas)
label_strings = self.label_convertor.idx2str(label_indexes)
# flatten batch results
results = []
for string, score in zip(label_strings, label_scores):
results.append(dict(text=string, score=score))
return results
| 34.268519
| 77
| 0.53418
|
798183f12a474df1ce498c36f44c15502b5a08db
| 897
|
py
|
Python
|
src/account/azext_account/generated/_validators.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 207
|
2017-11-29T06:59:41.000Z
|
2022-03-31T10:00:53.000Z
|
src/account/azext_account/generated/_validators.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 4,061
|
2017-10-27T23:19:56.000Z
|
2022-03-31T23:18:30.000Z
|
src/account/azext_account/generated/_validators.py
|
Mannan2812/azure-cli-extensions
|
e2b34efe23795f6db9c59100534a40f0813c3d95
|
[
"MIT"
] | 802
|
2017-10-11T17:36:26.000Z
|
2022-03-31T22:24:32.000Z
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
def alias_validator(namespace):
if namespace.subscription_id:
if namespace.billing_scope or namespace.display_name:
raise CLIError('--billing-scope or --display-name is not allowed when --subscription-id is provided.')
else:
if not namespace.billing_scope or not namespace.display_name or not namespace.workload:
raise CLIError('--billing-scope, --display-name and --workload are required when creating '
'an alias with a new susbcription.')
| 52.764706
| 114
| 0.561873
|
d95a2574f9bf85e461d88c9671a6d190ca2601b8
| 861
|
py
|
Python
|
measurement.py
|
thomasjsn/python-sios
|
b1a1415b5a3f9e0594d0ab54b6795100ade1a414
|
[
"MIT"
] | null | null | null |
measurement.py
|
thomasjsn/python-sios
|
b1a1415b5a3f9e0594d0ab54b6795100ade1a414
|
[
"MIT"
] | null | null | null |
measurement.py
|
thomasjsn/python-sios
|
b1a1415b5a3f9e0594d0ab54b6795100ade1a414
|
[
"MIT"
] | null | null | null |
import statistics
class Measurement:
def __init__(self, decimals=2, length=300):
self.__decimals = decimals
self.__values = list()
self.__length = length
@property
def length(self):
return len(self.__values)
@property
def mean(self):
return round(statistics.mean(self.__values), self.__decimals)
@property
def stdev(self):
return round(statistics.stdev(self.__values), self.__decimals) if len(self.__values) > 1 else -1
@property
def rising(self):
return (self.get(3) - self.mean) > (self.stdev * 3)
def add(self, value):
self.__values.append(float(value))
if len(self.__values) > self.__length:
del self.__values[0]
def get(self, length):
return round(statistics.mean(self.__values[length * -1:]), self.__decimals)
| 25.323529
| 104
| 0.630662
|
8d5e879176159cfcc1b9c18b26323a06b629a681
| 230
|
py
|
Python
|
Core/Block_0/R0150_Factory.py
|
BernardoB95/Extrator_SPEDFiscal
|
10b4697833c561d24654251da5f22d044f03fc16
|
[
"MIT"
] | 1
|
2021-04-25T13:53:20.000Z
|
2021-04-25T13:53:20.000Z
|
Core/Block_0/R0150_Factory.py
|
BernardoB95/Extrator_SPEDFiscal
|
10b4697833c561d24654251da5f22d044f03fc16
|
[
"MIT"
] | null | null | null |
Core/Block_0/R0150_Factory.py
|
BernardoB95/Extrator_SPEDFiscal
|
10b4697833c561d24654251da5f22d044f03fc16
|
[
"MIT"
] | null | null | null |
from Core.IFactory import IFactory
from Regs.Block_0 import R0150
class R0150Factory(IFactory):
def create_block_object(self, line):
self.r0150 = _r0150 = R0150()
_r0150.reg_list = line
return _r0150
| 23
| 40
| 0.7
|
4168c5aeda2b43cf3bbbaa7a5e7348af1ba41d50
| 19,347
|
py
|
Python
|
SmartObjectFramework/ObjectService/rdflib/plugins/sleepycat.py
|
OSIOT/OBSOLETE-IoT-Toolkit
|
f6cfb116a9f115d5ce288d990b57e0f6855283cc
|
[
"Apache-2.0"
] | 1
|
2019-06-12T19:37:58.000Z
|
2019-06-12T19:37:58.000Z
|
SmartObjectFramework/ObjectService/rdflib/plugins/sleepycat.py
|
OSIOT/OBSOLETE-IoT-Toolkit
|
f6cfb116a9f115d5ce288d990b57e0f6855283cc
|
[
"Apache-2.0"
] | null | null | null |
SmartObjectFramework/ObjectService/rdflib/plugins/sleepycat.py
|
OSIOT/OBSOLETE-IoT-Toolkit
|
f6cfb116a9f115d5ce288d990b57e0f6855283cc
|
[
"Apache-2.0"
] | null | null | null |
from rdflib.store import Store, VALID_STORE, CORRUPTED_STORE, NO_STORE, UNKNOWN
from rdflib.term import URIRef
from rdflib.py3compat import b
def bb(u): return u.encode('utf-8')
try:
from bsddb import db
has_bsddb = True
except ImportError:
try:
from bsddb3 import db
has_bsddb = True
except ImportError:
has_bsddb = False
from os import mkdir
from os.path import exists, abspath
from urllib import pathname2url
from threading import Thread
import logging
_logger = logging.getLogger(__name__)
__all__ = ['Sleepycat']
class Sleepycat(Store):
context_aware = True
formula_aware = True
transaction_aware = False
db_env = None
def __init__(self, configuration=None, identifier=None):
if not has_bsddb: raise ImportError("Unable to import bsddb/bsddb3, store is unusable.")
self.__open = False
self.__identifier = identifier
super(Sleepycat, self).__init__(configuration)
self._loads = self.node_pickler.loads
self._dumps = self.node_pickler.dumps
def __get_identifier(self):
return self.__identifier
identifier = property(__get_identifier)
def _init_db_environment(self, homeDir, create=True):
envsetflags = db.DB_CDB_ALLDB
envflags = db.DB_INIT_MPOOL | db.DB_INIT_CDB | db.DB_THREAD
if not exists(homeDir):
if create==True:
mkdir(homeDir) # TODO: implement create method and refactor this to it
self.create(homeDir)
else:
return NO_STORE
db_env = db.DBEnv()
db_env.set_cachesize(0, 1024*1024*50) # TODO
#db_env.set_lg_max(1024*1024)
db_env.set_flags(envsetflags, 1)
db_env.open(homeDir, envflags | db.DB_CREATE)
return db_env
def is_open(self):
return self.__open
def open(self, path, create=True):
if not has_bsddb: return NO_STORE
homeDir = path
if self.__identifier is None:
self.__identifier = URIRef(pathname2url(abspath(homeDir)))
db_env = self._init_db_environment(homeDir, create)
if db_env == NO_STORE:
return NO_STORE
self.db_env = db_env
self.__open = True
dbname = None
dbtype = db.DB_BTREE
# auto-commit ensures that the open-call commits when transactions are enabled
dbopenflags = db.DB_THREAD
if self.transaction_aware == True:
dbopenflags |= db.DB_AUTO_COMMIT
dbmode = 0660
dbsetflags = 0
# create and open the DBs
self.__indicies = [None,] * 3
self.__indicies_info = [None,] * 3
for i in xrange(0, 3):
index_name = to_key_func(i)((b("s"), b("p"), b("o")), b("c")).decode()
index = db.DB(db_env)
index.set_flags(dbsetflags)
index.open(index_name, dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
self.__indicies[i] = index
self.__indicies_info[i] = (index, to_key_func(i), from_key_func(i))
lookup = {}
for i in xrange(0, 8):
results = []
for start in xrange(0, 3):
score = 1
len = 0
for j in xrange(start, start+3):
if i & (1<<(j%3)):
score = score << 1
len += 1
else:
break
tie_break = 2-start
results.append(((score, tie_break), start, len))
results.sort()
score, start, len = results[-1]
def get_prefix_func(start, end):
def get_prefix(triple, context):
if context is None:
yield ""
else:
yield context
i = start
while i<end:
yield triple[i%3]
i += 1
yield ""
return get_prefix
lookup[i] = (self.__indicies[start], get_prefix_func(start, start + len), from_key_func(start), results_from_key_func(start, self._from_string))
self.__lookup_dict = lookup
self.__contexts = db.DB(db_env)
self.__contexts.set_flags(dbsetflags)
self.__contexts.open("contexts", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
self.__namespace = db.DB(db_env)
self.__namespace.set_flags(dbsetflags)
self.__namespace.open("namespace", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
self.__prefix = db.DB(db_env)
self.__prefix.set_flags(dbsetflags)
self.__prefix.open("prefix", dbname, dbtype, dbopenflags|db.DB_CREATE, dbmode)
self.__k2i = db.DB(db_env)
self.__k2i.set_flags(dbsetflags)
self.__k2i.open("k2i", dbname, db.DB_HASH, dbopenflags|db.DB_CREATE, dbmode)
self.__i2k = db.DB(db_env)
self.__i2k.set_flags(dbsetflags)
self.__i2k.open("i2k", dbname, db.DB_RECNO, dbopenflags|db.DB_CREATE, dbmode)
self.__needs_sync = False
t = Thread(target=self.__sync_run)
t.setDaemon(True)
t.start()
self.__sync_thread = t
return VALID_STORE
def __sync_run(self):
from time import sleep, time
try:
min_seconds, max_seconds = 10, 300
while self.__open:
if self.__needs_sync:
t0 = t1 = time()
self.__needs_sync = False
while self.__open:
sleep(.1)
if self.__needs_sync:
t1 = time()
self.__needs_sync = False
if time()-t1 > min_seconds or time()-t0 > max_seconds:
self.__needs_sync = False
_logger.debug("sync")
self.sync()
break
else:
sleep(1)
except Exception, e:
_logger.exception(e)
def sync(self):
if self.__open:
for i in self.__indicies:
i.sync()
self.__contexts.sync()
self.__namespace.sync()
self.__prefix.sync()
self.__i2k.sync()
self.__k2i.sync()
def close(self, commit_pending_transaction=False):
self.__open = False
self.__sync_thread.join()
for i in self.__indicies:
i.close()
self.__contexts.close()
self.__namespace.close()
self.__prefix.close()
self.__i2k.close()
self.__k2i.close()
self.db_env.close()
def add(self, (subject, predicate, object), context, quoted=False, txn=None):
"""\
Add a triple to the store of triples.
"""
assert self.__open, "The Store must be open."
assert context!=self, "Can not add triple directly to store"
Store.add(self, (subject, predicate, object), context, quoted)
_to_string = self._to_string
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
cspo, cpos, cosp = self.__indicies
value = cspo.get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
if value is None:
self.__contexts.put(bb(c), "", txn=txn)
contexts_value = cspo.get(bb("%s^%s^%s^%s^" % ("", s, p, o)), txn=txn) or b("")
contexts = set(contexts_value.split(b("^")))
contexts.add(bb(c))
contexts_value = b("^").join(contexts)
assert contexts_value!=None
cspo.put(bb("%s^%s^%s^%s^" % (c, s, p, o)), "", txn=txn)
cpos.put(bb("%s^%s^%s^%s^" % (c, p, o, s)), "", txn=txn)
cosp.put(bb("%s^%s^%s^%s^" % (c, o, s, p)), "", txn=txn)
if not quoted:
cspo.put(bb("%s^%s^%s^%s^" % ("", s, p, o)), contexts_value, txn=txn)
cpos.put(bb("%s^%s^%s^%s^" % ("", p, o, s)), contexts_value, txn=txn)
cosp.put(bb("%s^%s^%s^%s^" % ("", o, s, p)), contexts_value, txn=txn)
self.__needs_sync = True
def __remove(self, (s, p, o), c, quoted=False, txn=None):
cspo, cpos, cosp = self.__indicies
contexts_value = cspo.get(b("^").join([b(""), s, p, o, b("")]), txn=txn) or b("")
contexts = set(contexts_value.split(b("^")))
contexts.discard(c)
contexts_value = b("^").join(contexts)
for i, _to_key, _from_key in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
if not quoted:
if contexts_value:
for i, _to_key, _from_key in self.__indicies_info:
i.put(_to_key((s, p, o), b("")), contexts_value, txn=txn)
else:
for i, _to_key, _from_key in self.__indicies_info:
try:
i.delete(_to_key((s, p, o), b("")), txn=txn)
except db.DBNotFoundError, e:
pass # TODO: is it okay to ignore these?
def remove(self, (subject, predicate, object), context, txn=None):
assert self.__open, "The Store must be open."
Store.remove(self, (subject, predicate, object), context)
_to_string = self._to_string
if context is not None:
if context == self:
context = None
if subject is not None and predicate is not None and object is not None and context is not None:
s = _to_string(subject, txn=txn)
p = _to_string(predicate, txn=txn)
o = _to_string(object, txn=txn)
c = _to_string(context, txn=txn)
value = self.__indicies[0].get(bb("%s^%s^%s^%s^" % (c, s, p, o)), txn=txn)
if value is not None:
self.__remove((bb(s), bb(p), bb(o)), bb(c), txn=txn)
self.__needs_sync = True
else:
cspo, cpos, cosp = self.__indicies
index, prefix, from_key, results_from_key = self.__lookup((subject, predicate, object), context, txn=txn)
cursor = index.cursor(txn=txn)
try:
current = cursor.set_range(prefix)
needs_sync = True
except db.DBNotFoundError:
current = None
needs_sync = False
cursor.close()
while current:
key, value = current
cursor = index.cursor(txn=txn)
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, 'next')()
except db.DBNotFoundError:
current = None
cursor.close()
if key.startswith(prefix):
c, s, p, o = from_key(key)
if context is None:
contexts_value = index.get(key, txn=txn) or b("")
contexts = set(contexts_value.split(b("^"))) # remove triple from all non quoted contexts
contexts.add(b("")) # and from the conjunctive index
for c in contexts:
for i, _to_key, _ in self.__indicies_info:
i.delete(_to_key((s, p, o), c), txn=txn)
else:
self.__remove((s, p, o), c, txn=txn)
else:
break
if context is not None:
if subject is None and predicate is None and object is None:
# TODO: also if context becomes empty and not just on remove((None, None, None), c)
try:
self.__contexts.delete(bb(_to_string(context, txn=txn)), txn=txn)
except db.DBNotFoundError, e:
pass
self.__needs_sync = needs_sync
def triples(self, (subject, predicate, object), context=None, txn=None):
"""A generator over all the triples matching """
assert self.__open, "The Store must be open."
if context is not None:
if context == self:
context = None
_from_string = self._from_string
index, prefix, from_key, results_from_key = self.__lookup((subject, predicate, object), context, txn=txn)
cursor = index.cursor(txn=txn)
try:
current = cursor.set_range(prefix)
except db.DBNotFoundError:
current = None
cursor.close()
while current:
key, value = current
cursor = index.cursor(txn=txn)
try:
cursor.set_range(key)
# Cheap hack so 2to3 doesn't convert to next(cursor)
current = getattr(cursor, 'next')()
except db.DBNotFoundError:
current = None
cursor.close()
if key and key.startswith(prefix):
contexts_value = index.get(key, txn=txn)
yield results_from_key(key, subject, predicate, object, contexts_value)
else:
break
def __len__(self, context=None):
assert self.__open, "The Store must be open."
if context is not None:
if context == self:
context = None
if context is None:
prefix = b("^")
else:
prefix = bb("%s^" % self._to_string(context))
index = self.__indicies[0]
cursor = index.cursor()
current = cursor.set_range(prefix)
count = 0
while current:
key, value = current
if key.startswith(prefix):
count +=1
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, 'next')()
else:
break
cursor.close()
return count
def bind(self, prefix, namespace):
prefix = prefix.encode("utf-8")
namespace = namespace.encode("utf-8")
bound_prefix = self.__prefix.get(namespace)
if bound_prefix:
self.__namespace.delete(bound_prefix)
self.__prefix[namespace] = prefix
self.__namespace[prefix] = namespace
def namespace(self, prefix):
prefix = prefix.encode("utf-8")
ns = self.__namespace.get(prefix, None)
if ns is not None:
return URIRef(ns.decode('utf-8'))
return None
def prefix(self, namespace):
namespace = namespace.encode("utf-8")
prefix = self.__prefix.get(namespace, None)
if prefix is not None:
return prefix.decode('utf-8')
return None
def namespaces(self):
cursor = self.__namespace.cursor()
results = []
current = cursor.first()
while current:
prefix, namespace = current
results.append((prefix.decode('utf-8'), namespace.decode('utf-8')))
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, 'next')()
cursor.close()
for prefix, namespace in results:
yield prefix, URIRef(namespace)
def contexts(self, triple=None):
_from_string = self._from_string
_to_string = self._to_string
if triple:
s, p, o = triple
s = _to_string(s)
p = _to_string(p)
o = _to_string(o)
contexts = self.__indicies[0].get(bb("%s^%s^%s^%s^" % ("", s, p, o)))
if contexts:
for c in contexts.split(b("^")):
if c:
yield _from_string(c)
else:
index = self.__contexts
cursor = index.cursor()
current = cursor.first()
cursor.close()
while current:
key, value = current
context = _from_string(key)
yield context
cursor = index.cursor()
try:
cursor.set_range(key)
# Hack to stop 2to3 converting this to next(cursor)
current = getattr(cursor, 'next')()
except db.DBNotFoundError:
current = None
cursor.close()
def _from_string(self, i):
k = self.__i2k.get(int(i))
return self._loads(k)
def _to_string(self, term, txn=None):
k = self._dumps(term)
i = self.__k2i.get(k, txn=txn)
if i is None:
            # weird behavior from bsddb not taking a txn as a keyword argument
# for append
if self.transaction_aware:
i = "%s" % self.__i2k.append(k, txn)
else:
i = "%s" % self.__i2k.append(k)
self.__k2i.put(k, i, txn=txn)
else:
i = i.decode()
return i
def __lookup(self, (subject, predicate, object), context, txn=None):
_to_string = self._to_string
if context is not None:
context = _to_string(context, txn=txn)
i = 0
if subject is not None:
i += 1
subject = _to_string(subject, txn=txn)
if predicate is not None:
i += 2
predicate = _to_string(predicate, txn=txn)
if object is not None:
i += 4
object = _to_string(object, txn=txn)
index, prefix_func, from_key, results_from_key = self.__lookup_dict[i]
#print (subject, predicate, object), context, prefix_func, index #DEBUG
prefix = bb("^".join(prefix_func((subject, predicate, object), context)))
return index, prefix, from_key, results_from_key
def to_key_func(i):
def to_key(triple, context):
"Takes a string; returns key"
return b("^").join((context, triple[i%3], triple[(i+1)%3], triple[(i+2)%3], b(""))) # "" to tac on the trailing ^
return to_key
def from_key_func(i):
def from_key(key):
"Takes a key; returns string"
parts = key.split(b("^"))
return parts[0], parts[(3-i+0)%3+1], parts[(3-i+1)%3+1], parts[(3-i+2)%3+1]
return from_key
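# Key layout sketch for the two helpers above (byte values illustrative):
#   to_key_func(0)((b("s"), b("p"), b("o")), b("c"))  -> b("c^s^p^o^")
#   from_key_func(0)(b("c^s^p^o^"))                   -> (b("c"), b("s"), b("p"), b("o"))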
def results_from_key_func(i, from_string):
def from_key(key, subject, predicate, object, contexts_value):
"Takes a key and subject, predicate, object; returns tuple for yield"
parts = key.split(b("^"))
if subject is None:
# TODO: i & 1: # dis assemble and/or measure to see which is faster
# subject is None or i & 1
s = from_string(parts[(3-i+0)%3+1])
else:
s = subject
if predicate is None:#i & 2:
p = from_string(parts[(3-i+1)%3+1])
else:
p = predicate
if object is None:#i & 4:
o = from_string(parts[(3-i+2)%3+1])
else:
o = object
return (s, p, o), (from_string(c) for c in contexts_value.split(b("^")) if c)
return from_key
def readable_index(i):
s, p, o = "?" * 3
if i & 1: s = "s"
if i & 2: p = "p"
if i & 4: o = "o"
return "%s,%s,%s" % (s, p, o)
| 36.366541
| 156
| 0.529746
|
47863e6326596d946086e2f6ecb631a09d35c1f5
| 1,660
|
py
|
Python
|
cnn/main.py
|
arielrodrigues/nlp-relations-extraction-ptbr
|
421db8ea0443f1a4248b62aea17adb0fea69d1f3
|
[
"MIT"
] | null | null | null |
cnn/main.py
|
arielrodrigues/nlp-relations-extraction-ptbr
|
421db8ea0443f1a4248b62aea17adb0fea69d1f3
|
[
"MIT"
] | null | null | null |
cnn/main.py
|
arielrodrigues/nlp-relations-extraction-ptbr
|
421db8ea0443f1a4248b62aea17adb0fea69d1f3
|
[
"MIT"
] | 1
|
2021-02-09T16:27:10.000Z
|
2021-02-09T16:27:10.000Z
|
# -*- coding: utf-8 -*-
__author__ = "Ariel Rodrigues"
__version__ = "0.1.0"
__license__ = ""
"""
Luigi wrapper task for the CNN model.
params: {
join_data: string,
embeddings: string
}
"""
import luigi
import logging
import datetime
import utils
log = logging.getLogger(__name__)
class CNN(luigi.Task):
params = luigi.DictParameter(default=None)
def are_valid_params(self):
return self.params and \
type(self.params["join_data"]) is str and \
type(self.params["embeddings"]) is str
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if not self.are_valid_params():
            raise Exception('CNN: wrong params type')
self.constants = {
'join_data': f'../outputs/{self.params["join_data"]}',
'embeddings': f'../outputs/{self.params["embeddings"]}'
}
def run(self):
self.emite_log(f'starting task with params {str(self.constants)}')
RESULTS = utils.main(self.constants['join_data'], self.constants['embeddings'])
self.write_result(RESULTS)
        self.emite_log('task has finished')
def emite_log(self, message):
formated_datetime = datetime.datetime.now().strftime('%d-%m-%Y-%H-%M-%S')
log.info(f'{formated_datetime}: {message}')
def output(self):
        return luigi.LocalTarget(f"../outputs/results/result_{self.constants['join_data']}_{self.constants['embeddings']}")
def write_result(self, result):
with self.output().open('w') as out_file:
for line in result:
out_file.write(line)
if __name__ == '__main__':
luigi.run()
| 25.151515
| 123
| 0.620482
|
6c45fb71fe5f6d6211f5ae985a5e9d6210284cd4
| 450
|
py
|
Python
|
121-Best-Time-to-Buy-and-Sell -Stock.py
|
pyxuweitao/Python-LeetCode
|
16b23e340d38658107b6f4a6ab93119cda690d2a
|
[
"Apache-2.0"
] | null | null | null |
121-Best-Time-to-Buy-and-Sell -Stock.py
|
pyxuweitao/Python-LeetCode
|
16b23e340d38658107b6f4a6ab93119cda690d2a
|
[
"Apache-2.0"
] | null | null | null |
121-Best-Time-to-Buy-and-Sell -Stock.py
|
pyxuweitao/Python-LeetCode
|
16b23e340d38658107b6f4a6ab93119cda690d2a
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'xuweitao'
#runtime = 64ms
class Solution:
# @param {integer[]} prices
# @return {integer}
def maxProfit(self, prices):
profit = 0
if len( prices ) != 0:
min_price = prices[0]
for iPrice in prices:
if iPrice < min_price:
min_price = iPrice
profit = iPrice - min_price if iPrice - min_price > profit else profit
return profit
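# Example: Solution().maxProfit([7, 1, 5, 3, 6, 4]) == 5 (buy at price 1, sell at price 6).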
| 30
| 86
| 0.544444
|
1033cf3de60eb009d82175229ac3f753a6f973df
| 4,385
|
py
|
Python
|
phr/settings.py
|
Ezenwankwo/health_stack
|
7e760397a00d045713e286c75dca4038bc6b0145
|
[
"Apache-2.0"
] | null | null | null |
phr/settings.py
|
Ezenwankwo/health_stack
|
7e760397a00d045713e286c75dca4038bc6b0145
|
[
"Apache-2.0"
] | null | null | null |
phr/settings.py
|
Ezenwankwo/health_stack
|
7e760397a00d045713e286c75dca4038bc6b0145
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for phr project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
from decouple import config
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=lambda v: [s.strip() for s in v.split(',')])
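# For instance, ALLOWED_HOSTS=".herokuapp.com, localhost" in the environment
# becomes ['.herokuapp.com', 'localhost'] (illustrative value).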
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'autoslug',
'users',
'records',
'plans',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'phr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'phr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/accounts/file/list/'
LOGOUT_REDIRECT_URL = '/accounts/login/'
# Use our custom User Model
AUTH_USER_MODEL = 'users.Account'
# Authy Application Key
ACCOUNT_SECURITY_API_KEY = config('ACCOUNT_SECURITY_API_KEY')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
| 26.737805
| 98
| 0.713341
|
efec3bf59e9196127f85d1fdacfb7ba27f6724ce
| 6,686
|
py
|
Python
|
calabiyau/views/accounting.py
|
TachyonicProject/calabiyau
|
415a8ada4a93ee84c4776e89c9442af328dcfdd6
|
[
"BSD-3-Clause"
] | null | null | null |
calabiyau/views/accounting.py
|
TachyonicProject/calabiyau
|
415a8ada4a93ee84c4776e89c9442af328dcfdd6
|
[
"BSD-3-Clause"
] | 8
|
2019-06-06T11:01:48.000Z
|
2019-06-06T12:18:03.000Z
|
calabiyau/views/accounting.py
|
TachyonicProject/calabiyau
|
415a8ada4a93ee84c4776e89c9442af328dcfdd6
|
[
"BSD-3-Clause"
] | 3
|
2019-03-28T07:36:22.000Z
|
2019-12-27T12:10:14.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from luxon import register
from luxon import router
from luxon.helpers.api import raw_list, sql_list, obj
from luxon.utils import sql
from luxon import db
from calabiyau.models.subscribers import calabiyau_subscriber
from luxon import GetLogger
log = GetLogger(__name__)
@register.resources()
class Accounting(object):
def __init__(self):
# Services Users
router.add('GET',
'/v1/accounting/data/daily/{user_id}',
self.data_daily,
tag='services:view')
router.add('GET',
'/v1/accounting/data/monthly/{user_id}',
self.data_monthly,
tag='services:view')
router.add('GET',
'/v1/accounting/data/usage/{user_id}',
self.data_usage,
tag='services:view')
def data_daily(self, req, resp, user_id):
user = obj(req, calabiyau_subscriber, sql_id=user_id)
f_id = sql.Field('id')
f_user_id = sql.Field('user_id')
f_today = sql.Field('today')
f_acctinputoctets = sql.Field('ROUND(acctinputoctets' +
' / 1024 / 1024 / 1024, 2)' +
' AS acctinputoctets')
f_acctoutputoctets = sql.Field('ROUND(acctoutputoctets' +
' / 1024 / 1024 / 1024, 2)' +
' AS acctoutputoctets')
v_user_id = sql.Value(user['id'])
select = sql.Select('calabiyau_accounting')
select.fields = (f_id,
f_today,
f_acctinputoctets,
f_acctoutputoctets,)
select.where = f_user_id == v_user_id
select.order_by = f_today('>')
return sql_list(req,
select,
limit=32)
def data_monthly(self, req, resp, user_id):
user = obj(req, calabiyau_subscriber, sql_id=user_id)
f_id = sql.Field('id')
f_user_id = sql.Field('user_id')
f_today = sql.Field('today')
f_acctinputoctets = sql.Field('ROUND(SUM(acctinputoctets)' +
' / 1024 / 1024 / 1024, 2)' +
' AS acctinputoctets')
f_acctoutputoctets = sql.Field('ROUND(SUM(acctoutputoctets)' +
' / 1024 / 1024 / 1024, 2)' +
' AS acctoutputoctets')
v_user_id = sql.Value(user['id'])
select = sql.Select('calabiyau_accounting')
select.fields = (f_id,
f_today,
f_acctinputoctets,
f_acctoutputoctets,)
select.where = f_user_id == v_user_id
select.order_by = f_today('>')
select.group_by = sql.Field('YEAR(today)'), sql.Field('MONTH(today)')
return sql_list(req,
select,
limit=12)
def data_usage(self, req, resp, user_id):
content = []
user = obj(req, calabiyau_subscriber, sql_id=user_id)
if user['volume_used_bytes']:
used = user['volume_used_bytes'] / 1024 / 1024 / 1024
else:
used = 0
f_user_id = sql.Field('user_id')
v_user_id = sql.Value(user['id'])
f_volume_gb = sql.Field('sum(volume_gb) as volume_gb')
select = sql.Select('calabiyau_topup')
select.fields = f_volume_gb
select.where = f_user_id == v_user_id
with db() as conn:
result = conn.execute(select.query, select.values).fetchone()
if result:
topups = result['volume_gb']
if topups is None:
topups = 0
else:
topups = 0
if not user['volume_used']:
f_pkg_id = sql.Field('id')
v_pkg_id = sql.Value(user['package_id'])
f_volume_gb = sql.Field('volume_gb')
select = sql.Select('calabiyau_package')
select.where = f_pkg_id == v_pkg_id
result = conn.execute(select.query, select.values).fetchone()
if result:
pkg_volume = result['volume_gb']
if pkg_volume is None:
pkg_volume = 0
pkg_volume = pkg_volume - used
if pkg_volume < 0:
pkg_volume = 0
else:
pkg_volume = 0
else:
pkg_volume = 0
topups = float(topups) - float(used)
if topups < 0:
topups = 0
content.append({'type': 'Topups',
'gb': round(float(topups), 2)})
content.append({'type': 'Used',
'gb': round(float(used), 2)})
content.append({'type': 'Package',
'gb': round(float(pkg_volume), 2)})
return raw_list(req, content)
| 39.797619
| 79
| 0.560873
|
6f145cbf68dae0d4813f36b236a1f94f58541e7b
| 1,618
|
py
|
Python
|
setup.py
|
tjnh05/ner_build
|
b07ee1cd78c82804f386fd935c983f202020669a
|
[
"MIT"
] | null | null | null |
setup.py
|
tjnh05/ner_build
|
b07ee1cd78c82804f386fd935c983f202020669a
|
[
"MIT"
] | null | null | null |
setup.py
|
tjnh05/ner_build
|
b07ee1cd78c82804f386fd935c983f202020669a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import setuptools, os
import sys
packagename= "pyner"
version = ""
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, packagename, "ner.py")) as f:
for line in f.readlines():
if "__version__" in line:
version = line.strip().replace('"', '').split()[-1]
break
path = os.path.join(os.getcwd(),packagename, "README.md")
requiredments = os.path.join(os.getcwd(),packagename, "requirements.txt")
with open(path, "r", encoding='utf-8') as fh:
long_description = ''.join(fh.readlines()[:-1])
with open(requiredments, "r", encoding='utf-8') as fh:
requiredpackages = [ line.strip() for line in fh.readlines() ]
#print(requiredpackages)
setuptools.setup(
name=packagename,
version=version,
author="Wang Bodhi Faqun",
author_email="jyxz5@hotmail.com",
description="This program is SDK of NER service API to handle one sentence or simple text file.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/tjnh05/ner",
license = 'MIT',
keywords = 'ner pyner',
platforms = ['any'],
packages=setuptools.find_packages(),
package_data={
'pyner': ['test.txt','requirements.txt']
},
entry_points = {
"console_scripts": [
"ner=pyner.ner:main"
]
},
install_requires=requiredpackages,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.5.2',
)
| 28.385965
| 101
| 0.639061
|
d5ae876c98257b3bb1a74581ac8aa627f2c257bc
| 1,152
|
py
|
Python
|
kubernetes/test/test_v1_azure_disk_volume_source.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_azure_disk_volume_source.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1_azure_disk_volume_source.py
|
amanagarwal33/python
|
e31693557f75950805fb4dc5af4cb7434a470e26
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
    OpenAPI spec version: v1.15.6
    Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_azure_disk_volume_source import V1AzureDiskVolumeSource # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1AzureDiskVolumeSource(unittest.TestCase):
"""V1AzureDiskVolumeSource unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1AzureDiskVolumeSource(self):
"""Test V1AzureDiskVolumeSource"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1_azure_disk_volume_source.V1AzureDiskVolumeSource() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 25.043478
| 124
| 0.717014
|
9201bd1f017c6fc48787c8a3155692f107ea9adc
| 151
|
py
|
Python
|
multiple_futures_prediction/__init__.py
|
LaudateCorpus1/ml-multiple-futures-prediction
|
6b24c8e10c0f974af2df59fdfc68ba83a0ad21e1
|
[
"AML"
] | 82
|
2019-10-30T13:53:55.000Z
|
2022-03-28T19:40:01.000Z
|
multiple_futures_prediction/__init__.py
|
LaudateCorpus1/ml-multiple-futures-prediction
|
6b24c8e10c0f974af2df59fdfc68ba83a0ad21e1
|
[
"AML"
] | 6
|
2019-11-11T12:58:10.000Z
|
2021-05-04T04:11:15.000Z
|
multiple_futures_prediction/__init__.py
|
LaudateCorpus1/ml-multiple-futures-prediction
|
6b24c8e10c0f974af2df59fdfc68ba83a0ad21e1
|
[
"AML"
] | 21
|
2019-11-04T20:20:41.000Z
|
2022-03-26T11:39:27.000Z
|
#
# Copyright (C) 2020 Apple Inc. All rights reserved.
#
"""Demo code for Multiple Futures Prediction Paper (NeurIPS 2019)."""
__version__ = "0.1.0"
| 18.875
| 69
| 0.695364
|
bbbe2fe62d3e15546180fd97804d5434273c3f11
| 17,247
|
py
|
Python
|
sandbox/elearning/test_markup_conversion.py
|
cirosantilli/python-utils
|
3854d2c7973c6382f76e311423c219bccacb8c1d
|
[
"MIT"
] | 1
|
2018-10-04T15:29:04.000Z
|
2018-10-04T15:29:04.000Z
|
sandbox/elearning/test_markup_conversion.py
|
cirosantilli/python-utils
|
3854d2c7973c6382f76e311423c219bccacb8c1d
|
[
"MIT"
] | null | null | null |
sandbox/elearning/test_markup_conversion.py
|
cirosantilli/python-utils
|
3854d2c7973c6382f76e311423c219bccacb8c1d
|
[
"MIT"
] | null | null | null |
import os.path
import re
from xml.parsers import expat
import codecs, sys
'''
Transforms my inner markup into markup for my WordPress site.
Main tasks are:
- expand as much JavaScript as possible to improve speed and portability for non-JS users and Google Reader-like sites. Things that can be handled with CSS alone are left untouched.
TODO:
is there a way to put formulas in the TOC??
how to avoid repeating a theorem and a header with almost identical names?
'''
theorem_re = re.compile(r'<div class="theorem"( id="([^"]*?)"|)>')
definition_re = re.compile(r'<div class="definition"( id="([^"]*?)"|)>')
example_re = re.compile(r'<div class="example"( id="([^"]*?")|)>')
newline_space_re = re.compile(r'[ ]+(\n|\r\n)[ ]+', flags=re.DOTALL)
newline_re = re.compile(r'(\n|\r\n)(\n|\r\n)(\n|\r\n)+', flags=re.DOTALL)
space_re = re.compile(r' [ ]+')
endstartspace_re = re.compile(r'(^\s+|\s+$)') #- used to remove extra whitespace at the beginning and end of the transform -#
whitespace_re = re.compile(r'^\s+$')
iqeq_re = re.compile(r'(\\\[|\\\()(.*?)(\\\]|\\\))', flags=re.DOTALL)
header_re = re.compile(r'<h\d>(.*?)</h\d>')
header_line_re = re.compile(r'==(\s*?)----', flags=re.DOTALL)
def to_html_minus( match ):
result = match.group()
result = result.replace('<','<')
return result
def to_ascii_minus( match ):
result = match.group()
result = result.replace('<','<')
return result
def remove_math_tags( match ):
result = match.group()
result = result.replace('\\(','')
result = result.replace('\\)','')
return result
def first_letter_upper_case( s ):
return s[0].upper() + s[1:]
#- major function. transform old file into wiki file. ------------------------------#
def transform(input):
output = input
#- direct replaces ------------------------------#
# output = output.replace('<div class="theo-title">', '')
# output = output.replace('<div class="theo-body">', '')
# output = output.replace('<div class="theo-hypothesis">', '\'\'\'Hypothesis\'\'\' ')
# output = output.replace('<div class="theo-conclusions">', '\'\'\'Conclusion\'\'\' ')
#
# output = output.replace('<div class="def-title">', '')
# output = output.replace('<div class="def-body">', '')
#
# output = output.replace('<div class="exp-title">', '')
# output = output.replace('<div class="exp-body">', '')
#
# output = output.replace('<div class="fact">', '')
#
# output = output.replace('</div>', '')
# output = output.replace('</div>', '')
#
# #- html tags simple replace-#
# output = output.replace('<ul>', '\n\n')
# output = output.replace('</ul>', '\n\n')
# output = output.replace('<li>', '* ')
# output = output.replace('</li>', '\n')
#
# output = output.replace('<p>', '\n\n')
# output = output.replace('</p>', '\n\n')
#
# output = output.replace('<iq>', r'\(')
# output = output.replace('</iq>', r'\)')
# output = output.replace('<eq>', r'\[')
# output = output.replace('</eq>', r'\]')
#
#
#
# #- regexp ------------------------------#
# output = theorem_re.sub('\'\'\'Theorem\'\'\' ',output)
# output = definition_re.sub('\'\'\'Definition\'\'\' ',output)
# output = example_re.sub('\'\'\'Example\'\'\' ',output)
#
# #- remove math from headers. Wikipedia for example does not allow them -#
# output = header_re.sub(remove_math_tags,output)
#- sequential parser ------------------------------#
p = Parser()
p.feed(output) #- Does not take python unicode object!!!! Takes bytes and an encoding. This way you just take it out of a file and feed the xml. -#
p.close()
output = p.result()
# #- simple replaces ------------------------------#
#
# #- math -#
# output = output.replace('\\(', '<math>')
# output = output.replace('\\)', '</math>')
# output = output.replace('\\[', '\n\n<math>')
# output = output.replace('\\]', '</math>\n\n')
#
#
#
# #- formatting -#
# output = header_line_re.sub('==\n\n',output) #- remove extra line that would be created if after == -#
#
# #- spaces -#
# output = output.replace('\t', '')
# output = space_re.sub('',output)
# output = newline_space_re.sub('\n',output) #- remove spaces together with whitelines. Used 2x because of overlappings and becaues Im lazy to figure a proper solution. -#
# output = newline_space_re.sub('\n',output)
# output = newline_re.sub('\n\n',output)
# output = endstartspace_re.sub('',output) #- remove extra white from beginning and end -#
return output
class Parser:
def __init__(self):
self._parser = expat.ParserCreate('utf-8') #- Does not take python unicode object!!!! Takes bytes and an encoding. This way you just take it out of a file and feed the xml. -#
self._parser.StartElementHandler = self.start
self._parser.EndElementHandler = self.end
self._parser.StartCdataSectionHandler = self.startCdata
self._parser.EndCdataSectionHandler = self.endCdata
self._parser.CharacterDataHandler = self.data
self._classes = {
'def-title':['def-marker','Definition'],
'theo-title':['theo-marker','Theorem'],
'theo-hypothesis':['hypo-marker','Hypothesis: '],
'theo-conclusions':['concl-marker','Conclusions: '],
'def-title':['def-marker','Definition'],
'algo-title':['algo-marker','Algorithm'],
'exp-title':['exp-marker','Example'],
'cexample':['cexp-marker','Counter-example'],
'rem-title':['rem-marker','Remark'],
}
def feed(self, data):
#- initialization -#
self._result = u''
self._openelems = [] #- a list that stores tags in order of arrival, so that we know tags of the closing element -#
self._encountered = { #- how many of these tags have been seen in the whole document. -#
'h2':0
}
        #- I don't have standard xml, but almost. Convert it. -#
data = '<root>' + data + '</root>' #- one root tag -#
# data = data.replace('\\[', '<eq>')
# data = data.replace('\\]', '</eq>')
data = data.replace('&', '&') #- cannot have ampersand in xml. -#
data = iqeq_re.sub(to_html_minus,data) #- cannot have minus in xml -#
#- parse -#
self._parser.Parse(data, 0)
def close(self):
self._parser.Parse("", 1) # end of data
del self._parser # get rid of circular references
#- Undo what I did in feed. -#
self._result = self._result.replace('&','&') #- ampersand representation -#
self._result = iqeq_re.sub(to_ascii_minus,self._result) #- minus representation -#
def start(self, tag, atrs):
self._openelems.append({'tag':tag, 'atrs':atrs, 'child':0}) #- put last element in element pile. tag, atrs, n, where n indicates we are on the nth child. doclose is used by the closing tag. -#
if(len(self._openelems)>1):
self._openelems[-2]['child'] += 1 #- increase child of last open element -#
newatrs = atrs.copy()
if(tag=='div' and 'class' in atrs):
            clas = atrs['class'] #- cannot name a variable class! it is reserved in python, of course... -#
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
if(clas in self._classes):
self._result += self.original_open_xml_elem((tag,atrs)) + '<span class="'+self._classes[clas][0]+'">'+self._classes[clas][1]+'</span> '
else:
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag=='h2'):
self._encountered['h2']+=1
if( self._encountered['h2']==1 ): #- add toc before first h2 of document -#
self._result += '<ul id=\"page-local-toc\"></ul>\n\n'
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag=='h3' ):
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag=='h4' ):
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag=='h5' ):
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag=='h6' ):
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag == 'p'):
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag == 'ul'):
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag == 'ol'):
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag == 'li'):
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span>'
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag == 'iq'):
self._result += r'\('
elif(tag == 'eq'):
if('id' in atrs): #- to link with refs -#
self._result += '<div class="margin-rel"><span class="margin-abs">'+atrs['id']+'</span><span id="'+atrs['id']+'">'
self._result += r'\[\begin{align}'
elif(tag == 'em'):
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag == 'span'):
self._result += self.original_open_xml_elem((tag,atrs))
elif(tag == 'a'): #- use current site path. ex: href="x" "asdf" -> http://mysite.index.php?asdf, "#asdf" -> http://mysite.index.php?currest_page#asdf -#
newhref = self.anc_href_transform(newatrs['href'])
newatrs['href'] = newhref
self._result += self.original_open_xml_elem((tag,newatrs))
elif(tag == 'ref'): #- link to elements, print link -#
self._result += '<a href="#'+ atrs['to'] +'">'+atrs['to']+'</a>'
elif(tag == 'img'): #- these tags close immediately -#
newsrc = self.img_src_transform(newatrs['src'])
newatrs['src'] = newsrc
self._result += '<a href="' + newsrc + '">'
self._result += self.original_open_xml_elem((tag,newatrs),True)
def data(self, data):
# if(not whitespace_re.match(data)):
# if( self.elem_has_tag_and_atr(self._openelems[-1],'div','class','img-subtitle') ):
# self._result += '|'
# if(len(self._openelems)>1 ):
# beflast = self._openelems[-2]
# if( beflast['tag']=='div' and 'class' in beflast['atrs']
# and beflast['atrs']['class'] in self._classes
# and beflast['child']==1): #- first child -#
# clas = beflast['atrs']['class'] #- cannot name a variable class! restricted in python of cours... -#
# self._result += '<span class="'+self._classes[clas][0]+'">'+self._classes[clas][1]+'</span>'
# curTag = self._openelems[-1]
self._result += data
def end(self, tag):
closes = self._openelems.pop()
atrs = closes['atrs'] #- remove the last open element from open element list. these are the atrs of the current closing element -#
if(tag=='div' and 'class' in atrs):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag=='h2'):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag=='h3' ):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag=='h4' ):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag=='h5' ):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag=='h6' ):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag == 'p'):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag == 'ul'):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag == 'ol'):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag == 'li'):
self._result += self.original_close_xml_elem((tag,atrs))
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
elif(tag == 'iq'):
self._result += r'\)'
elif(tag == 'eq'):
self._result += r'\end{align}\]'
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</span></div>'
elif(tag == 'em'):
self._result += self.original_close_xml_elem((tag,atrs))
elif(tag == 'span'):
self._result += self.original_close_xml_elem((tag,atrs))
elif(tag == 'a'):
self._result += self.original_close_xml_elem((tag,atrs))
elif(tag == 'img'): #- these tags close immediately -#
self._result += '</a>'
if('id' in closes['atrs']): #- to link with refs. close what we opened -#
self._result += '</div>'
def startCdata(self):
1
def endCdata(self):
1
def result(self):
return self._result
#- returns true if there is at least one open tag with a given atribute -#
def open_tag_with_atr(self, tag, atr, atrval):
for elem in self._openelems:
if( self.elem_has_tag_and_atr(elem,tag,atr,atrval) ):
return True
return False
#- returns true if the element has a given tag and atribute atr with value atrval -#
def elem_has_tag_and_atr(self, elem, tag, atr, atrval ):
return elem[0]==tag and atr in elem[1] and elem[1][atr]==atrval
#- returns the original opening of an element found in the xml. Nodata is used for elements that close immediately like img -#
def original_open_xml_elem(self, elem, nodata=False):
output = '<' + elem[0]
atrs = elem[1]
for atr in atrs :
output += ' ' + atr + '="' + atrs[atr] + '"'
if(nodata):
output += '/'
output += '>'
return output
#- returns the original opening of an element found in the xml. Nodata is used for elements that close immediately like img -#
def original_close_xml_elem(self, elem, nodata=False ):
return '</' + elem[0] + '>'
    def img_src_transform(self, oldSrc):
        begin = oldSrc[0:4]
        curSrc = ''
        if(begin=='http'): # absolute URL, keep as-is
            curSrc = oldSrc
        else: # otherwise resolve relative to the site media folder
            curSrc = uri + media_dir + '/' + oldSrc
        return curSrc
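    #- img_src_transform, illustrative example (assumed values): 'http://cdn.example.org/pic.png'
    #  is returned unchanged, while 'pic.png' becomes uri + media_dir + '/pic.png' -#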
    def anc_href_transform(self, oldHref):
        begin = oldHref[0:4]
        if(begin=='http'): # absolute link
            return oldHref
        elif(oldHref[0]=='#'): # id in current page
            return uri + id + oldHref
        else: # otherwise link to the page with given id
            return uri + oldHref
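    #- anc_href_transform, illustrative example (assumed values): 'http://example.org/x' stays
    #  absolute, '#sec-1' becomes uri + id + '#sec-1' (an anchor in the current page),
    #  and 'other-article' becomes uri + 'other-article' -#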
#- main ------------------------------------------------------------#
#- site parameters -#
uri = 'http://localhost/elearning/index.php?'
media_dir = 'uploads' # folder where media is kept
#- local path parameters -#
root = r'C:\xampp\htdocs\elearning'
relative_input_path = 'articles'
relative_output_path = 'articles-test'
input_ext = 'xml'
output_ext = 'html'
ids = [
#'maximal-interval-of-solution-of-an-ode',
#'global-uniqueness-of-solution-of-an-ode-over-an-interval'
#'art'
#'limit-r1-r1'
#'monien-speckenmeyer-algorithm-n-sat'
'brunovky-normal-form-1d-control'
]
#- for commmand line input -#
if(len(sys.argv)>1):
(noext,ext) = os.path.splitext(sys.argv[1])
    if ext.lstrip('.') == input_ext: #- checks for a good extension; splitext keeps the leading dot -#
        ids = [os.path.basename(noext)]
#print ids
#raw_input("click to continue")
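#- illustrative usage (assumed script name): `python transform.py my-article.xml` converts a
#  single article; with no argument every id in the ids list above is processed -#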
for id in ids:
input_path = os.path.join(root, relative_input_path, id + '.' + input_ext)
    with open(input_path,'r') as f:
        input = f.read()
    output = transform(input)
    #print output
    output_path = os.path.join(root, relative_output_path, id + '.' + output_ext)
    with open(output_path,'w') as f: #- close the file before the browser tries to read it -#
        f.write(output.encode('utf-8'))
import webbrowser
webbrowser.open(uri + ids[0] + '#HERETESTID')
''' from wiki
r"(<(p|li|div).*?)<math>","$1\\("
r"<(p|li|div).*?)</math>","$1\\)"
r"<math>","\\[ "
r"</math>"," \\]"
['p','div','ul','ol','li','hi',]
'''
| 38.241685
| 193
| 0.606308
|
08024c0086cf63cb9c0d5c75b00d1a131b03dd05
| 2,562
|
py
|
Python
|
venv/lib/python3.9/site-packages/google/cloud/redis/__init__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | 1
|
2022-01-17T21:06:44.000Z
|
2022-01-17T21:06:44.000Z
|
venv/lib/python3.9/site-packages/google/cloud/redis/__init__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.9/site-packages/google/cloud/redis/__init__.py
|
qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3
|
630dcef73e6a258b6e9a52f934e2dd912ce741f8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.redis_v1.services.cloud_redis.client import CloudRedisClient
from google.cloud.redis_v1.services.cloud_redis.async_client import (
CloudRedisAsyncClient,
)
from google.cloud.redis_v1.types.cloud_redis import CreateInstanceRequest
from google.cloud.redis_v1.types.cloud_redis import DeleteInstanceRequest
from google.cloud.redis_v1.types.cloud_redis import ExportInstanceRequest
from google.cloud.redis_v1.types.cloud_redis import FailoverInstanceRequest
from google.cloud.redis_v1.types.cloud_redis import GcsDestination
from google.cloud.redis_v1.types.cloud_redis import GcsSource
from google.cloud.redis_v1.types.cloud_redis import GetInstanceRequest
from google.cloud.redis_v1.types.cloud_redis import ImportInstanceRequest
from google.cloud.redis_v1.types.cloud_redis import InputConfig
from google.cloud.redis_v1.types.cloud_redis import Instance
from google.cloud.redis_v1.types.cloud_redis import ListInstancesRequest
from google.cloud.redis_v1.types.cloud_redis import ListInstancesResponse
from google.cloud.redis_v1.types.cloud_redis import LocationMetadata
from google.cloud.redis_v1.types.cloud_redis import OperationMetadata
from google.cloud.redis_v1.types.cloud_redis import OutputConfig
from google.cloud.redis_v1.types.cloud_redis import UpdateInstanceRequest
from google.cloud.redis_v1.types.cloud_redis import UpgradeInstanceRequest
from google.cloud.redis_v1.types.cloud_redis import ZoneMetadata
__all__ = (
"CloudRedisClient",
"CloudRedisAsyncClient",
"CreateInstanceRequest",
"DeleteInstanceRequest",
"ExportInstanceRequest",
"FailoverInstanceRequest",
"GcsDestination",
"GcsSource",
"GetInstanceRequest",
"ImportInstanceRequest",
"InputConfig",
"Instance",
"ListInstancesRequest",
"ListInstancesResponse",
"LocationMetadata",
"OperationMetadata",
"OutputConfig",
"UpdateInstanceRequest",
"UpgradeInstanceRequest",
"ZoneMetadata",
)
| 40.666667
| 78
| 0.807572
|
227238bd84ba653cf69f7ea115242d89709c92d8
| 79,872
|
py
|
Python
|
tensorflow/python/keras/losses_test.py
|
AdaAlarm/tensorflow
|
e0db063159751276a92d88a4ad6d481b1199318c
|
[
"Apache-2.0"
] | 10
|
2021-05-25T17:43:04.000Z
|
2022-03-08T10:46:09.000Z
|
tensorflow/python/keras/losses_test.py
|
AdaAlarm/tensorflow
|
e0db063159751276a92d88a4ad6d481b1199318c
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/keras/losses_test.py
|
AdaAlarm/tensorflow
|
e0db063159751276a92d88a4ad6d481b1199318c
|
[
"Apache-2.0"
] | 6
|
2016-09-07T04:00:15.000Z
|
2022-01-12T01:47:38.000Z
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras loss functions."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.autograph.impl import api as autograph
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import combinations
from tensorflow.python.keras import losses
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test
ALL_LOSSES = [
losses.mean_squared_error, losses.mean_absolute_error,
losses.mean_absolute_percentage_error,
losses.mean_squared_logarithmic_error, losses.squared_hinge, losses.hinge,
losses.categorical_crossentropy, losses.binary_crossentropy,
losses.kl_divergence, losses.poisson,
losses.cosine_similarity, losses.log_cosh, losses.categorical_hinge
]
class KerasLossesTest(test.TestCase, parameterized.TestCase):
def test_objective_shapes_3d(self):
with self.cached_session():
y_a = backend.variable(np.random.random((5, 6, 7)))
y_b = backend.variable(np.random.random((5, 6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(objective_output.shape.as_list(), [5, 6])
def test_objective_shapes_2d(self):
with self.cached_session():
y_a = backend.variable(np.random.random((6, 7)))
y_b = backend.variable(np.random.random((6, 7)))
for obj in ALL_LOSSES:
objective_output = obj(y_a, y_b)
self.assertListEqual(objective_output.shape.as_list(), [
6,
])
def test_cce_one_hot(self):
with self.cached_session():
y_a = backend.variable(np.random.randint(0, 7, (5, 6)))
y_b = backend.variable(np.random.random((5, 6, 7)))
objective_output = losses.sparse_categorical_crossentropy(y_a, y_b)
assert backend.eval(objective_output).shape == (5, 6)
y_a = backend.variable(np.random.randint(0, 7, (6,)))
y_b = backend.variable(np.random.random((6, 7)))
objective_output = losses.sparse_categorical_crossentropy(y_a, y_b)
assert backend.eval(objective_output).shape == (6,)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_categorical_crossentropy_loss(self):
target = backend.variable(np.random.randint(0, 1, (5, 1)))
logits = backend.variable(np.random.random((5, 1)))
softmax_output = backend.softmax(logits)
output_from_logit = losses.categorical_crossentropy(
target, logits, from_logits=True)
output_from_softmax = losses.categorical_crossentropy(
target, softmax_output)
np.testing.assert_allclose(
backend.eval(output_from_logit),
backend.eval(output_from_softmax),
atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_categorical_crossentropy_loss_with_unknown_rank_tensor(self):
t = backend.placeholder()
p = backend.placeholder()
o = losses.categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor_v2_with_dispatch([[1., 0., 0.], [0., 1., 0.],
[0., 0., 1.]])
p_val = ops.convert_to_tensor_v2_with_dispatch([[.9, .05, .05],
[.05, .89, .06],
[.05, .01, .94]])
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.105, .116, .062], 1e-3)
# from logits
p_val = ops.convert_to_tensor_v2_with_dispatch([[8., 1., 1.], [0., 9., 1.],
[2., 3., 5.]])
o = losses.categorical_crossentropy(t, p, from_logits=True)
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.002, 0, .17], 1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_sparse_categorical_crossentropy_loss(self):
target = backend.variable(np.random.randint(0, 1, (5, 1)))
logits = backend.variable(np.random.random((5, 1)))
softmax_output = backend.softmax(logits)
output_from_logit = losses.sparse_categorical_crossentropy(
target, logits, from_logits=True)
output_from_softmax = losses.sparse_categorical_crossentropy(
target, softmax_output)
np.testing.assert_allclose(
backend.eval(output_from_logit),
backend.eval(output_from_softmax),
atol=1e-5)
@combinations.generate(combinations.combine(mode=['graph']))
def test_sparse_categorical_crossentropy_loss_with_unknown_rank_tensor(self):
# This test only runs in graph because the TF op layer is not supported yet
# for sparse ops.
t = backend.placeholder()
p = backend.placeholder()
o = losses.sparse_categorical_crossentropy(t, p)
t_val = ops.convert_to_tensor_v2_with_dispatch([0, 1, 2])
p_val = ops.convert_to_tensor_v2_with_dispatch([[.9, .05, .05],
[.05, .89, .06],
[.05, .01, .94]])
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.105, .116, .062], 1e-3)
# from logits
p_val = ops.convert_to_tensor_v2_with_dispatch([[8., 1., 1.], [0., 9., 1.],
[2., 3., 5.]])
o = losses.sparse_categorical_crossentropy(t, p, from_logits=True)
f = backend.function([t, p], o)
result = f([t_val, p_val])
self.assertArrayNear(result, [.002, 0, .17], 1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_binary_crossentropy_loss(self):
target = backend.variable(np.random.randint(0, 1, (5, 1)))
logits = backend.variable(np.random.random((5, 1)))
sigmoid_output = backend.sigmoid(logits)
output_from_logit = losses.binary_crossentropy(
target, logits, from_logits=True)
output_from_sigmoid = losses.binary_crossentropy(target, sigmoid_output)
np.testing.assert_allclose(
backend.eval(output_from_logit),
backend.eval(output_from_sigmoid),
atol=1e-5)
def test_get_bce(self):
bce_fn = losses.get('bce')
self.assertEqual(bce_fn, losses.binary_crossentropy)
def test_serialization(self):
fn = losses.get('mse')
config = losses.serialize(fn)
new_fn = losses.deserialize(config)
self.assertEqual(fn, new_fn)
def test_categorical_hinge(self):
y_pred = backend.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
y_true = backend.variable(np.array([[0, 1, 0], [1, 0, 0]]))
expected_loss = ((0.3 - 0.2 + 1) + (0.7 - 0.1 + 1)) / 2.0
loss = backend.eval(losses.categorical_hinge(y_true, y_pred))
self.assertAllClose(expected_loss, np.mean(loss))
def test_loss_wrapper(self):
loss_fn = losses.get('mse')
mse_obj = losses.LossFunctionWrapper(loss_fn, name=loss_fn.__name__)
self.assertEqual(mse_obj.name, 'mean_squared_error')
self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.AUTO)
y_true = constant_op.constant([[1., 9.], [2., 5.]])
y_pred = constant_op.constant([[4., 8.], [12., 3.]])
sample_weight = constant_op.constant([1.2, 0.5])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
# mse = [((4 - 1)^2 + (8 - 9)^2) / 2, ((12 - 2)^2 + (3 - 5)^2) / 2]
# mse = [5, 52]
# weighted_mse = [5 * 1.2, 52 * 0.5] = [6, 26]
    # reduced_weighted_mse = (6 + 26) / 2 = 16
self.assertAllClose(self.evaluate(loss), 16, 1e-2)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_loss_wrapper_autograph(self):
# Test that functions with control flow wrapped in a LossFunctionWrapper
# get autographed when in a tf.function
def loss_fn(y_true, y_pred):
mse_loss_fn = losses.get('mse')
if math_ops.reduce_mean(y_true) > 0:
return mse_loss_fn(y_true, y_pred)
else:
return mse_loss_fn(y_true, y_pred)
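    # Note (added for clarity): both branches return the same value, so the numeric result
    # matches plain MSE; the branch on a tensor exists only to force autograph conversion.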
mse_obj = losses.LossFunctionWrapper(loss_fn)
y_true = constant_op.constant([[1., 9.], [2., 5.]])
y_pred = constant_op.constant([[4., 8.], [12., 3.]])
sample_weight = constant_op.constant([1.2, 0.5])
@def_function.function
def tf_functioned_loss_fn(y_true, y_pred, sample_weight=None):
return mse_obj(y_true, y_pred, sample_weight=sample_weight)
loss = tf_functioned_loss_fn(y_true, y_pred, sample_weight=sample_weight)
# mse = [((4 - 1)^2 + (8 - 9)^2) / 2, ((12 - 2)^2 + (3 - 5)^2) / 2]
# mse = [5, 52]
# weighted_mse = [5 * 1.2, 52 * 0.5] = [6, 26]
    # reduced_weighted_mse = (6 + 26) / 2 = 16
self.assertAllClose(self.evaluate(loss), 16, 1e-2)
def test_invalid_reduction(self):
with self.assertRaisesRegex(ValueError, 'Invalid Reduction Key Foo.'):
losses.MeanSquaredError(reduction='Foo')
mse_obj = losses.MeanSquaredError()
y = constant_op.constant([1])
mse_obj.reduction = 'Bar'
with self.assertRaisesRegex(ValueError, 'Invalid Reduction Key Bar.'):
mse_obj(y, y)
def test_deserialization_error(self):
with self.assertRaisesRegex(ValueError, 'Could not interpret loss'):
losses.get(0)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_binary_crossentropy_uses_cached_logits(self):
logits = constant_op.constant([[-30., 30.]])
y_pred = activations.sigmoid(logits)
self.assertTrue(hasattr(y_pred, '_keras_logits'))
y_true = constant_op.constant([[0., 1.]])
loss = losses.binary_crossentropy(y_true, y_pred)[0]
# Check that logits are used. If y_pred is used directly, loss will
# collapse to 0 from underflow.
self.assertNotEqual(self.evaluate(loss), 0.)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_categorical_crossentropy_uses_cached_logits(self):
logits = constant_op.constant([[-5., 0., 5.]])
y_pred = activations.softmax(logits)
self.assertTrue(hasattr(y_pred, '_keras_logits'))
y_true = constant_op.constant([[0., 0., 1.]])
loss = losses.categorical_crossentropy(y_true, logits, from_logits=True)[0]
# Check that logits are used. If y_pred is used directly, loss will
# collapse to 0 from underflow.
self.assertNotEqual(self.evaluate(loss), 0.)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
def test_sparse_categorical_crossentropy_uses_cached_logits(self):
logits = constant_op.constant([[-5., 0., 5.]])
y_pred = activations.softmax(logits)
self.assertTrue(hasattr(y_pred, '_keras_logits'))
y_true = constant_op.constant([2])
loss = losses.sparse_categorical_crossentropy(
y_true, logits, from_logits=True)[0]
# Check that logits are used. If y_pred is used directly, loss will
# collapse to 0 from underflow.
self.assertNotEqual(self.evaluate(loss), 0.)
@combinations.generate(combinations.combine(mode=['eager']))
def test_loss_not_autographed_in_eager(self):
class MyLoss(losses.Loss):
def call(self, y_true, y_pred):
return y_true - y_pred
loss = MyLoss()
y_true = constant_op.constant([[0., 0., 0.]])
y_pred = constant_op.constant([[1., 1., 1.]])
def tf_convert(fn, _):
assert False, 'Function should not be autographed.'
return fn
with test.mock.patch.object(autograph, 'tf_convert', tf_convert):
loss(y_true, y_pred)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanSquaredErrorTest(test.TestCase):
def test_config(self):
mse_obj = losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.SUM, name='mse_1')
self.assertEqual(mse_obj.name, 'mse_1')
self.assertEqual(mse_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
loss = mse_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mse_obj = losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 49.5, 3)
def test_scalar_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 113.85, 3)
def test_sample_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 767.8 / 6, 3)
def test_ragged_tensors(self):
mse_obj = losses.MeanSquaredError()
y_true = ragged_factory_ops.constant([[1., 1., 9.], [2., 5.]])
y_pred = ragged_factory_ops.constant([[4., 1., 8.], [12., 3.]])
sample_weight = constant_op.constant([1.2, 0.5])
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
# mse = [((4 - 1)^2 + (8 - 9)^2) / 3, ((12 - 2)^2 + (3 - 5)^2) / 2]
# mse = [3.(3), 52]
# weighted_mse = [3.(3) * 1.2, 52 * 0.5] = [4, 26]
    # reduced_weighted_mse = (4 + 26) / 2 = 15
self.assertAllClose(self.evaluate(loss), 15, 1e-2)
def test_timestep_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mse_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 587 / 6, 3)
def test_zero_weighted(self):
mse_obj = losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_invalid_sample_weight(self):
mse_obj = losses.MeanSquaredError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
(r'Incompatible shapes: \[2,3\] vs. \[2,2\]|'
'Dimensions must be equal')):
mse_obj(y_true, y_pred, sample_weight=sample_weight)
def test_no_reduction(self):
mse_obj = losses.MeanSquaredError(reduction=losses_utils.ReductionV2.NONE)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [84.3333, 143.3666], 1e-3)
def test_sum_reduction(self):
mse_obj = losses.MeanSquaredError(reduction=losses_utils.ReductionV2.SUM)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mse_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 227.69998, 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanAbsoluteErrorTest(test.TestCase):
def test_config(self):
mae_obj = losses.MeanAbsoluteError(
reduction=losses_utils.ReductionV2.SUM, name='mae_1')
self.assertEqual(mae_obj.name, 'mae_1')
self.assertEqual(mae_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
loss = mae_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 5.5, 3)
def test_scalar_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 12.65, 3)
def test_sample_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 81.4 / 6, 3)
def test_timestep_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 83 / 6, 3)
def test_zero_weighted(self):
mae_obj = losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_invalid_sample_weight(self):
mae_obj = losses.MeanAbsoluteError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3, 1))
sample_weight = constant_op.constant([3, 6, 5, 0], shape=(2, 2))
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
(r'Incompatible shapes: \[2,3\] vs. \[2,2\]|'
'Dimensions must be equal')):
mae_obj(y_true, y_pred, sample_weight=sample_weight)
def test_no_reduction(self):
mae_obj = losses.MeanAbsoluteError(reduction=losses_utils.ReductionV2.NONE)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [10.7333, 14.5666], 1e-3)
def test_sum_reduction(self):
mae_obj = losses.MeanAbsoluteError(reduction=losses_utils.ReductionV2.SUM)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mae_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 25.29999, 3)
def test_ragged_tensor(self):
mae_obj = losses.MeanAbsoluteError()
y_true = ragged_factory_ops.constant([[1, 9, 2], [-5, -2]],
dtype=dtypes.float32)
y_pred = ragged_factory_ops.constant([[4, 8, 12], [8, 1]],
dtype=dtypes.float32)
# loss = [14/3, 16/2]
sample_weight = constant_op.constant([1.2, 1.0], shape=(2, 1))
loss = mae_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 6.8, 5)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanAbsolutePercentageErrorTest(test.TestCase):
def test_config(self):
mape_obj = losses.MeanAbsolutePercentageError(
reduction=losses_utils.ReductionV2.SUM, name='mape_1')
self.assertEqual(mape_obj.name, 'mape_1')
self.assertEqual(mape_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 211.8518, 3)
def test_scalar_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 487.259, 3)
def test_sample_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 422.8888, 3)
def test_ragged_tensors(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = ragged_factory_ops.constant([[1, 9, 2], [-5, -2]])
y_pred = ragged_factory_ops.constant([[4, 8, 12], [8, 1]],
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 510.7222, 3)
def test_timestep_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = mape_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 694.4445, 3)
def test_zero_weighted(self):
mape_obj = losses.MeanAbsolutePercentageError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_no_reduction(self):
mape_obj = losses.MeanAbsolutePercentageError(
reduction=losses_utils.ReductionV2.NONE)
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = mape_obj(y_true, y_pred, sample_weight=2.3)
loss = self.evaluate(loss)
self.assertArrayNear(loss, [621.8518, 352.6666], 1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class MeanSquaredLogarithmicErrorTest(test.TestCase):
def test_config(self):
msle_obj = losses.MeanSquaredLogarithmicError(
reduction=losses_utils.ReductionV2.SUM, name='mape_1')
self.assertEqual(msle_obj.name, 'mape_1')
self.assertEqual(msle_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = msle_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 1.4370, 3)
def test_scalar_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = msle_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 3.3051, 3)
def test_sample_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 3.7856, 3)
def test_timestep_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 2.6473, 3)
def test_zero_weighted(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = msle_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_ragged_tensors(self):
msle_obj = losses.MeanSquaredLogarithmicError()
y_true = ragged_factory_ops.constant([[1, 9, 2], [-5, -2]])
# log(max(y_true, 0) + 1): [[0.69314, 2.3025, 1.0986], [0., 0.]]
y_pred = ragged_factory_ops.constant([[4, 8, 12], [8, 1]],
dtype=dtypes.float32)
# log(max(y_pred, 0) + 1): [[1.6094, 2.1972, 2.5649], [2.1972, 0.6932]]
# per batch loss: [1.0002, 2.6541]
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = msle_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 5.1121, 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CosineSimilarityTest(test.TestCase):
def l2_norm(self, x, axis):
epsilon = 1e-12
square_sum = np.sum(np.square(x), axis=axis, keepdims=True)
x_inv_norm = 1 / np.sqrt(np.maximum(square_sum, epsilon))
return np.multiply(x, x_inv_norm)
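  # Note (added for clarity): losses.CosineSimilarity returns the negative mean cosine
  # similarity, so identical nonzero vectors give a loss of -1; the tests below therefore
  # negate the expected similarity before comparing.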
def setup(self, axis=1):
self.np_y_true = np.asarray([[1, 9, 2], [-5, -2, 6]], dtype=np.float32)
self.np_y_pred = np.asarray([[4, 8, 12], [8, 1, 3]], dtype=np.float32)
y_true = self.l2_norm(self.np_y_true, axis)
y_pred = self.l2_norm(self.np_y_pred, axis)
self.expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(axis,))
self.y_true = constant_op.constant(self.np_y_true)
self.y_pred = constant_op.constant(self.np_y_pred)
def test_config(self):
cosine_obj = losses.CosineSimilarity(
axis=2, reduction=losses_utils.ReductionV2.SUM, name='cosine_loss')
self.assertEqual(cosine_obj.name, 'cosine_loss')
self.assertEqual(cosine_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = -np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
sample_weight = 2.3
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = -np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_sample_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
sample_weight = np.asarray([1.2, 3.4])
loss = cosine_obj(
self.y_true,
self.y_pred,
sample_weight=constant_op.constant(sample_weight))
expected_loss = -np.mean(self.expected_loss * sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
np_y_true = self.np_y_true.reshape((2, 3, 1))
np_y_pred = self.np_y_pred.reshape((2, 3, 1))
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape((2, 3))
y_true = self.l2_norm(np_y_true, 2)
y_pred = self.l2_norm(np_y_pred, 2)
expected_loss = np.sum(np.multiply(y_true, y_pred), axis=(2,))
y_true = constant_op.constant(np_y_true)
y_pred = constant_op.constant(np_y_pred)
loss = cosine_obj(
y_true, y_pred, sample_weight=constant_op.constant(sample_weight))
expected_loss = -np.mean(expected_loss * sample_weight)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
cosine_obj = losses.CosineSimilarity()
loss = cosine_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
def test_axis(self):
self.setup(axis=1)
cosine_obj = losses.CosineSimilarity(axis=1)
loss = cosine_obj(self.y_true, self.y_pred)
expected_loss = -np.mean(self.expected_loss)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class BinaryCrossentropyTest(test.TestCase):
def test_config(self):
bce_obj = losses.BinaryCrossentropy(
reduction=losses_utils.ReductionV2.SUM, name='bce_1')
self.assertEqual(bce_obj.name, 'bce_1')
self.assertEqual(bce_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
dtype=dtypes.float32)
bce_obj = losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = constant_op.constant([[100.0, -100.0, -100.0],
[-100.0, 100.0, -100.0],
[-100.0, -100.0, 100.0]])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
bce_obj = losses.BinaryCrossentropy()
loss = bce_obj(y_true, y_pred)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [0, 15.33, 0, 0]
# Reduced loss = 15.33 / 4
self.assertAlmostEqual(self.evaluate(loss), 3.833, 3)
# Test with logits.
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# = [((100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))),
# ((100 - 100 * 0 + log(1 + exp(-100))) +
# (100 - 100 * 1 + log(1 + exp(-100))) +
# (0 + 100 * 1 + log(1 + exp(-100))))]
# = [(0 + 0 + 0) / 3, 200 / 3]
# Reduced loss = (0 + 66.666) / 2
self.assertAlmostEqual(self.evaluate(loss), 33.333, 3)
def test_scalar_weighted(self):
bce_obj = losses.BinaryCrossentropy()
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
loss = bce_obj(y_true, y_pred, sample_weight=2.3)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [0, 15.33, 0, 0]
# Weighted loss = [0, 15.33 * 2.3, 0, 0]
# Reduced loss = 15.33 * 2.3 / 4
self.assertAlmostEqual(self.evaluate(loss), 8.817, 3)
# Test with logits.
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=2.3)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0) / 3, 200 / 3]
# Weighted loss = [0 * 2.3, 66.666 * 2.3]
# Reduced loss = (0 + 66.666 * 2.3) / 2
self.assertAlmostEqual(self.evaluate(loss), 76.667, 3)
def test_sample_weighted(self):
bce_obj = losses.BinaryCrossentropy()
y_true = np.asarray([1, 0, 1, 0]).reshape([2, 2])
y_pred = np.asarray([1, 1, 1, 0], dtype=np.float32).reshape([2, 2])
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# EPSILON = 1e-7, y = y_true, y` = y_pred, Y_MAX = 0.9999999
# y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
# y` = [Y_MAX, Y_MAX, Y_MAX, EPSILON]
# Loss = -(y log(y` + EPSILON) + (1 - y) log(1 - y` + EPSILON))
# = [-log(Y_MAX + EPSILON), -log(1 - Y_MAX + EPSILON),
# -log(Y_MAX + EPSILON), -log(1)]
# = [0, 15.33, 0, 0]
# Reduced loss = 15.33 * 1.2 / 4
self.assertAlmostEqual(self.evaluate(loss), 4.6, 3)
# Test with logits.
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
weights = constant_op.constant([4, 3])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=weights)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0)/3, 200 / 3]
# Weighted loss = [0 * 4, 66.666 * 3]
# Reduced loss = (0 + 66.666 * 3) / 2
self.assertAlmostEqual(self.evaluate(loss), 100, 3)
def test_no_reduction(self):
y_true = constant_op.constant([[1, 0, 1], [0, 1, 1]])
logits = constant_op.constant([[100.0, -100.0, 100.0],
[100.0, 100.0, -100.0]])
bce_obj = losses.BinaryCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE)
loss = bce_obj(y_true, logits)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0)/3, (200)/3]
self.assertAllClose((0., 66.6666), self.evaluate(loss), 3)
def test_label_smoothing(self):
logits = constant_op.constant([[100.0, -100.0, -100.0]])
y_true = constant_op.constant([[1, 0, 1]])
label_smoothing = 0.1
# Loss: max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Label smoothing: z' = z * (1 - L) + 0.5L
# 1 = 1 - 0.5L
# 0 = 0.5L
# Applying the above two fns to the given input:
# (100 - 100 * (1 - 0.5 L) + 0 +
# 0 + 100 * (0.5 L) + 0 +
# 0 + 100 * (1 - 0.5 L) + 0) * (1/3)
# = (100 + 50L) * 1/3
bce_obj = losses.BinaryCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
loss = bce_obj(y_true, logits)
expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
def test_ragged_tensors(self):
bce_obj = losses.BinaryCrossentropy()
y_true = ragged_factory_ops.constant([[1, 0, 1], [0]])
y_pred = ragged_factory_ops.constant([[1, 1, 1], [0]], dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = bce_obj(y_true, y_pred, sample_weight=sample_weight)
# per batch loss = [ sum([0, 15.33, 0]) / 3, 0. ]
# = [ 5.11, 0]
# Reduced loss = 5.11 * 1.2 / 2
self.assertAlmostEqual(self.evaluate(loss), 3.0666, 3)
# Test with logits.
y_true = ragged_factory_ops.constant([[1, 0, 1], [0, 1]])
logits = ragged_factory_ops.constant([[100.0, -100.0, 100.0],
[100.0, 100.0]])
weights = constant_op.constant([4, 3])
bce_obj = losses.BinaryCrossentropy(from_logits=True)
loss = bce_obj(y_true, logits, sample_weight=weights)
# Loss = max(x, 0) - x * z + log(1 + exp(-abs(x)))
# (where x = logits and z = y_true)
# Loss = [(0 + 0 + 0)/3, 100 / 2]
# Weighted loss = [0 * 4, 50 * 3]
# Reduced loss = (0 + 50 * 3) / 2
self.assertAlmostEqual(self.evaluate(loss), 75., 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CategoricalCrossentropyTest(test.TestCase):
def test_config(self):
cce_obj = losses.CategoricalCrossentropy(
reduction=losses_utils.ReductionV2.SUM, name='bce_1')
self.assertEqual(cce_obj.name, 'bce_1')
self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]],
dtype=dtypes.int64)
y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
dtype=dtypes.float32)
cce_obj = losses.CategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), .0573, 3)
def test_scalar_weighted(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .1317, 3)
def test_sample_weighted(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
def test_no_reduction(self):
y_true = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = losses.CategoricalCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE)
loss = cce_obj(y_true, logits)
self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)
def test_label_smoothing(self):
logits = constant_op.constant([[100.0, -100.0, -100.0]])
y_true = constant_op.constant([[1, 0, 0]])
label_smoothing = 0.1
# Softmax Cross Entropy Loss: -\sum_i p_i \log q_i
# where for a softmax activation
# \log q_i = x_i - \log \sum_j \exp x_j
# = x_i - x_max - \log \sum_j \exp (x_j - x_max)
# For our activations, [100, -100, -100]
# \log ( exp(0) + exp(-200) + exp(-200) ) = 0
# so our log softmaxes become: [0, -200, -200]
# Label smoothing: z' = z * (1 - L) + L/n
# 1 = 1 - L + L/n
# 0 = L/n
# Applying the above two fns to the given input:
# -0 * (1 - L + L/n) + 200 * L/n + 200 * L/n = 400 L/n
cce_obj = losses.CategoricalCrossentropy(
from_logits=True, label_smoothing=label_smoothing)
loss = cce_obj(y_true, logits)
expected_value = 400.0 * label_smoothing / 3.0
self.assertAlmostEqual(self.evaluate(loss), expected_value, 3)
def test_shape_mismatch(self):
y_true = constant_op.constant([[0], [1], [2]])
y_pred = constant_op.constant([[.9, .05, .05], [.5, .89, .6],
[.05, .01, .94]])
cce_obj = losses.CategoricalCrossentropy()
with self.assertRaisesRegex(ValueError, 'Shapes .+ are incompatible'):
cce_obj(y_true, y_pred)
def test_ragged_tensors(self):
cce_obj = losses.CategoricalCrossentropy()
y_true = ragged_factory_ops.constant([[[1, 0, 0], [0, 1, 0]], [[0, 0, 1]]])
y_pred = ragged_factory_ops.constant(
[[[.9, .05, .05], [.5, .89, .6]], [[.05, .01, .94]]],
dtype=dtypes.float32)
# batch losses [[0.1054, 0.8047], [0.0619]]
sample_weight = constant_op.constant([[1.2], [3.4]], shape=(2, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# sum([0.1054, 0.8047, 0.0619]) / 3
self.assertAlmostEqual(self.evaluate(loss), 0.4341, 3)
# Test with logits.
logits = ragged_factory_ops.constant([[[8., 1., 1.], [0., 9., 1.]],
[[2., 3., 5.]]])
cce_obj = losses.CategoricalCrossentropy(from_logits=True)
# batch losses [[0.0018, 0.0004], [0.1698]]
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.1934, 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SparseCategoricalCrossentropyTest(test.TestCase):
def test_config(self):
cce_obj = losses.SparseCategoricalCrossentropy(
reduction=losses_utils.ReductionV2.SUM, name='scc')
self.assertEqual(cce_obj.name, 'scc')
self.assertEqual(cce_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct_unweighted(self):
y_true = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
y_pred = constant_op.constant([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]],
dtype=dtypes.float32)
cce_obj = losses.SparseCategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
# Test with logits.
logits = constant_op.constant([[10., 0., 0.], [0., 10., 0.], [0., 0., 10.]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = constant_op.constant([0, 1, 2])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), .3239, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(self.evaluate(loss), .0573, 3)
def test_scalar_weighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = constant_op.constant([[0], [1], [2]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .1317, 3)
def test_sample_weighted(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = constant_op.constant([[0], [1], [2]])
y_pred = constant_op.constant(
[[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]], dtype=dtypes.float32)
sample_weight = constant_op.constant([[1.2], [3.4], [5.6]], shape=(3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 1.0696, 3)
# Test with logits.
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.31829, 3)
def test_no_reduction(self):
y_true = constant_op.constant([[0], [1], [2]])
logits = constant_op.constant([[8., 1., 1.], [0., 9., 1.], [2., 3., 5.]])
cce_obj = losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=losses_utils.ReductionV2.NONE)
loss = cce_obj(y_true, logits)
self.assertAllClose((0.001822, 0.000459, 0.169846), self.evaluate(loss), 3)
def test_non_tensor(self):
# Test case for GitHub issue 33394.
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = [[0], [1], [2]]
y_pred = [[.9, .05, .05], [.5, .89, .6], [.05, .01, .94]]
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), .7449, 3)
def test_ragged_tensors(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = ragged_factory_ops.constant([[0, 1], [2]])
y_pred = ragged_factory_ops.constant(
[[[.9, .05, .05], [.5, .89, .6]], [[.05, .01, .94]]],
dtype=dtypes.float32)
# batch losses [[0.1054, 0.8047], [0.0619]]
sample_weight = constant_op.constant([[1.2], [3.4]], shape=(2, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# sum([0.1054, 0.8047, 0.0619]) / 3
self.assertAlmostEqual(self.evaluate(loss), 0.4341, 3)
# Test with logits.
logits = ragged_factory_ops.constant([[[8., 1., 1.], [0., 9., 1.]],
[[2., 3., 5.]]])
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
# batch losses [[0.0018, 0.0004], [0.1698]]
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.1934, 3)
def test_ragged_tensors_rank_1(self):
cce_obj = losses.SparseCategoricalCrossentropy()
y_true = ragged_factory_ops.constant([[0, 1], [2]])
y_pred = ragged_factory_ops.constant(
[[[.9, .05, .05], [.5, .89, .6]], [[.05, .01, .94]]],
ragged_rank=1,
dtype=dtypes.float32)
# batch losses [[0.1054, 0.8047], [0.0619]]
sample_weight = constant_op.constant([[1.2], [3.4]], shape=(2, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
# sum([0.1054, 0.8047, 0.0619]) / 3
self.assertAlmostEqual(self.evaluate(loss), 0.4341, 3)
# Test with logits.
logits = ragged_factory_ops.constant(
[[[8., 1., 1.], [0., 9., 1.]], [[2., 3., 5.]]], ragged_rank=1)
cce_obj = losses.SparseCategoricalCrossentropy(from_logits=True)
# batch losses [[0.0018, 0.0004], [0.1698]]
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0.1934, 3)
def test_ragged_tensors_3d(self):
# shape [2, 1, None]
y_true = ragged_factory_ops.constant([[[1, 1]], [[0]]])
# shape [2, 1, None, 2]
y_pred = ragged_factory_ops.constant([[[[0.1, 0.9], [0.1, 0.9]]],
[[[0.9, 0.1]]]])
cce_obj = losses.SparseCategoricalCrossentropy()
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(self.evaluate(loss), 0.1054, 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class HingeTest(test.TestCase):
def test_config(self):
hinge_obj = losses.Hinge(
reduction=losses_utils.ReductionV2.SUM, name='hinge_loss')
self.assertEqual(hinge_obj.name, 'hinge_loss')
self.assertEqual(hinge_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
hinge_obj = losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# reduced loss = (0.6 + 0.4125) / 2
loss = hinge_obj(y_true, y_pred)
self.assertAllClose(0.506, self.evaluate(loss), atol=1e-3)
def test_scalar_weighted(self):
hinge_obj = losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# weighted_loss = [0.6 * 2.3, 0.4125 * 2.3]
# reduced loss = (0.6 + 0.4125) * 2.3 / 2
loss = hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 1.164, 3)
# Verify we get the same output when the same input is given
loss_2 = hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAllClose(self.evaluate(loss), self.evaluate(loss_2), 1e-3)
def test_sample_weighted(self):
hinge_obj = losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# loss = [(0.7 + 0.8 + 0.9 + 0) / 4, (0.75 + 0 + 0.5 + 0.4) / 4]
# = [0.6, 0.4125]
# weighted loss = [0.6 * 1.2, 0.4125 * 3.4]
# reduced loss = (0.6 * 1.2 + 0.4125 * 3.4) / 2
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 1.061, 1e-3)
def test_timestep_weighted(self):
hinge_obj = losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
y_pred = constant_op.constant(
[[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1))
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
# y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
# [[0.25], [1], [0.5], [0.6]]]
# 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
# [[0.75], [0], [0.5], [0.4]]]
# loss = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# weighted loss = [[2.1, 4.8, 4.5, 0], [3, 0, 0.5, 1.2]]
# reduced loss = (2.1 + 4.8 + 4.5 + 0 + 3 + 0 + 0.5 + 1.2) / 8
loss = hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 2.012, 1e-3)
def test_zero_weighted(self):
hinge_obj = losses.Hinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
loss = hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAllClose(self.evaluate(loss), 0., 1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class SquaredHingeTest(test.TestCase):
def test_config(self):
sq_hinge_obj = losses.SquaredHinge(
reduction=losses_utils.ReductionV2.SUM, name='sq_hinge_loss')
self.assertEqual(sq_hinge_obj.name, 'sq_hinge_loss')
self.assertEqual(sq_hinge_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# reduced loss = (0.485 + 0.2431) / 2
loss = sq_hinge_obj(y_true, y_pred)
self.assertAllClose(self.evaluate(loss), 0.364, 1e-3)
def test_scalar_weighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = max(0, 1-y_true * y_pred), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# weighted loss = [0.485 * 2.3, 0.2431 * 2.3]
# reduced loss = (0.485 + 0.2431) * 2.3 / 2
loss = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAllClose(self.evaluate(loss), 0.837, 1e-3)
# Verify we get the same output when the same input is given
loss_2 = sq_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
# loss = square(max(0, 1 - y_true * y_pred)), where y_true is -1/1
# y_true = [[-1, 1, -1, 1], [-1, -1, 1, 1]]
# y_true * y_pred = [[0.3, 0.2, 0.1, 1.6], [0.25, 1, 0.5, 0.6]]
# 1 - y_true * y_pred = [[0.7, 0.8, 0.9, -0.6], [0.75, 0, 0.5, 0.4]]
# max(0, 1 - y_true * y_pred) = [[0.7, 0.8, 0.9, 0], [0.75, 0, 0.5, 0.4]]
# squared(max(0, 1 - y_true * y_pred)) = [[0.49, 0.64, 0.81, 0],
# [0.5625, 0, 0.25, 0.16]]
# loss = [(0.49 + 0.64 + 0.81 + 0) / 4, (0.5625 + 0 + 0.25 + 0.16) / 4]
# = [0.485, 0.2431]
# weighted loss = [0.485 * 1.2, 0.2431 * 3.4]
# reduced loss = (0.485 * 1.2 + 0.2431 * 3.4) / 2
sample_weight = constant_op.constant([1.2, 3.4])
loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 0.704, 1e-3)
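# Illustrative NumPy check (added comment, not part of the original test) for
# the 0.704 above; all names are local to this sketch:
#   margin = np.maximum(0., 1. - np.array([[-1., 1., -1., 1.], [-1., -1., 1., 1.]]) *
#                       np.array([[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]]))
#   per_sample = np.square(margin).mean(axis=-1)      # [0.485, 0.243125]
#   (per_sample * np.array([1.2, 3.4])).mean()        # ~0.7043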
def test_timestep_weighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]], shape=(2, 4, 1))
y_pred = constant_op.constant(
[[-0.3, 0.2, -0.1, 1.6], [-0.25, -1., 0.5, 0.6]], shape=(2, 4, 1))
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2, 1, 3], shape=(2, 4))
# loss = square(max(0, 1 - y_true * y_pred)), where y_true is -1/1
# y_true = [[[-1], [1], [-1], [1]], [[-1], [-1], [1], [1]]]
# y_true * y_pred = [[[0.3], [0.2], [0.1], [1.6]],
# [[0.25], [1], [0.5], [0.6]]]
# 1 - y_true * y_pred = [[[0.7], [0.8], [0.9], [-0.6]],
# [[0.75], [0], [0.5], [0.4]]]
# loss = squared(max(0, 1 - y_true * y_pred))
#      = [[0.49, 0.64, 0.81, 0], [0.5625, 0, 0.25, 0.16]]
# weighted loss = [[1.47, 3.84, 4.05, 0], [2.25, 0, 0.25, 0.48]]
# reduced loss = (1.47 + 3.84 + 4.05 + 0 + 2.25 + 0 + 0.25 + 0.48) / 8
loss = sq_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(self.evaluate(loss), 1.542, 1e-3)
def test_zero_weighted(self):
sq_hinge_obj = losses.SquaredHinge()
y_true = constant_op.constant([[0, 1, 0, 1], [0, 0, 1, 1]])
y_pred = constant_op.constant([[-0.3, 0.2, -0.1, 1.6],
[-0.25, -1., 0.5, 0.6]])
loss = sq_hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAllClose(self.evaluate(loss), 0., 1e-3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CategoricalHingeTest(test.TestCase):
def test_config(self):
cat_hinge_obj = losses.CategoricalHinge(
reduction=losses_utils.ReductionV2.SUM, name='cat_hinge_loss')
self.assertEqual(cat_hinge_obj.name, 'cat_hinge_loss')
self.assertEqual(cat_hinge_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5], shape=(2, 2))
y_pred = constant_op.constant([4, 8, 12, 8],
shape=(2, 2),
dtype=dtypes.float32)
loss = cat_hinge_obj(y_true, y_pred)
# pos = reduce_sum(y_true * y_pred) = [1*4+8*9, 12*2+8*-5] = [76, -16]
# neg = reduce_max((1. - y_true) * y_pred) = [[0, -64], [-12, 48]] = [0, 48]
# cat_hinge = max(0., neg - pos + 1.) = [0, 65]
# reduced_loss = (0 + 65)/2 = 32.5
self.assertAlmostEqual(self.evaluate(loss), 32.5, 3)
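# Sketch of the categorical hinge formula used above (added comment, not part
# of the original test); with multi-class targets the loss is
#   pos = sum(y_true * y_pred, axis=-1)
#   neg = max((1. - y_true) * y_pred, axis=-1)
#   loss = max(0., neg - pos + 1.)
# which for the inputs above gives [0., 65.] and a mean of 32.5.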
def test_scalar_weighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), 83.95, 3)
# Verify we get the same output when the same input is given
loss_2 = cat_hinge_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 124.1, 3)
def test_timestep_weighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3, 1))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3, 1),
dtype=dtypes.float32)
sample_weight = constant_op.constant([3, 6, 5, 0, 4, 2], shape=(2, 3))
loss = cat_hinge_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 4.0, 3)
def test_zero_weighted(self):
cat_hinge_obj = losses.CategoricalHinge()
y_true = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
y_pred = constant_op.constant([4, 8, 12, 8, 1, 3],
shape=(2, 3),
dtype=dtypes.float32)
loss = cat_hinge_obj(y_true, y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class LogCoshTest(test.TestCase):
def setup(self):
y_pred = np.asarray([1, 9, 2, -5, -2, 6]).reshape((2, 3))
y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
error = y_pred - y_true
self.expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
self.y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(y_true)
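# Added note (illustrative, not in the original test): the reference value is
# log(cosh(error)); a numerically safer but equivalent form, assuming standard
# NumPy, is
#   error + np.logaddexp(-2. * error, 0.) - np.log(2.)
# which avoids overflow of exp(error) for large |error|.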
def test_config(self):
logcosh_obj = losses.LogCosh(
reduction=losses_utils.ReductionV2.SUM, name='logcosh_loss')
self.assertEqual(logcosh_obj.name, 'logcosh_loss')
self.assertEqual(logcosh_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
loss = logcosh_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 2.3
loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = sample_weight * np.sum(
self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
y_true = np.asarray([1, 9, 2, -5, -2, 6]).reshape(2, 3, 1)
y_pred = np.asarray([4, 8, 12, 8, 1, 3]).reshape(2, 3, 1)
error = y_pred - y_true
expected_losses = np.log((np.exp(error) + np.exp(-error)) / 2)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
y_true = constant_op.constant(y_true)
loss = logcosh_obj(
y_true,
y_pred,
sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
expected_loss = np.sum(expected_losses * sample_weight) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
logcosh_obj = losses.LogCosh()
sample_weight = 0
loss = logcosh_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class PoissonTest(test.TestCase):
def setup(self):
self.np_y_pred = np.asarray([1, 9, 2, 5, 2, 6]).reshape((2, 3))
self.np_y_true = np.asarray([4, 8, 12, 8, 1, 3]).reshape((2, 3))
self.batch_size = 6
self.expected_losses = self.np_y_pred - np.multiply(self.np_y_true,
np.log(self.np_y_pred))
self.y_pred = constant_op.constant(self.np_y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(self.np_y_true)
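# Added note (illustrative): this reference follows the Poisson negative
# log-likelihood up to terms independent of y_pred,
#   loss = y_pred - y_true * log(y_pred)
# The library implementation is expected to add a small epsilon inside the log
# to guard against log(0); with the strictly positive y_pred used here the two
# agree to the tolerance of the assertions below.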
def test_config(self):
poisson_obj = losses.Poisson(
reduction=losses_utils.ReductionV2.SUM, name='poisson')
self.assertEqual(poisson_obj.name, 'poisson')
self.assertEqual(poisson_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
poisson_obj = losses.Poisson()
loss = poisson_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
sample_weight = 2.3
loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = sample_weight * np.sum(
self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = poisson_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
y_true = self.np_y_true.reshape(2, 3, 1)
y_pred = self.np_y_pred.reshape(2, 3, 1)
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3, 1)
expected_losses = y_pred - np.multiply(y_true, np.log(y_pred))
y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
y_true = constant_op.constant(y_true)
loss = poisson_obj(
y_true,
y_pred,
sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
expected_loss = np.sum(expected_losses * sample_weight) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
poisson_obj = losses.Poisson()
loss = poisson_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class KLDivergenceTest(test.TestCase):
def setup(self):
self.np_y_pred = np.asarray([.4, .9, .12, .36, .3, .4]).reshape((2, 3))
self.np_y_true = np.asarray([.5, .8, .12, .7, .43, .8]).reshape((2, 3))
self.batch_size = 2
self.expected_losses = np.multiply(self.np_y_true,
np.log(self.np_y_true / self.np_y_pred))
self.y_pred = constant_op.constant(self.np_y_pred, dtype=dtypes.float32)
self.y_true = constant_op.constant(self.np_y_true)
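# Added note (illustrative): the reference here is the elementwise KL term
#   y_true * log(y_true / y_pred)
# summed over the feature axis and averaged over the batch of 2; in NumPy,
#   np.sum(y_true * np.log(y_true / y_pred), axis=-1).mean()
# (the library implementation typically clips y_true and y_pred away from 0
# before taking the log, which does not matter for these inputs).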
def test_config(self):
k_obj = losses.KLDivergence(
reduction=losses_utils.ReductionV2.SUM, name='kld')
self.assertEqual(k_obj.name, 'kld')
self.assertEqual(k_obj.reduction, losses_utils.ReductionV2.SUM)
def test_unweighted(self):
self.setup()
k_obj = losses.KLDivergence()
loss = k_obj(self.y_true, self.y_pred)
expected_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_scalar_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
sample_weight = 2.3
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = sample_weight * np.sum(
self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
sample_weight = constant_op.constant([1.2, 3.4], shape=(2, 1))
loss = k_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
expected_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape(2, 3))
expected_loss = np.sum(expected_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_timestep_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
y_true = self.np_y_true.reshape(2, 3, 1)
y_pred = self.np_y_pred.reshape(2, 3, 1)
sample_weight = np.asarray([3, 6, 5, 0, 4, 2]).reshape(2, 3)
expected_losses = np.sum(
np.multiply(y_true, np.log(y_true / y_pred)), axis=-1)
y_pred = constant_op.constant(y_pred, dtype=dtypes.float32)
y_true = constant_op.constant(y_true)
loss = k_obj(
y_true, y_pred, sample_weight=constant_op.constant(sample_weight))
num_timesteps = 3
expected_loss = np.sum(expected_losses * sample_weight) / (
self.batch_size * num_timesteps)
self.assertAlmostEqual(self.evaluate(loss), expected_loss, 3)
def test_zero_weighted(self):
self.setup()
k_obj = losses.KLDivergence()
loss = k_obj(self.y_true, self.y_pred, sample_weight=0)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class HuberLossTest(test.TestCase):
def huber_loss(self, y_true, y_pred, delta=1.0):
error = y_pred - y_true
abs_error = np.abs(error)
quadratic = np.minimum(abs_error, delta)
linear = np.subtract(abs_error, quadratic)
return np.add(
np.multiply(0.5, np.multiply(quadratic, quadratic)),
np.multiply(delta, linear))
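# The decomposition above is an illustrative restatement of the piecewise Huber
# definition (added comment, not part of the original helper):
#   0.5 * e**2                    if |e| <= delta
#   delta * (|e| - 0.5 * delta)   otherwise
# Since quadratic = min(|e|, delta) and linear = |e| - quadratic, the two forms
# agree for every error value e.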
def setup(self, delta=1.0):
self.np_y_pred = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
self.np_y_true = np.asarray([1., 0., 1., 1., 0., 0.]).reshape((2, 3))
self.batch_size = 6
self.expected_losses = self.huber_loss(self.np_y_true, self.np_y_pred,
delta)
self.y_pred = constant_op.constant(self.np_y_pred)
self.y_true = constant_op.constant(self.np_y_true)
def test_config(self):
h_obj = losses.Huber(reduction=losses_utils.ReductionV2.SUM, name='huber')
self.assertEqual(h_obj.name, 'huber')
self.assertEqual(h_obj.reduction, losses_utils.ReductionV2.SUM)
def test_all_correct(self):
self.setup()
h_obj = losses.Huber()
loss = h_obj(self.y_true, self.y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
def test_unweighted(self):
self.setup()
h_obj = losses.Huber()
loss = h_obj(self.y_true, self.y_pred)
actual_loss = np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_scalar_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
# Verify we get the same output when the same input is given
loss_2 = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), self.evaluate(loss_2), 3)
def test_sample_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = constant_op.constant((1.2, 3.4), shape=(2, 1))
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = np.multiply(
self.expected_losses,
np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_timestep_weighted(self):
self.setup()
h_obj = losses.Huber()
y_pred = self.np_y_pred.reshape((2, 3, 1))
y_true = self.np_y_true.reshape((2, 3, 1))
expected_losses = self.huber_loss(y_true, y_pred)
y_pred = constant_op.constant(y_pred)
y_true = constant_op.constant(y_true)
sample_weight = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3, 1))
loss = h_obj(
y_true,
y_pred,
sample_weight=constant_op.constant(sample_weight, shape=(2, 3)))
actual_loss = np.multiply(expected_losses, sample_weight)
actual_loss = np.sum(actual_loss) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_zero_weighted(self):
self.setup()
h_obj = losses.Huber()
sample_weight = 0
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(self.evaluate(loss), 0., 3)
def test_non_default_delta(self):
self.setup(delta=0.8)
h_obj = losses.Huber(delta=0.8)
sample_weight = 2.3
loss = h_obj(self.y_true, self.y_pred, sample_weight=sample_weight)
actual_loss = sample_weight * np.sum(self.expected_losses) / self.batch_size
self.assertAlmostEqual(self.evaluate(loss), actual_loss, 3)
def test_loss_with_non_default_dtype(self):
# Test case for GitHub issue:
# https://github.com/tensorflow/tensorflow/issues/39004
self.setup()
h_obj = losses.Huber()
try:
backend.set_floatx('float64')
loss = h_obj(self.y_true, self.y_true)
self.assertAlmostEqual(self.evaluate(loss), 0.0, 3)
finally:
backend.set_floatx('float32')
class BinaryTruePositivesViaControlFlow(losses.Loss):
def __init__(self, reduction=losses_utils.ReductionV2.AUTO):
super().__init__(reduction=reduction)
def call(self, y_true, y_pred):
y_true = math_ops.cast(y_true, dtypes.bool)
y_pred = math_ops.cast(y_pred, dtypes.bool)
result = constant_op.constant(0.0)
for i in range(len(y_true)):
for j in range(len(y_true[i])):
if y_true[i][j] and y_pred[i][j]:
result = result + 1
return result
@combinations.generate(combinations.combine(mode=['graph', 'eager']))
class CustomLossTest(test.TestCase):
def test_autograph(self):
y_true = constant_op.constant([[0, 0.9, 0, 1, 0], [0, 0, 1, 1, 1],
[1, 1, 1, 1, 0], [0, 0, 0, 0, 1.5]])
y_pred = constant_op.constant([[0, 0, 1, 5, 0], [1, 1, 1, 1, 1],
[0, 1, 0, 1, 0], [1, 10, 1, 1, 1]])
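# For reference (added comment): after casting to bool, the positions where
# both y_true and y_pred are truthy number 1 + 3 + 2 + 1 = 7 across the four
# rows, which is the value asserted below.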
@def_function.function
def loss_fn(y_true, y_pred):
loss_obj = BinaryTruePositivesViaControlFlow()
return loss_obj(y_true, y_pred)
loss = loss_fn(y_true, y_pred)
self.assertAllEqual(
self.evaluate(loss),
7.0,
)
if __name__ == '__main__':
test.main()
| 42.621131 | 80 | 0.616762 |
2a8f73e1f7e0db8837109bb4a82d3ac71a3bbb95 | 26,103 | py | Python | backend/core/migrations/0001_initial.py | iamnkc/tournesol | 4a09985f494577917c357783a37dfae02c57fd82 | ["CC0-1.0"] | null | null | null | backend/core/migrations/0001_initial.py | iamnkc/tournesol | 4a09985f494577917c357783a37dfae02c57fd82 | ["CC0-1.0"] | null | null | null | backend/core/migrations/0001_initial.py | iamnkc/tournesol | 4a09985f494577917c357783a37dfae02c57fd82 | ["CC0-1.0"] | null | null | null |
# Generated by Django 3.2.4 on 2021-06-06 13:49
import core.utils.models
import core.utils.validators
import django.contrib.auth.models
import django.contrib.auth.validators
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('is_demo', models.BooleanField(default=False, help_text='Is a demo account?')),
('first_name', models.CharField(blank=True, help_text='First name', max_length=100, null=True)),
('last_name', models.CharField(blank=True, help_text='Last name', max_length=100, null=True)),
('title', models.TextField(blank=True, help_text='Your position', null=True)),
('bio', models.TextField(blank=True, help_text='Self-description (degree, biography, ...)', null=True)),
('comment_anonymously', models.BooleanField(default=False, help_text='Comment anonymously by-default')),
('show_online_presence', models.BooleanField(default=False, help_text='Show my online presence on Tournesol')),
('show_my_profile', models.BooleanField(default=True, help_text='Show my profile on Tournesol')),
('birth_year', models.IntegerField(blank=True, help_text='Year of birth', null=True, validators=[django.core.validators.MinValueValidator(1900), django.core.validators.MaxValueValidator(2100)])),
('gender', models.CharField(choices=[('Not Specified', 'Not Specified'), ('Non-binary', 'Non-binary'), ('Other', 'Other'), ('Female', 'Female'), ('Male', 'Male')], default='Not Specified', help_text='Your gender', max_length=50)),
('nationality', models.CharField(choices=[('Not Specified', 'Not Specified'), ('Afghanistan', 'Afghanistan'), ('Åland Islands', 'Åland Islands'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('American Samoa', 'American Samoa'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Anguilla', 'Anguilla'), ('Antarctica', 'Antarctica'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas', 'Bahamas'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bermuda', 'Bermuda'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bonaire, Sint Eustatius and Saba', 'Bonaire, Sint Eustatius and Saba'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Bouvet Island', 'Bouvet Island'), ('Brazil', 'Brazil'), ('British Indian Ocean Territory', 'British Indian Ocean Territory'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burundi', 'Burundi'), ('Cabo Verde', 'Cabo Verde'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Canada', 'Canada'), ('Cayman Islands', 'Cayman Islands'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Christmas Island', 'Christmas Island'), ('Cocos (Keeling) Islands', 'Cocos (Keeling) Islands'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo', 'Congo'), ('Congo (the Democratic Republic of the)', 'Congo (the Democratic Republic of the)'), ('Cook Islands', 'Cook Islands'), ('Costa Rica', 'Costa Rica'), ("Côte d'Ivoire", "Côte d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curaçao', 'Curaçao'), ('Cyprus', 'Cyprus'), ('Czechia', 'Czechia'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Eswatini', 'Eswatini'), ('Ethiopia', 'Ethiopia'), ('Falkland Islands (Malvinas)', 'Falkland Islands (Malvinas)'), ('Faroe Islands', 'Faroe Islands'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('French Guiana', 'French Guiana'), ('French Polynesia', 'French Polynesia'), ('French Southern Territories', 'French Southern Territories'), ('Gabon', 'Gabon'), ('Gambia', 'Gambia'), ('Georgia', 'Georgia'), ('Germany', 'Germany'), ('Ghana', 'Ghana'), ('Gibraltar', 'Gibraltar'), ('Greece', 'Greece'), ('Greenland', 'Greenland'), ('Grenada', 'Grenada'), ('Guadeloupe', 'Guadeloupe'), ('Guam', 'Guam'), ('Guatemala', 'Guatemala'), ('Guernsey', 'Guernsey'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Heard Island and McDonald Islands', 'Heard Island and McDonald Islands'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Isle of Man', 'Isle of Man'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jersey', 'Jersey'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Kuwait', 'Kuwait'), 
('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macao', 'Macao'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Martinique', 'Martinique'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mayotte', 'Mayotte'), ('Mexico', 'Mexico'), ('Micronesia (Federated States of)', 'Micronesia (Federated States of)'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Montserrat', 'Montserrat'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Myanmar', 'Myanmar'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('New Caledonia', 'New Caledonia'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('Niue', 'Niue'), ('Norfolk Island', 'Norfolk Island'), ('North Korea', 'North Korea'), ('North Macedonia', 'North Macedonia'), ('Northern Mariana Islands', 'Northern Mariana Islands'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestine, State of', 'Palestine, State of'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), ('Paraguay', 'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Pitcairn', 'Pitcairn'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Puerto Rico', 'Puerto Rico'), ('Qatar', 'Qatar'), ('Réunion', 'Réunion'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Barthélemy', 'Saint Barthélemy'), ('Saint Helena, Ascension and Tristan da Cunha', 'Saint Helena, Ascension and Tristan da Cunha'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Martin (French part)', 'Saint Martin (French part)'), ('Saint Pierre and Miquelon', 'Saint Pierre and Miquelon'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten (Dutch part)', 'Sint Maarten (Dutch part)'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Georgia and the South Sandwich Islands', 'South Georgia and the South Sandwich Islands'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Svalbard and Jan Mayen', 'Svalbard and Jan Mayen'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tokelau', 'Tokelau'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Turks and Caicos Islands', 'Turks and Caicos Islands'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab 
Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('United States Minor Outlying Islands', 'United States Minor Outlying Islands'), ('United States of America', 'United States of America'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Virgin Islands (British)', 'Virgin Islands (British)'), ('Virgin Islands (U.S.)', 'Virgin Islands (U.S.)'), ('Wallis and Futuna', 'Wallis and Futuna'), ('Western Sahara', 'Western Sahara'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe')], default='Not Specified', help_text='Your country of nationality', max_length=100)),
('residence', models.CharField(choices=[('Not Specified', 'Not Specified'), ('Afghanistan', 'Afghanistan'), ('Åland Islands', 'Åland Islands'), ('Albania', 'Albania'), ('Algeria', 'Algeria'), ('American Samoa', 'American Samoa'), ('Andorra', 'Andorra'), ('Angola', 'Angola'), ('Anguilla', 'Anguilla'), ('Antarctica', 'Antarctica'), ('Antigua and Barbuda', 'Antigua and Barbuda'), ('Argentina', 'Argentina'), ('Armenia', 'Armenia'), ('Aruba', 'Aruba'), ('Australia', 'Australia'), ('Austria', 'Austria'), ('Azerbaijan', 'Azerbaijan'), ('Bahamas', 'Bahamas'), ('Bahrain', 'Bahrain'), ('Bangladesh', 'Bangladesh'), ('Barbados', 'Barbados'), ('Belarus', 'Belarus'), ('Belgium', 'Belgium'), ('Belize', 'Belize'), ('Benin', 'Benin'), ('Bermuda', 'Bermuda'), ('Bhutan', 'Bhutan'), ('Bolivia', 'Bolivia'), ('Bonaire, Sint Eustatius and Saba', 'Bonaire, Sint Eustatius and Saba'), ('Bosnia and Herzegovina', 'Bosnia and Herzegovina'), ('Botswana', 'Botswana'), ('Bouvet Island', 'Bouvet Island'), ('Brazil', 'Brazil'), ('British Indian Ocean Territory', 'British Indian Ocean Territory'), ('Brunei', 'Brunei'), ('Bulgaria', 'Bulgaria'), ('Burkina Faso', 'Burkina Faso'), ('Burundi', 'Burundi'), ('Cabo Verde', 'Cabo Verde'), ('Cambodia', 'Cambodia'), ('Cameroon', 'Cameroon'), ('Canada', 'Canada'), ('Cayman Islands', 'Cayman Islands'), ('Central African Republic', 'Central African Republic'), ('Chad', 'Chad'), ('Chile', 'Chile'), ('China', 'China'), ('Christmas Island', 'Christmas Island'), ('Cocos (Keeling) Islands', 'Cocos (Keeling) Islands'), ('Colombia', 'Colombia'), ('Comoros', 'Comoros'), ('Congo', 'Congo'), ('Congo (the Democratic Republic of the)', 'Congo (the Democratic Republic of the)'), ('Cook Islands', 'Cook Islands'), ('Costa Rica', 'Costa Rica'), ("Côte d'Ivoire", "Côte d'Ivoire"), ('Croatia', 'Croatia'), ('Cuba', 'Cuba'), ('Curaçao', 'Curaçao'), ('Cyprus', 'Cyprus'), ('Czechia', 'Czechia'), ('Denmark', 'Denmark'), ('Djibouti', 'Djibouti'), ('Dominica', 'Dominica'), ('Dominican Republic', 'Dominican Republic'), ('Ecuador', 'Ecuador'), ('Egypt', 'Egypt'), ('El Salvador', 'El Salvador'), ('Equatorial Guinea', 'Equatorial Guinea'), ('Eritrea', 'Eritrea'), ('Estonia', 'Estonia'), ('Eswatini', 'Eswatini'), ('Ethiopia', 'Ethiopia'), ('Falkland Islands (Malvinas)', 'Falkland Islands (Malvinas)'), ('Faroe Islands', 'Faroe Islands'), ('Fiji', 'Fiji'), ('Finland', 'Finland'), ('France', 'France'), ('French Guiana', 'French Guiana'), ('French Polynesia', 'French Polynesia'), ('French Southern Territories', 'French Southern Territories'), ('Gabon', 'Gabon'), ('Gambia', 'Gambia'), ('Georgia', 'Georgia'), ('Germany', 'Germany'), ('Ghana', 'Ghana'), ('Gibraltar', 'Gibraltar'), ('Greece', 'Greece'), ('Greenland', 'Greenland'), ('Grenada', 'Grenada'), ('Guadeloupe', 'Guadeloupe'), ('Guam', 'Guam'), ('Guatemala', 'Guatemala'), ('Guernsey', 'Guernsey'), ('Guinea', 'Guinea'), ('Guinea-Bissau', 'Guinea-Bissau'), ('Guyana', 'Guyana'), ('Haiti', 'Haiti'), ('Heard Island and McDonald Islands', 'Heard Island and McDonald Islands'), ('Holy See', 'Holy See'), ('Honduras', 'Honduras'), ('Hong Kong', 'Hong Kong'), ('Hungary', 'Hungary'), ('Iceland', 'Iceland'), ('India', 'India'), ('Indonesia', 'Indonesia'), ('Iran', 'Iran'), ('Iraq', 'Iraq'), ('Ireland', 'Ireland'), ('Isle of Man', 'Isle of Man'), ('Israel', 'Israel'), ('Italy', 'Italy'), ('Jamaica', 'Jamaica'), ('Japan', 'Japan'), ('Jersey', 'Jersey'), ('Jordan', 'Jordan'), ('Kazakhstan', 'Kazakhstan'), ('Kenya', 'Kenya'), ('Kiribati', 'Kiribati'), ('Kuwait', 'Kuwait'), 
('Kyrgyzstan', 'Kyrgyzstan'), ('Laos', 'Laos'), ('Latvia', 'Latvia'), ('Lebanon', 'Lebanon'), ('Lesotho', 'Lesotho'), ('Liberia', 'Liberia'), ('Libya', 'Libya'), ('Liechtenstein', 'Liechtenstein'), ('Lithuania', 'Lithuania'), ('Luxembourg', 'Luxembourg'), ('Macao', 'Macao'), ('Madagascar', 'Madagascar'), ('Malawi', 'Malawi'), ('Malaysia', 'Malaysia'), ('Maldives', 'Maldives'), ('Mali', 'Mali'), ('Malta', 'Malta'), ('Marshall Islands', 'Marshall Islands'), ('Martinique', 'Martinique'), ('Mauritania', 'Mauritania'), ('Mauritius', 'Mauritius'), ('Mayotte', 'Mayotte'), ('Mexico', 'Mexico'), ('Micronesia (Federated States of)', 'Micronesia (Federated States of)'), ('Moldova', 'Moldova'), ('Monaco', 'Monaco'), ('Mongolia', 'Mongolia'), ('Montenegro', 'Montenegro'), ('Montserrat', 'Montserrat'), ('Morocco', 'Morocco'), ('Mozambique', 'Mozambique'), ('Myanmar', 'Myanmar'), ('Namibia', 'Namibia'), ('Nauru', 'Nauru'), ('Nepal', 'Nepal'), ('Netherlands', 'Netherlands'), ('New Caledonia', 'New Caledonia'), ('New Zealand', 'New Zealand'), ('Nicaragua', 'Nicaragua'), ('Niger', 'Niger'), ('Nigeria', 'Nigeria'), ('Niue', 'Niue'), ('Norfolk Island', 'Norfolk Island'), ('North Korea', 'North Korea'), ('North Macedonia', 'North Macedonia'), ('Northern Mariana Islands', 'Northern Mariana Islands'), ('Norway', 'Norway'), ('Oman', 'Oman'), ('Pakistan', 'Pakistan'), ('Palau', 'Palau'), ('Palestine, State of', 'Palestine, State of'), ('Panama', 'Panama'), ('Papua New Guinea', 'Papua New Guinea'), ('Paraguay', 'Paraguay'), ('Peru', 'Peru'), ('Philippines', 'Philippines'), ('Pitcairn', 'Pitcairn'), ('Poland', 'Poland'), ('Portugal', 'Portugal'), ('Puerto Rico', 'Puerto Rico'), ('Qatar', 'Qatar'), ('Réunion', 'Réunion'), ('Romania', 'Romania'), ('Russia', 'Russia'), ('Rwanda', 'Rwanda'), ('Saint Barthélemy', 'Saint Barthélemy'), ('Saint Helena, Ascension and Tristan da Cunha', 'Saint Helena, Ascension and Tristan da Cunha'), ('Saint Kitts and Nevis', 'Saint Kitts and Nevis'), ('Saint Lucia', 'Saint Lucia'), ('Saint Martin (French part)', 'Saint Martin (French part)'), ('Saint Pierre and Miquelon', 'Saint Pierre and Miquelon'), ('Saint Vincent and the Grenadines', 'Saint Vincent and the Grenadines'), ('Samoa', 'Samoa'), ('San Marino', 'San Marino'), ('Sao Tome and Principe', 'Sao Tome and Principe'), ('Saudi Arabia', 'Saudi Arabia'), ('Senegal', 'Senegal'), ('Serbia', 'Serbia'), ('Seychelles', 'Seychelles'), ('Sierra Leone', 'Sierra Leone'), ('Singapore', 'Singapore'), ('Sint Maarten (Dutch part)', 'Sint Maarten (Dutch part)'), ('Slovakia', 'Slovakia'), ('Slovenia', 'Slovenia'), ('Solomon Islands', 'Solomon Islands'), ('Somalia', 'Somalia'), ('South Africa', 'South Africa'), ('South Georgia and the South Sandwich Islands', 'South Georgia and the South Sandwich Islands'), ('South Korea', 'South Korea'), ('South Sudan', 'South Sudan'), ('Spain', 'Spain'), ('Sri Lanka', 'Sri Lanka'), ('Sudan', 'Sudan'), ('Suriname', 'Suriname'), ('Svalbard and Jan Mayen', 'Svalbard and Jan Mayen'), ('Sweden', 'Sweden'), ('Switzerland', 'Switzerland'), ('Syria', 'Syria'), ('Taiwan', 'Taiwan'), ('Tajikistan', 'Tajikistan'), ('Tanzania', 'Tanzania'), ('Thailand', 'Thailand'), ('Timor-Leste', 'Timor-Leste'), ('Togo', 'Togo'), ('Tokelau', 'Tokelau'), ('Tonga', 'Tonga'), ('Trinidad and Tobago', 'Trinidad and Tobago'), ('Tunisia', 'Tunisia'), ('Turkey', 'Turkey'), ('Turkmenistan', 'Turkmenistan'), ('Turks and Caicos Islands', 'Turks and Caicos Islands'), ('Tuvalu', 'Tuvalu'), ('Uganda', 'Uganda'), ('Ukraine', 'Ukraine'), ('United Arab 
Emirates', 'United Arab Emirates'), ('United Kingdom', 'United Kingdom'), ('United States Minor Outlying Islands', 'United States Minor Outlying Islands'), ('United States of America', 'United States of America'), ('Uruguay', 'Uruguay'), ('Uzbekistan', 'Uzbekistan'), ('Vanuatu', 'Vanuatu'), ('Venezuela', 'Venezuela'), ('Vietnam', 'Vietnam'), ('Virgin Islands (British)', 'Virgin Islands (British)'), ('Virgin Islands (U.S.)', 'Virgin Islands (U.S.)'), ('Wallis and Futuna', 'Wallis and Futuna'), ('Western Sahara', 'Western Sahara'), ('Yemen', 'Yemen'), ('Zambia', 'Zambia'), ('Zimbabwe', 'Zimbabwe')], default='Not Specified', help_text='Your country of residence', max_length=100)),
('race', models.CharField(choices=[('Not Specified', 'Not Specified'), ('African', 'African'), ('African American', 'African American'), ('American Indian', 'American Indian'), ('Arabic or Middle Eastern', 'Arabic or Middle Eastern'), ('Asian', 'Asian'), ('Caucasian', 'Caucasian'), ('Latino or Hispanic', 'Latino or Hispanic'), ('Mixed', 'Mixed'), ('Unknown', 'Unknown'), ('Other', 'Other')], default='Not Specified', help_text='Your ethnicity', max_length=50)),
('political_affiliation', models.CharField(choices=[('Not Specified', 'Not Specified'), ('Extreme left', 'Extreme left'), ('Far left', 'Far left'), ('Left', 'Left'), ('Centrist', 'Centrist'), ('Right', 'Right'), ('Far right', 'Far right'), ('Extreme right', 'Extreme right'), ('Other', 'Other')], default='Not Specified', help_text='Your political preference', max_length=50)),
('religion', models.CharField(choices=[('Not Specified', 'Not Specified'), ('Christian', 'Christian'), ('Muslim', 'Muslim'), ('Hindu', 'Hindu'), ('Buddhist', 'Buddhist'), ('Jewish', 'Jewish'), ('Atheist', 'Atheist'), ('Agnostic', 'Agnostic'), ('Other', 'Other')], default='Not Specified', help_text='Your religion', max_length=50)),
('degree_of_political_engagement', models.CharField(choices=[('Not Specified', 'Not Specified'), ('None', 'None'), ('Light', 'Light'), ('Interested', 'Interested'), ('Engaged', 'Engaged'), ('Activist', 'Activist'), ('Professional', 'Professional')], default='Not Specified', help_text='Your degree of political engagement', max_length=50)),
('moral_philosophy', models.CharField(choices=[('Not Specified', 'Not Specified'), ('Utilitarian', 'Utilitarian'), ('Non-Utilitarian Consequentialist', 'Non-Utilitarian Consequentialist'), ('Deontological', 'Deontological'), ('Virtue Ethics', 'Virtue Ethics'), ('Mixed', 'Mixed'), ('Other', 'Other')], default='Not Specified', help_text='Your preferred moral philosophy', max_length=50)),
('website', models.URLField(blank=True, help_text='Your website URL', max_length=500, null=True)),
('linkedin', models.URLField(blank=True, help_text='Your LinkedIn URL', max_length=500, null=True)),
('youtube', models.URLField(blank=True, help_text='Your Youtube channel URL', max_length=500, null=True)),
('google_scholar', models.URLField(blank=True, default=None, help_text='Your Google Scholar URL', max_length=500, null=True)),
('orcid', models.URLField(blank=True, help_text='Your ORCID URL', max_length=500, null=True)),
('researchgate', models.URLField(blank=True, help_text='Your Researchgate profile URL', max_length=500, null=True)),
('twitter', models.URLField(blank=True, help_text='Your Twitter URL', max_length=500, null=True)),
('avatar', models.ImageField(blank=True, help_text='Your profile picture.', null=True, upload_to='profiles', validators=[core.utils.validators.validate_avatar])),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='EmailDomain',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('domain', models.CharField(help_text='E-mail domain with leading @', max_length=100, unique=True)),
('status', models.CharField(choices=[('RJ', 'Rejected'), ('ACK', 'Accepted'), ('PD', 'Pending')], default='PD', help_text='Status of the domain.', max_length=10)),
('datetime_add', models.DateTimeField(auto_now_add=True, help_text='Time the domain was added', null=True)),
],
options={
'ordering': ['-datetime_add', 'domain'],
},
),
migrations.CreateModel(
name='VerifiableEmail',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(help_text='E-mail address', max_length=100)),
('is_verified', models.BooleanField(default=False, help_text='Verified')),
('last_verification_email_ts', models.DateTimeField(blank=True, default=None, help_text='Timestamp when the last verification e-mail was sent to this address', null=True)),
('token', models.CharField(blank=True, help_text='The token that needs to be supplied to verify this e-mail address', max_length=1000, null=True)),
('rank', models.IntegerField(default=0, help_text='Ordering field')),
('domain_fk', models.ForeignKey(blank=True, help_text='Foreign key to e-mail domain', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='verifiable_emails_domain', to='core.emaildomain')),
('user', models.ForeignKey(help_text='User that this e-mail belongs to', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='verifiableemails', to='core.user')),
],
options={
'ordering': ['rank'],
},
),
migrations.CreateModel(
name='UserPreferences',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating_mode', models.CharField(choices=[('enable_all', 'enable_all'), ('skip', 'skip'), ('confidence', 'confidence')], default='enable_all', help_text='Which sliders and parameters to display on the rating page?', max_length=50)),
('user', models.OneToOneField(help_text='User that preferences belong to', on_delete=django.db.models.deletion.CASCADE, related_name='userpreferences', to='core.user')),
],
bases=(models.Model, core.utils.models.WithDynamicFields),
),
migrations.AddConstraint(
model_name='emaildomain',
constraint=models.CheckConstraint(check=models.Q(('domain__istartswith', '@')), name='domain_starts_with_at'),
),
migrations.AddField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
migrations.AlterUniqueTogether(
name='verifiableemail',
unique_together={('email', 'user')},
),
]
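# Note added for context (not part of the generated migration): as an initial
# migration this would typically be applied with something like
#   python manage.py migrate core
# and the CheckConstraint above enforces at the database level that
# EmailDomain.domain always starts with '@'.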
| 208.824 | 7,796 | 0.639007 |
b00d5fad1d62f75ce9ed4e0615a5bf246a0b8396 | 23,432 | py | Python | girderformindlogger/models/item.py | ChildMindInstitute/mindlogger-app-backend | 0470d052c952d51816d06eaab8fcc64ed54d9565 | ["Apache-2.0"] | 3 | 2019-12-18T21:08:48.000Z | 2020-04-24T23:34:37.000Z | girderformindlogger/models/item.py | ChildMindInstitute/mindlogger-app-backend | 0470d052c952d51816d06eaab8fcc64ed54d9565 | ["Apache-2.0"] | 582 | 2018-05-15T22:29:21.000Z | 2020-05-29T15:21:21.000Z | girderformindlogger/models/item.py | ChildMindInstitute/mindlogger-app-backend | 0470d052c952d51816d06eaab8fcc64ed54d9565 | ["Apache-2.0"] | 20 | 2019-01-27T22:41:15.000Z | 2020-03-18T02:37:01.000Z |
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import os
import six
import cherrypy
from bson.objectid import ObjectId
from girderformindlogger import events
from girderformindlogger import logger
from girderformindlogger.constants import AccessType
from girderformindlogger.exceptions import ValidationException, GirderException
from girderformindlogger.models.model_base import Model
from girderformindlogger.utility import acl_mixin
from girderformindlogger.utility.model_importer import ModelImporter
class Item(acl_mixin.AccessControlMixin, Model):
"""
Items are leaves in the data hierarchy. They can contain 0 or more
files within them, and can also contain arbitrary metadata.
"""
def initialize(self):
self.name = 'item'
self.ensureIndices(
(
'name',
'lowerName',
'version',
'duplicateOf',
'meta.protocolId',
'meta.activityId',
'meta.screen.@type',
'meta.screen.url',
'meta.historyId',
'meta.identifier',
([
('folderId', 1),
('meta.lastVersion', 1),
], {}),
([
('folderId', 1),
('updated', 1),
], {}),
([
('meta.baseAppletId', 1),
('meta.baseItemId', 1)
], {}),
([
('meta.screen.@type', 1),
('meta.screen.url', 1),
('meta.protocolId', 1),
('meta.activityId', 1),
], {}),
([
('baseParentId', 1),
('meta.applet.@id', 1),
('meta.subject.@id', 1),
('baseParentType', 1),
('isCumulative', 1)
], {}),
([
('meta.applet.@id', 1),
('meta.subject.@id', 1),
('reviewing.responseId', 1)
], {}),
'reviewing.userProfileId'
)
)
self.ensureTextIndex({
'name': 10,
'description': 1
})
self.resourceColl = 'folder'
self.resourceParent = 'folderId'
self.exposeFields(level=AccessType.READ, fields=(
'_id', 'size', 'updated', 'description', 'created', 'meta',
'creatorId', 'folderId', 'name', 'baseParentType', 'baseParentId',
'copyOfItem'))
def _validateString(self, value):
"""
Make sure a value is a string and is stripped of whitespace.
:param value: the value to coerce into a string if it isn't already.
:returns: the string version of the value.
"""
if value is None:
value = ''
if not isinstance(value, six.string_types):
value = str(value)
return value.strip()
def reconnectToDb(self, db_uri=cherrypy.config['database']['uri']):
self.db_uri = db_uri
self.reconnect()
def validate(self, doc):
from girderformindlogger.models.folder import Folder
doc['name'] = self._validateString(doc.get('name', ''))
doc['description'] = self._validateString(doc.get('description', ''))
if not doc['name']:
raise ValidationException('Item name must not be empty.', 'name')
# Ensure unique name among sibling items and folders. If the desired
# name collides with an existing item or folder, we will append (n)
# onto the end of the name, incrementing n until the name is unique.
name = doc['name']
# If the item already exists with the current name, don't check.
# Although we don't want duplicate names, they can occur when there are
# simultaneous uploads, and also because Mongo has no guaranteed
# multi-collection uniqueness constraints. If this occurs, and we are
# changing a non-name property, don't validate the name (since that may
# fail). If the name is being changed, validate that it is probably
# unique.
checkName = '_id' not in doc or not self.findOne({'_id': doc['_id'], 'name': name})
n = 0
while checkName:
q = {
'name': name,
'folderId': doc['folderId']
}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
dupItem = self.findOne(q, fields=['_id'])
q = {
'parentId': doc['folderId'],
'name': name,
'parentCollection': 'folder'
}
dupFolder = Folder().findOne(q, fields=['_id'])
if dupItem is None and dupFolder is None:
doc['name'] = name
checkName = False
else:
n += 1
name = '%s (%d)' % (doc['name'], n)
doc['lowerName'] = doc['name'].lower()
return doc
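# Illustrative example of the renaming loop above (added comment): saving three
# sibling items all named "data" would end up as "data", "data (1)" and
# "data (2)", because each save retries with an incremented suffix until no
# sibling item or folder has the candidate name.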
def load(self, id, level=AccessType.ADMIN, user=None, objectId=True,
force=False, fields=None, exc=False):
"""
Calls AccessControlMixin.load while doing some auto-correction.
Takes the same parameters as
:py:func:`girderformindlogger.models.model_base.AccessControlMixin.load`.
"""
# Ensure we include extra fields to do the migration below
extraFields = {'baseParentId', 'baseParentType', 'parentId', 'parentCollection',
'name', 'lowerName'}
loadFields = self._supplementFields(fields, extraFields)
doc = super(Item, self).load(
id=id, level=level, user=user, objectId=objectId, force=force, fields=loadFields,
exc=exc)
if doc is not None:
if 'baseParentType' not in doc:
pathFromRoot = self.parentsToRoot(doc, user=user, force=True)
baseParent = pathFromRoot[0]
doc['baseParentId'] = baseParent['object']['_id']
doc['baseParentType'] = baseParent['type']
self.update({'_id': doc['_id']}, {'$set': {
'baseParentId': doc['baseParentId'],
'baseParentType': doc['baseParentType']
}})
if 'lowerName' not in doc:
doc['lowerName'] = doc['name'].lower()
self.update({'_id': doc['_id']}, {'$set': {
'lowerName': doc['lowerName']
}})
if 'meta' not in doc:
doc['meta'] = {}
self.update({'_id': doc['_id']}, {'$set': {
'meta': {}
}})
self._removeSupplementalFields(doc, fields)
return doc
def move(self, item, folder):
"""
Move the given item from its current folder into another folder.
:param item: The item to move.
:type item: dict
:param folder: The folder to move the item into.
:type folder: dict.
"""
self.propagateSizeChange(item, -item['size'])
item['folderId'] = folder['_id']
item['baseParentType'] = folder['baseParentType']
item['baseParentId'] = folder['baseParentId']
self.propagateSizeChange(item, item['size'])
return self.save(item)
def propagateSizeChange(self, item, inc):
from girderformindlogger.models.folder import Folder
Folder().increment(query={
'_id': item['folderId']
}, field='size', amount=inc, multi=False)
ModelImporter.model(item['baseParentType']).increment(query={
'_id': item['baseParentId']
}, field='size', amount=inc, multi=False)
def recalculateSize(self, item):
"""
Recalculate the item size based on the files that are in it. If this
is different than the recorded size, propagate the changes.
:param item: The item to recalculate the size of.
:returns: the recalculated size in bytes
"""
size = 0
for file in self.childFiles(item):
# We could add a recalculateSize to the file model, in which case
# this would be:
# size += File().recalculateSize(file)
size += file.get('size', 0)
delta = size - item.get('size', 0)
if delta:
logger.info('Item %s was wrong size: was %d, is %d' % (
item['_id'], item['size'], size))
item['size'] = size
self.update({'_id': item['_id']}, update={'$set': {'size': size}})
self.propagateSizeChange(item, delta)
return size
def childFiles(self, item, limit=0, offset=0, sort=None, **kwargs):
"""
Returns child files of the item. Passes any kwargs to the find
function.
:param item: The parent item.
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
"""
from girderformindlogger.models.file import File
q = {
'itemId': item['_id']
}
return File().find(q, limit=limit, offset=offset, sort=sort, **kwargs)
def remove(self, item, **kwargs):
"""
Delete an item, and all references to it in the database.
:param item: The item document to delete.
:type item: dict
"""
from girderformindlogger.models.file import File
from girderformindlogger.models.upload import Upload
# Delete all files in this item
fileModel = File()
files = fileModel.find({
'itemId': item['_id']
})
for file in files:
fileKwargs = kwargs.copy()
fileKwargs.pop('updateItemSize', None)
fileModel.remove(file, updateItemSize=False, **fileKwargs)
# Delete pending uploads into this item
uploadModel = Upload()
uploads = uploadModel.find({
'parentId': item['_id'],
'parentType': 'item'
})
for upload in uploads:
uploadModel.remove(upload, **kwargs)
# Delete the item itself
Model.remove(self, item)
def createItem(self, name, creator, folder, description='',
reuseExisting=False, validate=True):
"""
Create a new item. The creator will be given admin access to it.
:param name: The name of the item.
:type name: str
:param description: Description for the item.
:type description: str
:param folder: The parent folder of the item.
:param creator: User document representing the creator of the item.
:type creator: dict
:param reuseExisting: If an item with the given name already exists
under the given folder, return that item rather than creating a
new one.
:type reuseExisting: bool
:returns: The item document that was created.
"""
if reuseExisting:
existing = self.findOne({
'folderId': folder['_id'],
'name': name
})
if existing:
return existing
now = datetime.datetime.utcnow()
if not isinstance(creator, dict) or '_id' not in creator:
# Internal error -- this shouldn't be called without a user.
raise GirderException('Creator must be a user.',
'girderformindlogger.models.item.creator-not-user')
if 'baseParentType' not in folder:
pathFromRoot = self.parentsToRoot({'folderId': folder['_id']},
creator, force=True)
folder['baseParentType'] = pathFromRoot[0]['type']
folder['baseParentId'] = pathFromRoot[0]['object']['_id']
return self.save({
'name': self._validateString(name),
'description': self._validateString(description),
'folderId': ObjectId(folder['_id']),
'creatorId': creator['_id'],
'baseParentType': folder['baseParentType'],
'baseParentId': folder['baseParentId'],
'created': now,
'updated': now,
'size': 0,
'meta': {}
}, validate=validate)
def updateItem(self, item, folder=None):
"""
Updates an item.
:param item: The item document to update
:type item: dict
:returns: The item document that was edited.
"""
item['updated'] = datetime.datetime.utcnow()
# Validate and save the item
return self.save(item)
def filter(self, doc, user=None, additionalKeys=None):
"""
Overrides the parent ``filter`` method to add an empty meta field
(if it doesn't exist) to the returned folder.
"""
filteredDoc = super(Item, self).filter(doc, user, additionalKeys=additionalKeys)
if 'meta' not in filteredDoc:
filteredDoc['meta'] = {}
return filteredDoc
def setMetadata(self, item, metadata, allowNull=False, validate=True):
"""
Set metadata on an item. A `ValidationException` is thrown in the
cases where the metadata JSON object is badly formed, or if any of the
metadata keys contains a period ('.').
:param item: The item to set the metadata on.
:type item: dict
:param metadata: A dictionary containing key-value pairs to add to
the item's meta field
:type metadata: dict
:param allowNull: Whether to allow `null` values to be set in the item's
metadata. If set to `False` or omitted, a `null` value will cause that
metadata field to be deleted.
:returns: the item document
"""
if 'meta' not in item:
item['meta'] = {}
# Add new metadata to existing metadata
item['meta'].update(six.viewitems(metadata))
# Remove metadata fields that were set to null (use items in py3)
if not allowNull:
toDelete = [k for k, v in six.viewitems(metadata) if v is None]
for key in toDelete:
del item['meta'][key]
self.validateKeys(item['meta'])
item['updated'] = datetime.datetime.utcnow()
# Validate and save the item
return self.save(item, validate=validate)
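# Illustrative usage (added comment, names hypothetical):
#   item = Item().setMetadata(item, {'subject': 'A', 'obsolete': None})
# with the default allowNull=False this sets meta['subject'] and removes any
# existing meta['obsolete'], since None values mark fields for deletion.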
def deleteMetadata(self, item, fields):
"""
Delete metadata on an item. A `ValidationException` is thrown if the
metadata field names contain a period ('.') or begin with a dollar sign
('$').
:param item: The item to delete metadata from.
:type item: dict
:param fields: An array containing the field names to delete from the
item's meta field
:type fields: list
:returns: the item document
"""
self.validateKeys(fields)
if 'meta' not in item:
item['meta'] = {}
for field in fields:
item['meta'].pop(field, None)
item['updated'] = datetime.datetime.utcnow()
return self.save(item)
def parentsToRoot(self, item, user=None, force=False):
"""
Get the path to traverse to a root of the hierarchy.
:param item: The item whose root to find
:type item: dict
:param user: The user making the request (not required if force=True).
:type user: dict or None
:param force: Set to True to skip permission checking. If False, the
returned models will be filtered.
:type force: bool
:returns: an ordered list of dictionaries from root to the current item
"""
from girderformindlogger.models.folder import Folder
folderModel = Folder()
curFolder = folderModel.load(
item['folderId'], user=user, level=AccessType.READ, force=force)
folderIdsToRoot = folderModel.parentsToRoot(
curFolder, user=user, level=AccessType.READ, force=force)
if force:
folderIdsToRoot.append({'type': 'folder', 'object': curFolder})
else:
filteredFolder = folderModel.filter(curFolder, user)
folderIdsToRoot.append({'type': 'folder', 'object': filteredFolder})
return folderIdsToRoot
def copyItem(self, srcItem, creator, name=None, folder=None, description=None):
"""
Copy an item, including duplicating files and metadata.
:param srcItem: the item to copy.
:type srcItem: dict
:param creator: the user who will own the copied item.
:param name: The name of the new item. None to copy the original name.
:type name: str
:param folder: The parent folder of the new item. None to store in the
same folder as the original item.
:param description: Description for the new item. None to copy the
original description.
:type description: str
:returns: the new item.
"""
from girderformindlogger.models.file import File
from girderformindlogger.models.folder import Folder
if name is None:
name = srcItem['name']
if folder is None:
folder = Folder().load(srcItem['folderId'], force=True)
if description is None:
description = srcItem['description']
newItem = self.createItem(
folder=folder, name=name, creator=creator, description=description)
# copy metadata and other extension values
newItem['meta'] = copy.deepcopy(srcItem['meta'])
filteredItem = self.filter(newItem, creator)
for key in srcItem:
if key not in filteredItem and key not in newItem:
newItem[key] = copy.deepcopy(srcItem[key])
# add a reference to the original item
newItem['copyOfItem'] = srcItem['_id']
newItem = self.save(newItem, triggerEvents=False)
# Give listeners a chance to change things
events.trigger('model.item.copy.prepare', (srcItem, newItem))
# copy files
fileModel = File()
for file in self.childFiles(item=srcItem):
fileModel.copyFile(file, creator=creator, item=newItem)
# Reload to get updated size value
newItem = self.load(newItem['_id'], force=True)
events.trigger('model.item.copy.after', newItem)
return newItem
def fileList(self, doc, user=None, path='', includeMetadata=False,
subpath=True, mimeFilter=None, data=True):
"""
This function generates a list of 2-tuples whose first element is the
relative path to the file from the item's root and whose second
element depends on the value of the `data` flag. If `data=True`, the
second element will be a generator that will generate the bytes of the
file data as stored in the assetstore. If `data=False`, the second
element will be the file document itself.
:param doc: The item to list.
:param user: A user used to validate data that is returned. This isn't
used, but is present to be consistent across all model
implementations of fileList.
:param path: A path prefix to add to the results.
:type path: str
:param includeMetadata: If True and there is any metadata, include a
result which is the JSON string of the
metadata. This is given a name of
metadata[-(number).json that is distinct from
any file within the item.
:type includeMetadata: bool
:param subpath: If True and the item has more than one file, any
metadata, or the sole file is not named the same as the
item, then the returned paths include the item name.
:type subpath: bool
:param mimeFilter: Optional list of MIME types to filter by. Set to
None to include all files.
:type mimeFilter: `list or tuple`
:param data: If True return raw content of each file as stored in the
assetstore, otherwise return file document.
:type data: bool
:returns: Iterable over files in this item, where each element is a
tuple of (path name of the file, stream function with file
data or file object).
:rtype: generator(str, func)
"""
from girderformindlogger.models.file import File
if subpath:
files = list(self.childFiles(item=doc, limit=2))
if (len(files) != 1 or files[0]['name'] != doc['name']
or (includeMetadata and doc.get('meta', {}))):
path = os.path.join(path, doc['name'])
metadataFile = 'girder-item-metadata.json'
fileModel = File()
# Eagerly evaluate this list, as the MongoDB cursor can time out on long requests
# Don't use a "filter" projection here, since returning the full file document is promised
# by this function, and file objects tend to not have large fields present
childFiles = list(self.childFiles(item=doc))
for file in childFiles:
if not self._mimeFilter(file, mimeFilter):
continue
if file['name'] == metadataFile:
metadataFile = None
if data:
val = fileModel.download(file, headers=False)
else:
val = file
yield (os.path.join(path, file['name']), val)
if includeMetadata and metadataFile and len(doc.get('meta', {})):
def stream():
yield json.dumps(doc['meta'], default=str)
yield (os.path.join(path, metadataFile), stream)
def _mimeFilter(self, file, mimeFilter):
"""
Returns whether or not the given file should be passed through the given
MIME filter. If no MIME filter is specified, all files are allowed.
"""
if not mimeFilter:
return True
return file['mimeType'] in mimeFilter
def isOrphan(self, item):
"""
Returns True if this item is orphaned (its folder is missing).
:param item: The item to check.
:type item: dict
"""
from girderformindlogger.models.folder import Folder
return not Folder().load(item.get('folderId'), force=True)
def updateSize(self, doc):
"""
Recomputes the size of this item and its underlying
files and fixes the sizes as needed.
:param doc: The item.
:type doc: dict
"""
from girderformindlogger.models.file import File
# get correct size from child files
size = 0
fixes = 0
fileModel = File()
for file in self.childFiles(doc):
s, f = fileModel.updateSize(file)
size += s
fixes += f
# fix value if incorrect
if size != doc.get('size'):
self.update({'_id': doc['_id']}, update={'$set': {'size': size}})
fixes += 1
return size, fixes
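# Illustration (not part of the original module): the merge/null-deletion
# behaviour of setMetadata() above can be sketched in isolation with plain
# dictionaries. This is only a standalone sketch of the semantics; the real
# method also validates keys and persists the document to the database.
def _merge_metadata_sketch(existing, updates, allowNull=False):
    """Merge ``updates`` into ``existing``, dropping keys whose new value is
    None unless ``allowNull`` is True (mirrors Item.setMetadata above)."""
    merged = dict(existing)
    merged.update(updates)
    if not allowNull:
        for key in (k for k, v in updates.items() if v is None):
            merged.pop(key, None)
    return merged


if __name__ == '__main__':
    meta = {'subject': 'sub-01', 'session': 1}
    print(_merge_metadata_sketch(meta, {'session': None, 'task': 'rest'}))
    # -> {'subject': 'sub-01', 'task': 'rest'}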
| 38.287582
| 98
| 0.563375
|
e6717ed27aee333075f6a656c202685e3d314e19
| 4,412
|
py
|
Python
|
discord/file.py
|
jeromedontdev/discord.py
|
42bab370a73440fa8af2380211ad92ccb6bf7f46
|
[
"MIT"
] | 13
|
2020-12-16T06:13:11.000Z
|
2021-04-15T12:01:38.000Z
|
discord/file.py
|
RootGC/discord.py
|
8bc489dba8b8c7ca9141e4e7f00a0e916a7c0269
|
[
"MIT"
] | 1
|
2021-05-23T16:08:10.000Z
|
2021-05-23T16:08:10.000Z
|
discord/file.py
|
RootGC/discord.py
|
8bc489dba8b8c7ca9141e4e7f00a0e916a7c0269
|
[
"MIT"
] | 6
|
2020-12-16T00:01:24.000Z
|
2021-02-05T12:32:54.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING, Union
import os
import io
__all__ = (
'File',
)
class File:
r"""A parameter object used for :meth:`abc.Messageable.send`
for sending file objects.
.. note::
File objects are single use and are not meant to be reused in
multiple :meth:`abc.Messageable.send`\s.
Attributes
-----------
fp: Union[:class:`os.PathLike`, :class:`io.BufferedIOBase`]
A file-like object opened in binary mode and read mode
or a filename representing a file in the hard drive to
open.
.. note::
If the file-like object passed is opened via ``open`` then the
            mode 'rb' should be used.
To pass binary data, consider usage of ``io.BytesIO``.
filename: Optional[:class:`str`]
The filename to display when uploading to Discord.
If this is not given then it defaults to ``fp.name`` or if ``fp`` is
a string then the ``filename`` will default to the string given.
spoiler: :class:`bool`
Whether the attachment is a spoiler.
"""
__slots__ = ('fp', 'filename', 'spoiler', '_original_pos', '_owner', '_closer')
if TYPE_CHECKING:
fp: io.BufferedIOBase
filename: Optional[str]
spoiler: bool
def __init__(
self,
fp: Union[str, bytes, os.PathLike, io.BufferedIOBase],
filename: Optional[str] = None,
*,
spoiler: bool = False,
):
if isinstance(fp, io.IOBase):
if not (fp.seekable() and fp.readable()):
raise ValueError(f'File buffer {fp!r} must be seekable and readable')
self.fp = fp
self._original_pos = fp.tell()
self._owner = False
else:
self.fp = open(fp, 'rb')
self._original_pos = 0
self._owner = True
# aiohttp only uses two methods from IOBase
# read and close, since I want to control when the files
# close, I need to stub it so it doesn't close unless
# I tell it to
self._closer = self.fp.close
self.fp.close = lambda: None
if filename is None:
if isinstance(fp, str):
_, self.filename = os.path.split(fp)
else:
self.filename = getattr(fp, 'name', None)
else:
self.filename = filename
if spoiler and self.filename is not None and not self.filename.startswith('SPOILER_'):
self.filename = 'SPOILER_' + self.filename
self.spoiler = spoiler or (self.filename is not None and self.filename.startswith('SPOILER_'))
def reset(self, *, seek: bool = True) -> None:
        # The `seek` parameter exists because the HTTP retry loop passes its
        # attempt counter here, and that counter starts at 0. The reset is
        # done at the top of each attempt, so on the very first attempt the
        # falsy value 0 skips the seek (the file is already at its original
        # position); retries pass a truthy value and rewind the file first.
if seek:
self.fp.seek(self._original_pos)
def close(self) -> None:
self.fp.close = self._closer
if self._owner:
self._closer()
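# Usage sketch (assumed; not part of the original file). The docstring above
# suggests io.BytesIO for in-memory data, so the snippet below constructs a
# File that way and exercises only its local behaviour -- normally the object
# would be handed to abc.Messageable.send().
if __name__ == '__main__':
    buffer = io.BytesIO(b'hello world')
    attachment = File(buffer, filename='greeting.txt', spoiler=True)
    print(attachment.filename)  # SPOILER_greeting.txt
    print(attachment.spoiler)   # True
    attachment.reset()          # seeks back to the original position
    attachment.close()          # restores fp.close; the buffer is only closed
                                # automatically when File opened the path itself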
| 34.46875
| 102
| 0.643246
|
0ddd30b9175786708bd8ed7ebcec8d552e95b7ca
| 48
|
py
|
Python
|
users/models/__init__.py
|
lynnetsy/ms_users
|
d51bec4bb6326752889bc9e1ea1f6fb5ecef3cbb
|
[
"MIT"
] | null | null | null |
users/models/__init__.py
|
lynnetsy/ms_users
|
d51bec4bb6326752889bc9e1ea1f6fb5ecef3cbb
|
[
"MIT"
] | null | null | null |
users/models/__init__.py
|
lynnetsy/ms_users
|
d51bec4bb6326752889bc9e1ea1f6fb5ecef3cbb
|
[
"MIT"
] | null | null | null |
from .model import Model
from .user import User
| 16
| 24
| 0.791667
|
011701b4ee777eaae2a463ec098acb8aae9f2c32
| 729
|
py
|
Python
|
setup.py
|
rethinkpriorities/survey_dud_detector
|
dd5cc769798b66405963e0b3469ce0b41c92e989
|
[
"MIT"
] | 2
|
2020-10-17T03:54:28.000Z
|
2021-01-05T19:53:13.000Z
|
setup.py
|
rethinkpriorities/survey_dud_detector
|
dd5cc769798b66405963e0b3469ce0b41c92e989
|
[
"MIT"
] | null | null | null |
setup.py
|
rethinkpriorities/survey_dud_detector
|
dd5cc769798b66405963e0b3469ce0b41c92e989
|
[
"MIT"
] | null | null | null |
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='survey_dud_detector',
version='0.2',
author='Peter Hurford',
author_email='peter@peterhurford.com',
    description='Automatically detect bad responses in survey data',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/rethinkpriorities/survey_dud_detector',
packages=setuptools.find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
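# Assumed local workflow (not part of the original file): install in editable
# mode for development with `pip install -e .`, and build a distributable
# archive with `python setup.py sdist` before uploading it with twine.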
| 31.695652
| 74
| 0.66941
|
a810db968fd68b49d8e0a8cae492fdfe8c8dab0c
| 2,776
|
py
|
Python
|
hoomd/mpcd/test-py/init_make_random_test.py
|
moosee333/hoomd
|
bdbf221b62cb5166b03c0a10db58bebcb952f755
|
[
"BSD-3-Clause"
] | null | null | null |
hoomd/mpcd/test-py/init_make_random_test.py
|
moosee333/hoomd
|
bdbf221b62cb5166b03c0a10db58bebcb952f755
|
[
"BSD-3-Clause"
] | null | null | null |
hoomd/mpcd/test-py/init_make_random_test.py
|
moosee333/hoomd
|
bdbf221b62cb5166b03c0a10db58bebcb952f755
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2009-2018 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
# Maintainer: mphoward
import unittest
import numpy as np
import hoomd
from hoomd import mpcd
# unit tests for snapshots with mpcd particle data
class mpcd_init_make_random(unittest.TestCase):
def setUp(self):
hoomd.context.initialize()
# initialize an empty snapshot
hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=10.)))
def test_init(self):
s = mpcd.init.make_random(N=3, kT=1.0, seed=7)
# check number of particles
self.assertEqual(s.particles.N_global, 3)
if hoomd.comm.get_num_ranks() > 1:
if hoomd.comm.get_rank() == 0:
self.assertEqual(s.particles.N, 2)
else:
self.assertEqual(s.particles.N, 1)
# check tags
if hoomd.comm.get_num_ranks() > 1:
if hoomd.comm.get_rank() == 0:
self.assertEqual(s.particles.getTag(0), 0)
self.assertEqual(s.particles.getTag(1), 1)
else:
self.assertEqual(s.particles.getTag(0), 2)
else:
self.assertEqual(s.particles.getTag(0), 0)
self.assertEqual(s.particles.getTag(1), 1)
self.assertEqual(s.particles.getTag(2), 2)
# check default type creation
self.assertEqual(s.particles.n_types, 1)
self.assertEqual(s.particles.getNameByType(0), "A")
# check default mass
self.assertEqual(s.particles.mass, 1.0)
def test_random(self):
s = mpcd.init.make_random(N=100000, kT=0.5, seed=7)
snap = s.take_snapshot()
if hoomd.comm.get_rank() == 0:
            # histogram particles along x, y, and z and check uniformity with a loose tolerance
pos = snap.particles.position
hist,_ = np.histogram(pos[:,0], bins=10, range=(-5.,5.))
np.testing.assert_allclose(hist, 10000., rtol=0.05)
hist,_ = np.histogram(pos[:,1], bins=10, range=(-5.,5.))
np.testing.assert_allclose(hist, 10000., rtol=0.05)
hist,_ = np.histogram(pos[:,2], bins=10, range=(-5.,5.))
np.testing.assert_allclose(hist, 10000., rtol=0.05)
# check velocities are distributed OK using loose tolerance on mean and variance
vel = snap.particles.velocity
vel = np.reshape(vel, (3*snap.particles.N, 1))
self.assertAlmostEqual(np.mean(vel), 0.0, places=5)
# sigma^2 = kT / m
self.assertAlmostEqual(np.mean(vel**2), 0.5, places=2)
def tearDown(self):
pass
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
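# Standalone sanity check (assumed; does not require hoomd). test_random above
# relies on velocities following a normal distribution with per-component
# variance kT/m, so a plain numpy draw should reproduce the same statistics.
# This helper is illustrative only and is not called by the tests.
def _expected_velocity_statistics(kT=0.5, mass=1.0, n=100000, seed=7):
    rng = np.random.default_rng(seed)
    vel = rng.normal(0.0, np.sqrt(kT / mass), size=(n, 3))
    return float(np.mean(vel)), float(np.var(vel))  # ~0.0 and ~kT/m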
| 37.513514
| 93
| 0.604107
|
af6953714ec76c5e3a8fb7b14f88db75d4e7294e
| 590
|
gyp
|
Python
|
binding.gyp
|
creativeschool/simplencrypt
|
8757353ee3d9ef81722f1cd8bfc9567c4c4d9134
|
[
"MIT"
] | null | null | null |
binding.gyp
|
creativeschool/simplencrypt
|
8757353ee3d9ef81722f1cd8bfc9567c4c4d9134
|
[
"MIT"
] | null | null | null |
binding.gyp
|
creativeschool/simplencrypt
|
8757353ee3d9ef81722f1cd8bfc9567c4c4d9134
|
[
"MIT"
] | null | null | null |
{
"targets": [
{
"target_name": "main",
"sources": [
"addon/main.cpp"
],
"include_dirs" : [
"<!@(node -p \"require('node-addon-api').include\")"
],
'defines': [ 'NAPI_DISABLE_CPP_EXCEPTIONS' ],
'conditions': [
['OS=="win"', {
'defines': [
'PSK="<!(echo %PSK%)"',
'PSKSALT="<!(echo %PSKSALT%)"'
]
}],
['OS!="win"', {
'defines': [
'PSK="<!(echo $PSK)"',
'PSKSALT="<!(echo $PSKSALT)"'
]
}]
]
}
]
}
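# Note (assumed usage, not part of the original file): the PSK and PSKSALT
# values in 'defines' above are captured from the environment at configure
# time, so both variables must be set before running `node-gyp configure`
# or `node-gyp rebuild`.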
| 21.071429
| 60
| 0.352542
|
abe19d0ecfcbb041500206734bd51c18dddcd03d
| 710
|
py
|
Python
|
setup.py
|
enezhadian/visualization
|
b70358fb295bf833484c0a25b7c6cb1c4e156ae6
|
[
"MIT"
] | 3
|
2017-05-06T12:40:43.000Z
|
2019-10-29T09:45:27.000Z
|
setup.py
|
enezhadian/visualization
|
b70358fb295bf833484c0a25b7c6cb1c4e156ae6
|
[
"MIT"
] | null | null | null |
setup.py
|
enezhadian/visualization
|
b70358fb295bf833484c0a25b7c6cb1c4e156ae6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from os import path
with open(path.join(path.abspath(path.dirname(__file__)), 'README.rst'), 'r') as readme:
long_description = readme.read()
setup(
name='visualization',
    description='Deep Convolutional Neural Network Visualization',
long_description=long_description,
author='Ehsan Nezhadian',
version='0.1.0',
license='MIT',
packages=find_packages(exclude=[]),
# Package dependencies:
install_requires=[
'numpy',
'pillow',
'scipy',
'tensorflow'
],
# Development dependencies:
extras_require={
'dev': [
'jupyter',
]
}
)
| 21.515152
| 88
| 0.616901
|
e6f807f02f0eca30f29d8a4cc5fc3c2337b23859
| 2,460
|
py
|
Python
|
ML_tests/LinearRegression_tests/LinearRegression_normal.py
|
xuyannus/Machine-Learning-Collection
|
6d5dcd18d4e40f90e77355d56a2902e4c617ecbe
|
[
"MIT"
] | 3,094
|
2020-09-20T04:34:31.000Z
|
2022-03-31T23:59:46.000Z
|
ML_tests/LinearRegression_tests/LinearRegression_normal.py
|
xkhainguyen/Machine-Learning-Collection
|
425d196e9477dbdbbd7cc0d19d29297571746ab5
|
[
"MIT"
] | 79
|
2020-09-24T08:54:17.000Z
|
2022-03-30T14:45:08.000Z
|
ML_tests/LinearRegression_tests/LinearRegression_normal.py
|
xkhainguyen/Machine-Learning-Collection
|
425d196e9477dbdbbd7cc0d19d29297571746ab5
|
[
"MIT"
] | 1,529
|
2020-09-20T16:21:21.000Z
|
2022-03-31T21:16:25.000Z
|
# Add the folder containing the algorithm implementations to the import path
import sys
import unittest
import numpy as np
# For importing from different folders
# OBS: This is supposed to be done with automated testing,
# hence relative to folder we want to import from
sys.path.append("ML/algorithms/linearregression")
# If run from local:
# sys.path.append('../../ML/algorithms/linearregression/')
from linear_regression_normal_equation import linear_regression_normal_equation
class TestLinearRegression_NormalEq(unittest.TestCase):
def setUp(self):
# test cases we want to run
self.X1 = np.array([[0, 1, 2]]).T
self.y1 = np.array([1, 2, 3])
self.W1_correct = np.array([[1, 1]])
self.X2 = np.array([[0, 1]]).T
self.y2 = np.array([1, 0])
self.W2_correct = np.array([[1, -1]])
self.X3 = np.array([[1, 2, 3], [1, 2, 4]]).T
self.y3 = np.array([5, 10, 18])
self.W3_correct = np.array([[0, 2, 3]])
self.X4 = np.array([[0, 0]]).T
self.y4 = np.array([0, 0])
self.W4_correct = np.array([[0, 0]])
self.X5 = np.array([[0, 1, 2, 3, 4, 5]]).T
self.y5 = np.array([0, 0.99, 2.01, 2.99, 4.01, 4.99])
self.W5_correct = np.array([[0, 1]])
def test_perfectpositiveslope(self):
W = linear_regression_normal_equation(self.X1, self.y1)
print(W.shape)
print(self.W1_correct.shape)
boolean_array = np.isclose(W, self.W1_correct)
self.assertTrue(boolean_array.all())
def test_perfectnegativeslope(self):
W = linear_regression_normal_equation(self.X2, self.y2)
boolean_array = np.isclose(W, self.W2_correct)
self.assertTrue(boolean_array.all())
def test_multipledimension(self):
W = linear_regression_normal_equation(self.X3, self.y3)
print(W)
print(self.W3_correct)
boolean_array = np.isclose(W, self.W3_correct)
self.assertTrue(boolean_array.all())
def test_zeros(self):
W = linear_regression_normal_equation(self.X4, self.y4)
boolean_array = np.isclose(W, self.W4_correct)
self.assertTrue(boolean_array.all())
def test_noisydata(self):
W = linear_regression_normal_equation(self.X5, self.y5)
boolean_array = np.isclose(W, self.W5_correct, atol=1e-3)
self.assertTrue(boolean_array.all())
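# Reference sketch (assumed): linear_regression_normal_equation is imported
# from the project sources and not shown here. The tests above expect a row
# vector whose first entry is the intercept, i.e. W such that y ~= [1 X] W^T.
# A minimal implementation consistent with every case above (including the
# singular all-zeros case, handled by the pseudo-inverse) could look like this
# hypothetical helper, which is not used by the tests themselves:
def _normal_equation_sketch(X, y):
    Xb = np.hstack([np.ones((X.shape[0], 1)), X])  # prepend an intercept column
    W = np.linalg.pinv(Xb.T @ Xb) @ Xb.T @ y       # solve the normal equations
    return W.reshape(1, -1)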
if __name__ == "__main__":
print("Running Linear Regression Normal Equation tests:")
unittest.main()
| 34.166667
| 79
| 0.642276
|
99f3e14ec256ecb5f74576d2e053c7772a3d0902
| 3,954
|
py
|
Python
|
src/cube2common/ivec.py
|
DanSeraf/spyd
|
af893b7f9c67785613b25754eb2cf150523a9fe4
|
[
"Zlib"
] | 1
|
2020-03-23T20:28:06.000Z
|
2020-03-23T20:28:06.000Z
|
src/cube2common/ivec.py
|
DanSeraf/spyd
|
af893b7f9c67785613b25754eb2cf150523a9fe4
|
[
"Zlib"
] | 1
|
2022-02-05T16:59:53.000Z
|
2022-02-08T17:30:55.000Z
|
src/cube2common/ivec.py
|
DanSeraf/spyd
|
af893b7f9c67785613b25754eb2cf150523a9fe4
|
[
"Zlib"
] | null | null | null |
from cube2common.vec import vec
R = [1, 2, 0]
C = [2, 0, 1]
D = [0, 1, 2]
class ivec(object):
def __init__(self, *args):
self.v = [0]*3
if len(args) == 1 and isinstance(args[0], vec):
v = args[0]
self.x = v.x
self.y = v.y
self.z = v.z
elif len(args) == 1:
i = args[0]
self.x = ((i&1)>>0)
self.y = ((i&2)>>1)
self.z = ((i&4)>>2)
        elif len(args) == 3:
            # store as a list so the component setters can mutate it later
            self.v = list(args)
elif len(args) == 4:
d, row, col, depth = args
            self.v[R[d]] = row
            self.v[C[d]] = col
            self.v[D[d]] = depth
elif len(args) == 5:
i, cx, cy, cz, size = args
            self.x = cx+((i&1)>>0)*size
            self.y = cy+((i&2)>>1)*size
            self.z = cz+((i&4)>>2)*size
def __repr__(self):
return "<vec: {x}, {y}, {z}>".format(x=self.x, y=self.y, z=self.z)
def copy(self):
return vec(self.x, self.y, self.z)
def __eq__(self, other):
if isinstance(other, ivec):
return self.x == other.x and self.y == other.y and self.z == other.z
else:
return False
def __ne__(self, other):
if isinstance(other, ivec):
return self.x != other.x or self.y != other.y or self.z != other.z
else:
return False
def __getitem__(self, index):
return self.v[index]
def __setitem__(self, index, value):
self.v[index] = value
@property
def x(self):
return self.v[0]
@x.setter
def x(self, value):
self.v[0] = value
@property
def y(self):
return self.v[1]
@y.setter
def y(self, value):
self.v[1] = value
@property
def z(self):
return self.v[2]
@z.setter
def z(self, value):
self.v[2] = value
@property
def r(self):
return self.v[0]
@r.setter
def r(self, value):
self.v[0] = value
@property
def g(self):
return self.v[1]
@g.setter
def g(self, value):
self.v[1] = value
@property
def b(self):
return self.v[2]
@b.setter
def b(self, value):
self.v[2] = value
def iszero(self):
return self.x == 0 and self.y == 0 and self.z == 0
def shl(self, n):
self.x <<= n
self.y <<= n
self.z <<= n
def shr(self, n):
self.x >>= n
self.y >>= n
self.z >>= n
def mul(self, item):
if isinstance(item, ivec):
self.x *= item.x
self.y *= item.y
self.z *= item.z
else:
self.x *= item
self.y *= item
self.z *= item
return self
def div(self, item):
if isinstance(item, ivec):
self.x /= item.x
self.y /= item.y
self.z /= item.z
else:
self.x /= item
self.y /= item
self.z /= item
return self
def add(self, item):
if isinstance(item, ivec):
self.x += item.x
self.y += item.y
self.z += item.z
else:
self.x += item
self.y += item
self.z += item
return self
def sub(self, item):
if isinstance(item, ivec):
self.x -= item.x
self.y -= item.y
self.z -= item.z
else:
self.x -= item
self.y -= item
self.z -= item
return self
def mask(self, n):
self.x &= n
self.y &= n
self.z &= n
return self
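# Usage sketch (assumed, not part of the original module): basic construction
# and in-place arithmetic with the ivec type defined above. The module-level
# import of cube2common.vec still has to resolve for this to run.
if __name__ == '__main__':
    corner = ivec(5)           # octant index 5 -> components (1, 0, 1)
    offset = ivec(2, 3, 4)     # explicit components
    offset.add(corner).mul(2)  # in-place operations return self for chaining
    print(offset.x, offset.y, offset.z)  # 6 6 10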
| 22.465909
| 80
| 0.40693
|
f7a94bf19dba5353e7a8694992bd2c36adda36c8
| 29,184
|
py
|
Python
|
Code-SPA/RNN_feedback.py
|
1212Prajwol-Pdl/SmartProcessAnalytics
|
b25b6e922e19cc61cfb9eb96395ad177af1daf71
|
[
"MIT"
] | 2
|
2020-11-18T02:50:06.000Z
|
2021-02-02T16:18:32.000Z
|
Code-SPA/RNN_feedback.py
|
1212Prajwol-Pdl/SmartProcessAnalytics
|
b25b6e922e19cc61cfb9eb96395ad177af1daf71
|
[
"MIT"
] | 1
|
2021-09-09T15:52:53.000Z
|
2022-02-03T05:43:15.000Z
|
Code-SPA/RNN_feedback.py
|
1212Prajwol-Pdl/SmartProcessAnalytics
|
b25b6e922e19cc61cfb9eb96395ad177af1daf71
|
[
"MIT"
] | 3
|
2021-02-02T16:18:34.000Z
|
2021-09-08T06:27:37.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 17:52:31 2019
@author: Weike (Vicky) Sun vickysun@mit.edu/weike.sun93@gmail.com
(c) 2020 Weike Sun, all rights reserved
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 09:15:46 2018
@author: weiksun
@comment: this file contains the RNN formulation for regression purpose w/ feedback connection
"""
"""
Import package
"""
import numpy as np
import tensorflow as tf
"""
Generate batch data
"""
def gen_batch(raw_x, raw_y,raw_yp, batch_size, num_steps, epoch_overlap):
data_length = len(raw_x)
dx = np.shape(raw_x)[1]
dy = np.shape(raw_y)[1]
dyp = np.shape(raw_yp)[1]
batch_partition_length = data_length // batch_size
data_x = np.zeros([batch_size, batch_partition_length,dx], dtype= np.float32)
data_y = np.zeros([batch_size, batch_partition_length,dy], dtype= np.float32)
data_yp = np.zeros([batch_size, batch_partition_length,dyp], dtype= np.float32)
for i in range(batch_size):
data_x[i] = raw_x[batch_partition_length * i : batch_partition_length * (i+1)]
data_y[i] = raw_y[batch_partition_length * i : batch_partition_length * (i+1)]
data_yp[i] = raw_yp[batch_partition_length * i : batch_partition_length * (i+1)]
if epoch_overlap == None:
epoch_size = batch_partition_length // num_steps
for i in range(epoch_size):
x = data_x[:, i * num_steps:(i + 1) * num_steps]
y = data_y[:, i * num_steps:(i + 1) * num_steps]
yp = data_yp[:, i * num_steps:(i + 1) * num_steps]
yield (x, y, yp)
else:
epoch_size = (batch_partition_length - num_steps + 1)//(epoch_overlap+1)
for i in range(epoch_size):
x = data_x[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
y = data_y[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
yp = data_yp[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
yield (x, y, yp)
"""
Generate batch data for multiple series
"""
def gen_batch_multi(raw_x, raw_y, timeindex, batch_size, num_steps, epoch_overlap):
cum = 0
num_series = len(timeindex)
for s in range(num_series):
num = np.shape(timeindex[s+1])[0]
x = raw_x[cum:cum+num]
y = raw_y[cum:cum+num]
yp = np.insert(y,0,0,axis=0)[:-1]
data_length = len(x)
dx = np.shape(x)[1]
dy = np.shape(y)[1]
dyp = np.shape(yp)[1]
batch_partition_length = data_length // batch_size
data_x = np.zeros([batch_size, batch_partition_length,dx], dtype= np.float32)
data_y = np.zeros([batch_size, batch_partition_length,dy], dtype= np.float32)
data_yp = np.zeros([batch_size, batch_partition_length,dyp], dtype= np.float32)
for i in range(batch_size):
data_x[i] = x[batch_partition_length * i : batch_partition_length * (i+1)]
data_y[i] = y[batch_partition_length * i : batch_partition_length * (i+1)]
data_yp[i] = yp[batch_partition_length * i : batch_partition_length * (i+1)]
if epoch_overlap == None:
epoch_size = batch_partition_length // num_steps
for i in range(epoch_size):
x = data_x[:, i * num_steps:(i + 1) * num_steps]
y = data_y[:, i * num_steps:(i + 1) * num_steps]
yp = data_yp[:, i * num_steps:(i + 1) * num_steps]
yield (x, y, yp,s)
else:
epoch_size = (batch_partition_length - num_steps + 1)//(epoch_overlap+1)
for i in range(epoch_size):
x = data_x[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
y = data_y[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
yp = data_yp[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
yield (x, y, yp,s)
cum += num
"""
Generate batch data for kstep prediction
"""
def gen_batch_kstep(raw_x, raw_y,raw_yp, rnn_state, batch_size, num_steps, epoch_overlap):
data_length = len(raw_x)
dx = np.shape(raw_x)[1]
dy = np.shape(raw_y)[1]
dyp = np.shape(raw_yp)[1]
ds = np.shape(rnn_state)[1]
batch_partition_length = data_length // batch_size
data_x = np.zeros([batch_size, batch_partition_length,dx], dtype= np.float32)
data_y = np.zeros([batch_size, batch_partition_length,dy], dtype= np.float32)
data_yp = np.zeros([batch_size, batch_partition_length,dyp], dtype= np.float32)
data_s = np.zeros([batch_size, batch_partition_length,ds], dtype= np.float32)
for i in range(batch_size):
data_x[i] = raw_x[batch_partition_length * i : batch_partition_length * (i+1)]
data_y[i] = raw_y[batch_partition_length * i : batch_partition_length * (i+1)]
data_yp[i] = raw_yp[batch_partition_length * i : batch_partition_length * (i+1)]
data_s[i] = rnn_state[batch_partition_length * i : batch_partition_length * (i+1)]
if epoch_overlap == None:
epoch_size = batch_partition_length // num_steps
for i in range(epoch_size):
x = data_x[:, i * num_steps:(i + 1) * num_steps]
y = data_y[:, i * num_steps:(i + 1) * num_steps]
yp = data_yp[:, i * num_steps:(i + 1) * num_steps]
s = data_s[:, i * num_steps:(i + 1) * num_steps]
yield (x, y, yp, s)
else:
epoch_size = (batch_partition_length - num_steps + 1)//(epoch_overlap+1)
for i in range(epoch_size):
x = data_x[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
y = data_y[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
yp = data_yp[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
s = data_s[:, i*(epoch_overlap+1):i*(epoch_overlap+1)+num_steps]
yield (x, y, yp, s)
"""
Generate batch data for kstep prediction
"""
def gen_batch_kstep_layer(raw_x, raw_y,raw_yp, rnn_state):
data_length = len(raw_x)
dx = np.shape(raw_x)[1]
dy = np.shape(raw_y)[1]
dyp = np.shape(raw_yp)[1]
num_layers = len(rnn_state)
batch_size = data_length
batch_partition_length = 1
data_x = np.zeros([batch_size, batch_partition_length,dx], dtype= np.float32)
data_y = np.zeros([batch_size, batch_partition_length,dy], dtype= np.float32)
data_yp = np.zeros([batch_size, batch_partition_length,dyp], dtype= np.float32)
final_data_s = ()
for i in range(batch_size):
data_x[i] = raw_x[batch_partition_length * i : batch_partition_length * (i+1)]
data_y[i] = raw_y[batch_partition_length * i : batch_partition_length * (i+1)]
data_yp[i] = raw_yp[batch_partition_length * i : batch_partition_length * (i+1)]
for l in range(num_layers):
final_data_s += (rnn_state[l][:-1],)
yield (data_x, data_y, data_yp, final_data_s)
def gen_epochs(raw_data_x,raw_data_y,raw_data_yp, num_epochs, num_steps, batch_size,epoch_overlap):
for i in range(int(num_epochs)):
yield gen_batch(raw_data_x,raw_data_y, raw_data_yp, batch_size, num_steps, epoch_overlap)
def gen_epochs_multi(raw_data_x,raw_data_y, timeindex, num_epochs, num_steps, batch_size,epoch_overlap):
for i in range(int(num_epochs)):
yield gen_batch_multi(raw_data_x,raw_data_y, timeindex, batch_size, num_steps, epoch_overlap)
def reset_graph():
if 'sess' in globals() and sess:
sess.close()
tf.reset_default_graph()
"""
Define RNN graph
"""
def build_multilayer_rnn_graph_with_dynamic_rnn(cell_type, activation,state_size, num_steps, num_layers, input_size_x, input_size_y , learning_rate, lambda_l2_reg,random_seed=0):
reset_graph()
tf.set_random_seed(random_seed) #make reproducible results
input_size_x += input_size_y
"""Define the graph inputs"""
batch_size = tf.placeholder(tf.int32, [], name='batch_size')
x = tf.placeholder(tf.float32, [None, num_steps, input_size_x], name='x')
y = tf.placeholder(tf.float32, [None, num_steps, input_size_y], name='y')
input_prob = tf.placeholder(tf.float32, name='input_prob')
state_prob = tf.placeholder(tf.float32,name='state_prob')
output_prob = tf.placeholder(tf.float32,name='output_prob')
rnn_inputs = x
"""Define a single cell with variational dropout"""
def get_a_cell(state_size,input_prob,state_prob,num_input):
if cell_type == 'LSTM':
if activation == 'linear':
lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.identity, state_is_tuple=True)
cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
elif activation == 'relu':
lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.nn.relu, state_is_tuple=True)
cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
else: #tanh by default
lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True)
cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
elif cell_type == 'GRU':
if activation == 'linear':
gru=tf.nn.rnn_cell.GRUCell(state_size, activation = tf.identity)
cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
elif activation == 'relu':
gru=tf.nn.rnn_cell.GRUCell(state_size, activation = tf.nn.relu)
cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
else:
gru=tf.nn.rnn_cell.GRUCell(state_size)
cell_drop=tf.contrib.rnn.DropoutWrapper(gru,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
else:
if activation == 'linear':
cell_basic = tf.contrib.rnn.BasicRNNCell(state_size,activation=tf.identity)
cell_drop=tf.contrib.rnn.DropoutWrapper(cell_basic,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
elif activation == 'relu':
cell_basic = tf.contrib.rnn.BasicRNNCell(state_size, activation=tf.nn.relu)
cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True, dtype=tf.float32,
input_size=num_input, input_keep_prob=input_prob,
state_keep_prob=state_prob)
else: #tanh by default
cell_basic = tf.contrib.rnn.BasicRNNCell(state_size)
cell_drop = tf.contrib.rnn.DropoutWrapper(cell_basic, variational_recurrent=True, dtype=tf.float32,
input_size=num_input, input_keep_prob=input_prob,
state_keep_prob=state_prob)
return cell_drop
"""Wrap the cell in multilayer"""
cell=tf.nn.rnn_cell.MultiRNNCell([get_a_cell(state_size,input_prob,state_prob,input_size_x if layer==0 else state_size) for layer in range(num_layers)],state_is_tuple=True)
cell=tf.nn.rnn_cell.DropoutWrapper(cell,variational_recurrent=True,dtype=tf.float32,input_size=input_size_x,output_keep_prob=output_prob)
init_state = cell.zero_state(batch_size, dtype=tf.float32)
"""Build dynamic graph"""
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell=cell, inputs=rnn_inputs,initial_state=init_state)
"""Add prediction layer"""
with tf.variable_scope('softmax'):
W = tf.get_variable('W', [state_size, input_size_y])
b = tf.get_variable('b', [input_size_y], initializer=tf.constant_initializer(0.0))
rnn_outputs = tf.reshape(rnn_outputs, [-1, state_size])
predictions = tf.matmul(rnn_outputs, W) + b
    yy = tf.reshape(y, [-1, input_size_y])  # batch_size*num_steps; when you define a placeholder in TensorFlow, the shape fed in during the session must match the shape of the placeholder
"Mean squared error loss"
loss=tf.reduce_mean(tf.square(tf.reshape(predictions,[-1])-tf.reshape(yy,[-1])))
"Adding regularization"
if lambda_l2_reg > 0 :
cell_l2 = tf.reduce_sum([tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables() if not ("noreg" in tf_var.name or "Bias" in tf_var.name)])
Predict_l2 = tf.nn.l2_loss(W) #+ tf.nn.l2_loss(b)
total_loss = tf.reduce_sum(loss + lambda_l2_reg* tf.reduce_sum(cell_l2+Predict_l2) )
else:
total_loss = loss
"Define the train_step"
train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)
return dict(x=x,
y=y,
batch_size=batch_size,
input_prob=input_prob,
state_prob=state_prob,
output_prob=output_prob,
init_state=init_state,
final_state=final_state,
rnn_outputs = rnn_outputs,
total_loss= total_loss,
loss = loss,
train_step=train_step,
preds = predictions,
saver= tf.train.Saver())
"""
Train RNN graph
"""
def train_rnn(raw_data_x, raw_data_y, val_data_x, val_data_y,g, num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob, epoch_before_val = 50, max_checks_without_progress=50,epoch_overlap=None, verbose=True, save=False):
with tf.Session() as sess:
"initialize the variables"
sess.run(tf.global_variables_initializer())
raw_data_yp = np.insert(raw_data_y,0,0,axis=0)[:-1]
val_data_yp = np.insert(val_data_y,0,0,axis=0)[:-1]
"see the trainable variables"
# print("The trainable variables are:")
variable_names = [v.name for v in tf.trainable_variables()]
variable_shapes = [v.get_shape() for v in tf.trainable_variables()]
parameter_num = 0
for name, shape in zip(variable_names, variable_shapes):
# print('{}\nShape: {}'.format(name, shape))
parameter_num += shape[0]*shape[1] if np.size(shape)>1 else shape[0]
"train the graph"
training_losses = []
val_losses = []
        # set the early-stopping criterion
checks_without_progress = 0
best_loss = np.infty
for idx, epoch in enumerate(gen_epochs(raw_data_x,raw_data_y,raw_data_yp,num_epochs, num_steps, batch_size,epoch_overlap)):
training_loss = 0
steps = 0
training_state = None
for steps,(X, Y, YP) in enumerate(epoch):
feed_dict = {g['x']: np.dstack((X,YP)), g['y']: Y, g['batch_size']:batch_size, g['input_prob']: input_prob ,g['output_prob']: output_prob,g['state_prob']:state_prob}
# feed_dict = {g['x']: X, g['y']: Y, g['batch_size']:batch_size, g['input_prob']: 1 ,g['output_prob']: 1,g['state_prob']:1}
#continue to feed in if in the same class
if training_state is not None:
feed_dict[g['init_state']] = training_state
training_loss_, training_state, _ = sess.run([g['loss'],
g['final_state'],
g['train_step']],
feed_dict=feed_dict)
training_loss += training_loss_
if np.isnan(training_loss_):
                    print('Training loss became NaN -- aborting.')
return (None, None, None)
if verbose and idx%100==0:
print("Average training total loss for Epoch", idx, ":", training_loss/(steps+1))
training_losses.append(training_loss / (steps+1))
'''Test on validation set'''
if idx > epoch_before_val:
# print('Using validation for early stopping')
'''see performance on validation set and do early stopping'''
val_loss = 0
steps_val = 0
val_state = None
for steps_val,(X_val, Y_val, YP_val) in enumerate(gen_batch(val_data_x, val_data_y, val_data_yp, batch_size, num_steps,epoch_overlap)):
feed_dict_val = {g['x']: np.dstack((X_val,YP_val)), g['y']: Y_val, g['batch_size']:batch_size, g['input_prob']: 1 ,g['output_prob']: 1,g['state_prob']:1}
#continue to feed in if in the same class
if val_state is not None:
feed_dict_val[g['init_state']] = val_state
val_loss_,val_state = sess.run([g['loss'], g['final_state']],feed_dict=feed_dict_val)
val_loss += val_loss_
val_loss = val_loss/(steps_val+1)
val_losses.append(val_loss)
if val_loss < best_loss:
best_loss = val_loss
checks_without_progress = 0
g['saver'].save(sess, save)
else:
checks_without_progress += 1
if checks_without_progress > max_checks_without_progress:
print("Early stopping!")
return (training_losses, val_losses, int(parameter_num))
if isinstance(save, str):
g['saver'].save(sess, save)
print("Max number train epoch reached")
training_losses = np.array(training_losses)
val_losses = np.array(val_losses)
return (training_losses,val_losses, int(parameter_num))
"""
Train RNN graph for multiple series
"""
def train_rnn_multi(raw_data_x, raw_data_y, val_data_x, val_data_y, timeindex_train, timeindex_val, g, num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob, epoch_before_val = 50, max_checks_without_progress=50,epoch_overlap=None, verbose=True, save=False):
with tf.Session() as sess:
"initialize the variables"
sess.run(tf.global_variables_initializer())
"see the trainable variables"
# print("The trainable variables are:")
variable_names = [v.name for v in tf.trainable_variables()]
variable_shapes = [v.get_shape() for v in tf.trainable_variables()]
parameter_num = 0
for name, shape in zip(variable_names, variable_shapes):
# print('{}\nShape: {}'.format(name, shape))
parameter_num += shape[0]*shape[1] if np.size(shape)>1 else shape[0]
"train the graph"
training_losses = []
val_losses = []
        # set the early-stopping criterion
checks_without_progress = 0
best_loss = np.infty
for idx, epoch in enumerate(gen_epochs_multi(raw_data_x,raw_data_y, timeindex_train, num_epochs, num_steps, batch_size,epoch_overlap)):
training_loss = 0
steps = 0
s_threshold=0
training_state = None
for steps,(X, Y, YP, s) in enumerate(epoch):
feed_dict = {g['x']: np.dstack((X,YP)), g['y']: Y, g['batch_size']:batch_size, g['input_prob']: input_prob ,g['output_prob']: output_prob,g['state_prob']:state_prob}
#start to feed 0 initial for a new set of class
if s == s_threshold:
s_threshold += 1
training_state = None
#continue to feed in if in the same class
if training_state is not None:
feed_dict[g['init_state']] = training_state
training_loss_, training_state, _ = sess.run([g['loss'],
g['final_state'],
g['train_step']],
feed_dict=feed_dict)
training_loss += training_loss_
# print(steps)
# print(training_loss_)
if verbose and idx%100==0:
print("Average training total loss for Epoch", idx, ":", training_loss/(steps+1), steps, training_loss_)
training_losses.append(training_loss / (steps+1))
'''Test on validation set'''
if idx > epoch_before_val:
# print('Using validation for early stopping')
'''see performance on validation set and do early stopping'''
val_loss = 0
steps_val = 0
s_val_threshold = 0
val_state = None
for steps_val,(X_val, Y_val, YP_val, s_val) in enumerate(gen_batch_multi(val_data_x, val_data_y, timeindex_val, batch_size, num_steps, epoch_overlap)):
feed_dict_val = {g['x']: np.dstack((X_val,YP_val)), g['y']: Y_val, g['batch_size']:batch_size, g['input_prob']: 1 ,g['output_prob']: 1,g['state_prob']:1}
#start to feed 0 initial for a new set of class
if s_val == s_val_threshold:
s_val_threshold += 1
val_state = None
#continue to feed in if in the same class
if val_state is not None:
feed_dict_val[g['init_state']] = val_state
val_loss_,val_state = sess.run([g['loss'], g['final_state']],feed_dict=feed_dict_val)
val_loss += val_loss_
print('val')
print(val_loss)
val_loss = val_loss/(steps_val+1)
val_losses.append(val_loss)
if val_loss < best_loss:
best_loss = val_loss
checks_without_progress = 0
g['saver'].save(sess, save)
else:
checks_without_progress += 1
if checks_without_progress > max_checks_without_progress:
print("Early stopping!")
return (training_losses, val_losses, int(parameter_num))
if isinstance(save, str):
g['saver'].save(sess, save)
print("Max number train epoch reached")
training_losses = np.array(training_losses)
val_losses = np.array(val_losses)
return (training_losses,val_losses, int(parameter_num))
"""
Test RNN graph 0 step
"""
def test_rnn(test_data_x,test_data_y, g, checkpoint, input_prob, output_prob, state_prob, num_test):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
test_data_yp = np.insert(test_data_y,0,0,axis=0)[:-1]
"read the trained graph"
g['saver'].restore(sess, checkpoint)
"run the test points"
#run the whole sequence, one class one total run
for index,(X, Y, YP) in enumerate(gen_batch(test_data_x, test_data_y,test_data_yp, 1, num_test, None)):
feed_dict={g['x']: np.dstack((X,YP)), g['y']:Y, g['batch_size']:1, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}
preds, rnn_outputs = sess.run([g['preds'], g['rnn_outputs']], feed_dict)
loss = np.sum((preds[1:]-test_data_y[1:])**2,axis=0)/(test_data_y.shape[0]-1)
return (preds,loss,rnn_outputs)
"""
Test RNN graph 0 step for multiplayer afterwards
"""
def test_rnn_layer(test_data_x,test_data_y, g, checkpoint, input_prob, output_prob, state_prob, num_test, num_layers):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
test_data_yp = np.insert(test_data_y,0,0,axis=0)[:-1]
final = {}
"read the trained graph"
g['saver'].restore(sess, checkpoint)
"run the test points"
for index,(X, Y, YP) in enumerate(gen_batch(test_data_x, test_data_y,test_data_yp, 1, 1, None)):
if index >0:
feed_dict={g['x']: np.dstack((X,YP)), g['y']:Y,g['init_state']: rnn_outputs, g['batch_size']:1, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}
else:
feed_dict={g['x']: np.dstack((X,YP)), g['y']:Y, g['batch_size']:1, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}
preds, rnn_outputs = sess.run([g['preds'],g['final_state']], feed_dict)
if index>0:
final_preds = np.vstack((final_preds,preds))
else:
final_preds = preds
for i in range(num_layers):
if index >0:
final[i] = np.vstack((final[i],rnn_outputs[i]))
else:
final[i] = rnn_outputs[i]
final_inter_state=()
for i in range(num_layers):
final_inter_state += (final[i],)
loss = np.sum((final_preds[1:]-test_data_y[1:])**2,axis=0)/(test_data_y.shape[0]-1)
return (final_preds, loss, final_inter_state)
"""
Test RNN graph single layer
"""
def test_rnn_kstep(test_data_x,test_data_y, preds, rnn_outputs, g, checkpoint, input_prob, output_prob, state_prob, num_test, kstep = 3):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
result= {}
"read the trained graph"
g['saver'].restore(sess, checkpoint)
losses = []
for step_num in range(kstep):
k=step_num+1
for index,(X, Y, YP, S) in enumerate(gen_batch_kstep(test_data_x[k:], test_data_y[k:], preds[:-1],rnn_outputs[:-1], num_test-k,1, None)):
feed_dict={g['x']: np.dstack((X,YP)), g['y']:Y, g['init_state']: np.squeeze(S), g['batch_size']:num_test, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}
preds, rnn_outputs= sess.run([g['preds'], g['rnn_outputs']], feed_dict)
loss = np.sum((preds[1:]-test_data_y[1+k:])**2,axis=0)/test_data_y[1+k:].shape[0]
result[k] = preds
losses.append(loss)
return (result,losses)
"""
Test RNN graph multi layer
"""
def test_rnn_kstep_layer(test_data_x,test_data_y, preds, rnn_outputs, g, checkpoint, input_prob, output_prob, state_prob, num_test, kstep = 3):
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
result= {}
"read the trained graph"
g['saver'].restore(sess, checkpoint)
losses = []
for step_num in range(kstep):
k=step_num+1
for index,(X, Y, YP, S) in enumerate(gen_batch_kstep_layer(test_data_x[k:], test_data_y[k:], preds[:-1],rnn_outputs)):
feed_dict={g['x']: np.dstack((X,YP)), g['y']:Y, g['init_state']: S, g['batch_size']:num_test, g['input_prob']: input_prob,g['output_prob']:output_prob,g['state_prob']:state_prob}
preds, rnn_outputs= sess.run([g['preds'], g['final_state']], feed_dict)
loss = np.sum((preds[1:]-test_data_y[k+1:])**2,axis=0)/test_data_y[k+1:].shape[0]
result[k] = preds
losses.append(loss)
return (result,losses)
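# Minimal shape check (assumed, not part of the original file): gen_batch only
# manipulates numpy arrays, so its batching behaviour can be inspected without
# building a TensorFlow graph (importing this module still requires the TF 1.x
# dependency used above). Guarded so nothing runs on import.
if __name__ == '__main__':
    raw_x = np.random.rand(100, 3).astype(np.float32)
    raw_y = np.random.rand(100, 2).astype(np.float32)
    raw_yp = np.insert(raw_y, 0, 0, axis=0)[:-1]  # one-step-delayed feedback
    for x_b, y_b, yp_b in gen_batch(raw_x, raw_y, raw_yp,
                                    batch_size=5, num_steps=4,
                                    epoch_overlap=None):
        print(x_b.shape, y_b.shape, yp_b.shape)  # (5, 4, 3) (5, 4, 2) (5, 4, 2)
        break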
| 42.112554
| 276
| 0.578673
|
ef06c27200d8ad77d8f5058ee7f30f14751def04
| 1,799
|
py
|
Python
|
Preprocess/EdgeHistogramComputer.py
|
pection-zz/Lenquality-MachineLearning
|
61e10a7dcff07ad4e63ec9e88dd6f164cadf22ff
|
[
"MIT"
] | 1
|
2022-02-22T06:20:39.000Z
|
2022-02-22T06:20:39.000Z
|
Preprocess/EdgeHistogramComputer.py
|
pection/Lenquality-MachineLearning
|
61e10a7dcff07ad4e63ec9e88dd6f164cadf22ff
|
[
"MIT"
] | null | null | null |
Preprocess/EdgeHistogramComputer.py
|
pection/Lenquality-MachineLearning
|
61e10a7dcff07ad4e63ec9e88dd6f164cadf22ff
|
[
"MIT"
] | 1
|
2020-11-24T18:18:44.000Z
|
2020-11-24T18:18:44.000Z
|
import cv2
import numpy as np
import math
from DescriptorComputer import DescriptorComputer
class EdgeHistogramComputer(DescriptorComputer):
def __init__(self, rows, cols):
sqrt2 = math.sqrt(2)
self.kernels = (np.matrix([[1,1],[-1,-1]]), \
np.matrix([[1,-1],[1,-1]]), \
np.matrix([[sqrt2,0],[0,-sqrt2]]), \
np.matrix([[0,sqrt2],[-sqrt2,0]]), \
np.matrix([[2,-2],[-2,2]]));
self.bins = [len(self.kernels)]
self.range = [0,len(self.kernels)]
self.rows = rows
self.cols = cols
self.prefix = "EDH"
def compute(self, frame):
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
descriptor = []
dominantGradients = np.zeros_like(frame)
maxGradient = cv2.filter2D(frame, cv2.CV_32F, self.kernels[0])
maxGradient = np.absolute(maxGradient)
for k in range(1,len(self.kernels)):
kernel = self.kernels[k]
gradient = cv2.filter2D(frame, cv2.CV_32F, kernel)
gradient = np.absolute(gradient)
np.maximum(maxGradient, gradient, maxGradient)
indices = (maxGradient == gradient)
dominantGradients[indices] = k
frameH, frameW = frame.shape
for row in range(self.rows):
for col in range(self.cols):
mask = np.zeros_like(frame)
                mask[(frameH // self.rows) * row:(frameH // self.rows) * (row + 1), (frameW // self.cols) * col:(frameW // self.cols) * (col + 1)] = 255  # integer division keeps the slice bounds ints on Python 3
hist = cv2.calcHist([dominantGradients], [0], mask, self.bins, self.range)
hist = cv2.normalize(hist, None)
descriptor.append(hist)
return np.concatenate([x for x in descriptor])
if __name__ == "__main__":
path = r"C:\Python_program\Machine_learning\CNN_model\Allimage\B4\newbad\Base_4_BL_8_7.jpg"
img = cv2.imread(path)
Edge = EdgeHistogramComputer(2,2)
img2 = Edge.compute(img)
cv2.imshow("comput", img2)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 34.596154
| 121
| 0.674263
|
c3e2697d194a8de83c3d6a6a296af4a526cf2470
| 10,315
|
py
|
Python
|
lldb/test/API/tools/lldb-vscode/breakpoint/TestVSCode_setBreakpoints.py
|
danliew-apple/llvm-project
|
323163820208428b74f272421e4a8e1ab0a158fb
|
[
"Apache-2.0"
] | 34
|
2020-01-31T17:50:00.000Z
|
2022-02-16T20:19:29.000Z
|
lldb/test/API/tools/lldb-vscode/breakpoint/TestVSCode_setBreakpoints.py
|
danliew-apple/llvm-project
|
323163820208428b74f272421e4a8e1ab0a158fb
|
[
"Apache-2.0"
] | 14
|
2020-02-03T23:39:51.000Z
|
2021-07-20T16:24:25.000Z
|
lldb/test/API/tools/lldb-vscode/breakpoint/TestVSCode_setBreakpoints.py
|
danliew-apple/llvm-project
|
323163820208428b74f272421e4a8e1ab0a158fb
|
[
"Apache-2.0"
] | 5
|
2020-07-22T16:56:37.000Z
|
2022-01-08T02:50:20.000Z
|
"""
Test lldb-vscode setBreakpoints request
"""
import unittest2
import vscode
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import lldbvscode_testcase
import os
class TestVSCode_setBreakpoints(lldbvscode_testcase.VSCodeTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfWindows
def test_set_and_clear(self):
'''Tests setting and clearing source file and line breakpoints.
This packet is a bit tricky on the debug adaptor side since there
is no "clearBreakpoints" packet. Source file and line breakpoints
are set by sending a "setBreakpoints" packet with a source file
specified and zero or more source lines. If breakpoints have been
        set in the source file before, any existing breakpoints must remain
set, and any new breakpoints must be created, and any breakpoints
that were in previous requests and are not in the current request
must be removed. This function tests this setting and clearing
and makes sure things happen correctly. It doesn't test hitting
breakpoints and the functionality of each breakpoint, like
'conditions' and 'hitCondition' settings.'''
source_basename = 'main.cpp'
source_path = os.path.join(os.getcwd(), source_basename)
first_line = line_number('main.cpp', 'break 12')
second_line = line_number('main.cpp', 'break 13')
third_line = line_number('main.cpp', 'break 14')
lines = [first_line, second_line, third_line]
# Visual Studio Code Debug Adaptors have no way to specify the file
# without launching or attaching to a process, so we must start a
# process in order to be able to set breakpoints.
program = self.getBuildArtifact("a.out")
self.build_and_launch(program)
        # Set 3 breakpoints and verify that they got set correctly
response = self.vscode.request_setBreakpoints(source_path, lines)
line_to_id = {}
if response:
breakpoints = response['body']['breakpoints']
self.assertEquals(len(breakpoints), len(lines),
"expect %u source breakpoints" % (len(lines)))
for breakpoint in breakpoints:
line = breakpoint['line']
# Store the "id" of the breakpoint that was set for later
line_to_id[line] = breakpoint['id']
self.assertTrue(line in lines, "line expected in lines array")
self.assertTrue(breakpoint['verified'],
"expect breakpoint verified")
# There is no breakpoint delete packet, clients just send another
# setBreakpoints packet with the same source file with fewer lines.
# Below we remove the second line entry and call the setBreakpoints
# function again. We want to verify that any breakpoints that were set
# before still have the same "id". This means we didn't clear the
# breakpoint and set it again at the same location. We also need to
# verify that the second line location was actually removed.
lines.remove(second_line)
        # Set 2 breakpoints and verify that the previous breakpoints that were
# set above are still set.
response = self.vscode.request_setBreakpoints(source_path, lines)
if response:
breakpoints = response['body']['breakpoints']
self.assertEquals(len(breakpoints), len(lines),
"expect %u source breakpoints" % (len(lines)))
for breakpoint in breakpoints:
line = breakpoint['line']
# Verify the same breakpoints are still set within LLDB by
# making sure the breakpoint ID didn't change
self.assertEquals(line_to_id[line], breakpoint['id'],
"verify previous breakpoints stayed the same")
self.assertTrue(line in lines, "line expected in lines array")
self.assertTrue(breakpoint['verified'],
"expect breakpoint still verified")
# Now get the full list of breakpoints set in the target and verify
# we have only 2 breakpoints set. The response above could have told
# us about 2 breakpoints, but we want to make sure we don't have the
# third one still set in the target
response = self.vscode.request_testGetTargetBreakpoints()
if response:
breakpoints = response['body']['breakpoints']
self.assertEquals(len(breakpoints), len(lines),
"expect %u source breakpoints" % (len(lines)))
for breakpoint in breakpoints:
line = breakpoint['line']
# Verify the same breakpoints are still set within LLDB by
# making sure the breakpoint ID didn't change
self.assertEquals(line_to_id[line], breakpoint['id'],
"verify previous breakpoints stayed the same")
self.assertTrue(line in lines, "line expected in lines array")
self.assertTrue(breakpoint['verified'],
"expect breakpoint still verified")
# Now clear all breakpoints for the source file by passing down an
# empty lines array
lines = []
response = self.vscode.request_setBreakpoints(source_path, lines)
if response:
breakpoints = response['body']['breakpoints']
self.assertEquals(len(breakpoints), len(lines),
"expect %u source breakpoints" % (len(lines)))
# Verify with the target that all breakpoints have been cleared
response = self.vscode.request_testGetTargetBreakpoints()
if response:
breakpoints = response['body']['breakpoints']
self.assertEquals(len(breakpoints), len(lines),
"expect %u source breakpoints" % (len(lines)))
# Now set a breakpoint again in the same source file and verify it
# was added.
lines = [second_line]
response = self.vscode.request_setBreakpoints(source_path, lines)
if response:
breakpoints = response['body']['breakpoints']
self.assertEquals(len(breakpoints), len(lines),
"expect %u source breakpoints" % (len(lines)))
for breakpoint in breakpoints:
line = breakpoint['line']
self.assertTrue(line in lines, "line expected in lines array")
self.assertTrue(breakpoint['verified'],
"expect breakpoint still verified")
        # Now get the full list of breakpoints set in the target and verify
        # we have only 1 breakpoint set. The response above already told us
        # about this breakpoint, but we want to make sure no stale
        # breakpoints are still set in the target
response = self.vscode.request_testGetTargetBreakpoints()
if response:
breakpoints = response['body']['breakpoints']
self.assertEquals(len(breakpoints), len(lines),
"expect %u source breakpoints" % (len(lines)))
for breakpoint in breakpoints:
line = breakpoint['line']
self.assertTrue(line in lines, "line expected in lines array")
self.assertTrue(breakpoint['verified'],
"expect breakpoint still verified")
@skipIfWindows
def test_functionality(self):
'''Tests hitting breakpoints and the functionality of a single
breakpoint, like 'conditions' and 'hitCondition' settings.'''
source_basename = 'main.cpp'
source_path = os.path.join(os.getcwd(), source_basename)
loop_line = line_number('main.cpp', '// break loop')
program = self.getBuildArtifact("a.out")
self.build_and_launch(program)
# Set a breakpoint at the loop line with no condition and no
# hitCondition
breakpoint_ids = self.set_source_breakpoints(source_path, [loop_line])
self.assertEquals(len(breakpoint_ids), 1, "expect one breakpoint")
self.vscode.request_continue()
# Verify we hit the breakpoint we just set
self.verify_breakpoint_hit(breakpoint_ids)
# Make sure i is zero at first breakpoint
i = int(self.vscode.get_local_variable_value('i'))
self.assertEquals(i, 0, 'i != 0 after hitting breakpoint')
# Update the condition on our breakpoint
new_breakpoint_ids = self.set_source_breakpoints(source_path,
[loop_line],
condition="i==4")
self.assertEquals(breakpoint_ids, new_breakpoint_ids,
"existing breakpoint should have its condition "
"updated")
self.continue_to_breakpoints(breakpoint_ids)
i = int(self.vscode.get_local_variable_value('i'))
self.assertEquals(i, 4,
'i != 4 showing conditional works')
new_breakpoint_ids = self.set_source_breakpoints(source_path,
[loop_line],
hitCondition="2")
self.assertEquals(breakpoint_ids, new_breakpoint_ids,
"existing breakpoint should have its condition "
"updated")
        # Continue with a hitCondition of 2 and expect it to skip 1 value
self.continue_to_breakpoints(breakpoint_ids)
i = int(self.vscode.get_local_variable_value('i'))
self.assertEquals(i, 6,
'i != 6 showing hitCondition works')
# continue after hitting our hitCondition and make sure it only goes
# up by 1
self.continue_to_breakpoints(breakpoint_ids)
i = int(self.vscode.get_local_variable_value('i'))
self.assertEquals(i, 7,
'i != 7 showing post hitCondition hits every time')
| 50.072816
| 78
| 0.613088
|
46f71c943050902ec31441de0602aa6bdfc7b6ce
| 84
|
py
|
Python
|
src/visualization/recall.py
|
FilipCvetko/clinical_notes
|
5fa0d26898e0083fcf424cdd61d2190b32a495be
|
[
"MIT"
] | 2
|
2021-11-17T16:51:47.000Z
|
2021-12-16T23:00:26.000Z
|
src/visualization/recall.py
|
FilipCvetko/clinical_notes
|
5fa0d26898e0083fcf424cdd61d2190b32a495be
|
[
"MIT"
] | null | null | null |
src/visualization/recall.py
|
FilipCvetko/clinical_notes
|
5fa0d26898e0083fcf424cdd61d2190b32a495be
|
[
"MIT"
] | null | null | null |
import streamlit as st
def recall_app():
st.title("Patient notes collection")
| 14
| 40
| 0.72619
|
ad348ac0813f1f923264e97fd9c131d69d421ea5
| 3,686
|
py
|
Python
|
mapss/static/packages/arches/arches/management/commands/plugin.py
|
MPI-MAPSS/MAPSS
|
3a5c0109758801717aaa8de1125ca5e98f83d3b4
|
[
"CC0-1.0"
] | null | null | null |
mapss/static/packages/arches/arches/management/commands/plugin.py
|
MPI-MAPSS/MAPSS
|
3a5c0109758801717aaa8de1125ca5e98f83d3b4
|
[
"CC0-1.0"
] | null | null | null |
mapss/static/packages/arches/arches/management/commands/plugin.py
|
MPI-MAPSS/MAPSS
|
3a5c0109758801717aaa8de1125ca5e98f83d3b4
|
[
"CC0-1.0"
] | null | null | null |
"""
ARCHES - a program developed to inventory and manage immovable cultural heritage.
Copyright (C) 2013 J. Paul Getty Trust and World Monuments Fund
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import uuid
from arches.management.commands import utils
from arches.app.models import models
from django.core.management.base import BaseCommand, CommandError
from django.db.utils import IntegrityError
class Command(BaseCommand):
"""
Commands for managing Arches plugins
"""
def add_arguments(self, parser):
parser.add_argument("operation", nargs="?")
parser.add_argument("-s", "--source", action="store", dest="source", default="", help="Widget json file to be loaded")
parser.add_argument("-n", "--name", action="store", dest="name", default="", help="The name of the widget to unregister")
def handle(self, *args, **options):
if options["operation"] == "register":
self.register(source=options["source"])
if options["operation"] == "unregister":
self.unregister(name=options["name"])
if options["operation"] == "list":
self.list()
if options["operation"] == "update":
self.update(source=options["source"])
def register(self, source):
"""
Inserts a plugin into the arches db
"""
import json
details = {}
with open(source) as f:
details = json.load(f)
try:
uuid.UUID(details["pluginid"])
except:
details["pluginid"] = str(uuid.uuid4())
print("Registering plugin with pluginid: {}".format(details["pluginid"]))
instance = models.Plugin(
pluginid=details["pluginid"],
name=details["name"],
icon=details["icon"],
component=details["component"],
componentname=details["componentname"],
config=details["config"],
slug=details["slug"],
sortorder=details["sortorder"],
)
instance.save()
def update(self, source):
"""
Updates an existing plugin in the arches db
"""
import json
details = {}
with open(source) as f:
details = json.load(f)
instance = models.Plugin.objects.get(name=details["name"])
instance.icon = details["icon"]
instance.component = details["component"]
instance.componentname = details["componentname"]
instance.config = details["config"]
instance.save()
def unregister(self, name):
"""
        Removes a plugin from the system
"""
try:
instance = models.Plugin.objects.get(name=name)
instance.delete()
except Exception as e:
print(e)
def list(self):
"""
Lists registered plugins
"""
try:
instances = models.Plugin.objects.all()
for instance in instances:
print(instance.name)
except Exception as e:
print(e)
| 29.488
| 129
| 0.613945
|
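The register operation above reads a JSON file whose keys map one-to-one onto the Plugin model fields. A hedged sketch of such a file and of the corresponding management-command invocations follows; the plugin name, slug and UUID are hypothetical.

# Hypothetical plugin definition consumed by the "register" operation above;
# every key matches a field read in Command.register().
import json

plugin_definition = {
    "pluginid": "a5a05a71-2f65-4e97-a9ff-4ec64485ee95",  # any UUID; an invalid one is regenerated
    "name": "Reports",
    "icon": "fa fa-file",
    "component": "views/components/plugins/reports",
    "componentname": "reports",
    "config": {},
    "slug": "reports",
    "sortorder": 0,
}

with open("reports_plugin.json", "w") as f:
    json.dump(plugin_definition, f, indent=4)

# Based on the argparse flags defined above, the file would then be used as:
#   python manage.py plugin register -s reports_plugin.json
#   python manage.py plugin update -s reports_plugin.json
#   python manage.py plugin unregister -n Reports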
160513f13cec26a8b8bb6a1c63191521bf4b0c47
| 2,108
|
py
|
Python
|
application/models/topic.py
|
Rsl1122/Forum-Aurum
|
d9f2d6687b4411a3d9cba4c1a01923f6d4ba0c69
|
[
"MIT"
] | null | null | null |
application/models/topic.py
|
Rsl1122/Forum-Aurum
|
d9f2d6687b4411a3d9cba4c1a01923f6d4ba0c69
|
[
"MIT"
] | 2
|
2018-04-22T20:08:25.000Z
|
2018-08-11T20:57:47.000Z
|
application/models/topic.py
|
Rsl1122/Forum-Aurum
|
d9f2d6687b4411a3d9cba4c1a01923f6d4ba0c69
|
[
"MIT"
] | null | null | null |
import os
from sqlalchemy import text
from application import db
class Topic(db.Model):
__tablename__ = "topic"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
created = db.Column(db.DateTime, default=db.func.current_timestamp())
account_id = db.Column(db.Integer, db.ForeignKey('account.id'), nullable=False)
account = db.relationship("User", back_populates="topics", lazy=True)
area_id = db.Column(db.Integer, db.ForeignKey('area.id'), nullable=False)
area = db.relationship("Area", back_populates="topics", lazy=True)
messages = db.relationship("Message", back_populates='topic', lazy=True)
def __init__(self, name, area_id):
self.name = name
self.area_id = area_id
def last_post(self):
return self.messages[-1]
def last_post_by(self):
return self.last_post().account.name
def last_post_created(self):
return self.last_post().created
@staticmethod
def find_topic_count():
stmt = text("SELECT COUNT(*) as c FROM topic LIMIT 1")
res = db.engine.execute(stmt)
for row in res:
return row[0]
@staticmethod
def find_hot_topic():
stmt = None
if os.environ.get("HEROKU"):
stmt = text("SELECT topic.name, COUNT(*) as c FROM message"
" JOIN topic on message.topic_id = topic.id"
" WHERE message.created > now() - INTERVAL '1 WEEK'"
" GROUP BY topic.id"
" ORDER BY c DESC"
" LIMIT 1"
)
else:
stmt = text("SELECT topic.name, COUNT(*) as c FROM message"
" JOIN topic on message.topic_id = topic.id"
" WHERE message.created > datetime('now', '-7 day')"
" GROUP BY topic.id"
" ORDER BY c DESC"
" LIMIT 1"
)
res = db.engine.execute(stmt)
for row in res:
return row[0]
| 31.939394
| 83
| 0.552657
|
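A short usage sketch for the Topic model above, assuming a Flask application context and an existing Area row with id 1; the topic name is hypothetical.

# Hedged usage sketch for the model above.
from application import db
from application.models.topic import Topic

def create_topic_example():
    topic = Topic(name="General discussion", area_id=1)
    db.session.add(topic)
    db.session.commit()
    # Raw-SQL helpers defined on the model:
    print(Topic.find_topic_count())  # total number of topics
    print(Topic.find_hot_topic())    # most active topic of the past week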
1ad7fb616b93780254e6d693ae294b2b656039e6
| 3,117
|
py
|
Python
|
src/_cffi_src/openssl/cryptography.py
|
hboshnak/cryptography
|
137074fcc419d62db82d23955bd4d0429eafb211
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2019-09-20T18:51:44.000Z
|
2019-09-20T18:51:44.000Z
|
src/_cffi_src/openssl/cryptography.py
|
hboshnak/cryptography
|
137074fcc419d62db82d23955bd4d0429eafb211
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 11
|
2022-03-04T14:14:10.000Z
|
2022-03-31T01:50:05.000Z
|
src/_cffi_src/openssl/cryptography.py
|
simo5/cryptography
|
617c66e2b9dcc311c2e5221b50a45ec99f5d0c67
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2022-03-31T04:00:31.000Z
|
2022-03-31T04:00:31.000Z
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
INCLUDES = """
/* define our OpenSSL API compatibility level to 1.0.1. Any symbols older than
that will raise an error during compilation. We can raise this number again
after we drop 1.0.2 support in the distant future. */
#define OPENSSL_API_COMPAT 0x10001000L
#include <openssl/opensslv.h>
#if defined(LIBRESSL_VERSION_NUMBER)
#define CRYPTOGRAPHY_IS_LIBRESSL 1
#else
#define CRYPTOGRAPHY_IS_LIBRESSL 0
#endif
#if defined(OPENSSL_IS_BORINGSSL)
#define CRYPTOGRAPHY_IS_BORINGSSL 1
#else
#define CRYPTOGRAPHY_IS_BORINGSSL 0
#endif
/*
LibreSSL removed e_os2.h from the public headers so we'll only include it
if we're using vanilla OpenSSL.
*/
#if !CRYPTOGRAPHY_IS_LIBRESSL
#include <openssl/e_os2.h>
#endif
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <Wincrypt.h>
#include <Winsock2.h>
#endif
#if CRYPTOGRAPHY_IS_LIBRESSL
#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_322 \
(LIBRESSL_VERSION_NUMBER < 0x3020200f)
#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_332 \
(LIBRESSL_VERSION_NUMBER < 0x3030200f)
#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_340 \
(LIBRESSL_VERSION_NUMBER < 0x3040000f)
#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 \
(LIBRESSL_VERSION_NUMBER < 0x3050000f)
#else
#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_322 (0)
#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_332 (0)
#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_340 (0)
#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 (0)
#endif
#if OPENSSL_VERSION_NUMBER < 0x10100000
#error "pyca/cryptography MUST be linked with Openssl 1.1.0 or later"
#endif
#define CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER \
(OPENSSL_VERSION_NUMBER >= 0x10101040 && !CRYPTOGRAPHY_IS_LIBRESSL)
#define CRYPTOGRAPHY_OPENSSL_300_OR_GREATER \
(OPENSSL_VERSION_NUMBER >= 0x30000000 && !CRYPTOGRAPHY_IS_LIBRESSL)
#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111 \
(OPENSSL_VERSION_NUMBER < 0x10101000 || CRYPTOGRAPHY_IS_LIBRESSL)
#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B \
(OPENSSL_VERSION_NUMBER < 0x10101020 || CRYPTOGRAPHY_IS_LIBRESSL)
#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111D \
(OPENSSL_VERSION_NUMBER < 0x10101040 || CRYPTOGRAPHY_IS_LIBRESSL)
#if (CRYPTOGRAPHY_OPENSSL_LESS_THAN_111D && !CRYPTOGRAPHY_IS_LIBRESSL && \
!defined(OPENSSL_NO_ENGINE)) || defined(USE_OSRANDOM_RNG_FOR_TESTING)
#define CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE 1
#else
#define CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE 0
#endif
"""
TYPES = """
static const int CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER;
static const int CRYPTOGRAPHY_OPENSSL_300_OR_GREATER;
static const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_111;
static const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B;
static const int CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE;
static const int CRYPTOGRAPHY_LIBRESSL_LESS_THAN_340;
static const int CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350;
static const int CRYPTOGRAPHY_IS_LIBRESSL;
static const int CRYPTOGRAPHY_IS_BORINGSSL;
"""
FUNCTIONS = """
"""
CUSTOMIZATIONS = """
"""
| 30.861386
| 79
| 0.812961
|
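The static const int declarations in TYPES above are what make these compile-time flags visible to Python. A hedged sketch of checking them at runtime follows; the import path is an assumption about how the compiled cffi module is exposed.

# Hedged sketch: the exact module path is an assumption, but the flag names
# come from the TYPES block above.
from cryptography.hazmat.bindings.openssl.binding import Binding

lib = Binding().lib
if lib.CRYPTOGRAPHY_IS_LIBRESSL:
    print("linked against LibreSSL")
elif lib.CRYPTOGRAPHY_IS_BORINGSSL:
    print("linked against BoringSSL")
else:
    print("linked against OpenSSL",
          "3.0+" if lib.CRYPTOGRAPHY_OPENSSL_300_OR_GREATER else "1.1.x")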
f12a7cb2ba41c8536aa061c3c4c4e63526343133
| 3,900
|
py
|
Python
|
lib/surface/logging/read.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/logging/read.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/logging/read.py
|
bopopescu/Google-Cloud-SDK-1
|
c4683bacb2f6192d8a816932e438a0493085469b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:13:29.000Z
|
2020-07-24T20:13:29.000Z
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'logging read' command."""
import datetime
from googlecloudsdk.api_lib.logging import common
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
class Read(base.Command):
"""Reads log entries."""
@staticmethod
def Args(parser):
"""Register flags for this command."""
base.LIMIT_FLAG.AddToParser(parser)
parser.add_argument(
'log_filter', help=('A filter expression that specifies the '
'log entries to return.'),
nargs='?')
order_arg = base.ChoiceArgument(
'--order',
choices=('desc', 'asc'),
required=False,
default='desc',
help_str='Ordering of returned log entries based on timestamp field.'
)
order_arg.AddToParser(parser)
parser.add_argument(
'--freshness', required=False, type=arg_parsers.Duration(),
help=('Return entries that are not older than this value. '
'Works only with DESC ordering and filters without a timestamp. '
'See $ gcloud topic datetimes for information on '
'duration formats.'),
default='1d')
util.AddNonProjectArgs(parser, 'Read log entries')
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
The list of log entries.
"""
# Take into account freshness only if all requirements are met.
if (args.freshness and args.order == 'desc' and
(not args.log_filter or 'timestamp' not in args.log_filter)):
# Argparser returns freshness in seconds.
freshness = datetime.timedelta(seconds=args.freshness)
# Cloud Logging uses timestamps in UTC timezone.
last_timestamp = datetime.datetime.utcnow() - freshness
# Construct timestamp filter.
log_filter = ('timestamp>="%s"' % util.FormatTimestamp(last_timestamp))
# Append any user supplied filters.
if args.log_filter:
log_filter += ' AND (%s)' % args.log_filter
else:
log_filter = args.log_filter
return common.FetchLogs(log_filter,
order_by=args.order,
limit=args.limit,
parent=util.GetParentFromArgs(args))
Read.detailed_help = {
'DESCRIPTION': """\
Reads log entries. Log entries matching *log-filter* are returned in
order of decreasing timestamps, most-recent entries first. If the log
entries come from multiple logs, then entries from different logs
might be intermingled in the results.
""",
'EXAMPLES': """\
To read log entries from Google Compute Engine instances, run:
$ {command} "resource.type=gce_instance"
To read log entries with severity ERROR or higher, run:
$ {command} "severity>=ERROR"
To read log entries written in a specific time window, run:
$ {command} 'timestamp<="2015-05-31T23:59:59Z" AND timestamp>="2015-05-31T00:00:00Z"'
Detailed information about filters can be found at:
[](https://cloud.google.com/logging/docs/view/advanced_filters)
""",
}
| 36.448598
| 95
| 0.66359
|
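The freshness handling in Run() above reduces to building a timestamp filter and ANDing it with any user-supplied filter. A standalone sketch follows, with util.FormatTimestamp approximated by isoformat.

# Self-contained sketch of the filter construction performed in Run() above.
import datetime

def build_filter(user_filter, freshness_seconds, order="desc"):
    if freshness_seconds and order == "desc" and (
            not user_filter or "timestamp" not in user_filter):
        cutoff = datetime.datetime.utcnow() - datetime.timedelta(seconds=freshness_seconds)
        log_filter = 'timestamp>="%sZ"' % cutoff.isoformat()
        if user_filter:
            log_filter += " AND (%s)" % user_filter
        return log_filter
    return user_filter

print(build_filter("severity>=ERROR", 86400))  # ERROR-or-worse entries from the last day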
f69a792a0ba28cc5f663cabc37474b1c513ca384
| 7,627
|
py
|
Python
|
eternabench/chemmapping_utils.py
|
eternagame/EternaBench
|
1df177603ce326c1f68931a027b449f44f38c203
|
[
"MIT"
] | null | null | null |
eternabench/chemmapping_utils.py
|
eternagame/EternaBench
|
1df177603ce326c1f68931a027b449f44f38c203
|
[
"MIT"
] | null | null | null |
eternabench/chemmapping_utils.py
|
eternagame/EternaBench
|
1df177603ce326c1f68931a027b449f44f38c203
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import sys
sys.path.append('/Users/hwayment/das/github/')
from RDATKit import rdatkit
import subprocess as sp
from arnie.bpps import bpps
from glob import glob
import sys, os
from copy import deepcopy
def write_rdat_as_dataframe(rdat_file, verbose=True):
df = pd.DataFrame()
rdat = rdatkit.RDATFile()
rdat.load(open(rdat_file))
print(rdat.comments)
printed_modifier_warning = False
print(rdat.annotations)
constructs = [x for x in rdat.constructs.keys()]
for c in constructs:
for dat_ind in range(len(rdat.constructs[c].data)):
annotations = [x for x in rdat.constructs[c].data[dat_ind].annotations.keys()]
dct_to_add = deepcopy(rdat.annotations)
dct_to_add.update({'construct': c, 'filename': rdat.filename, 'seqpos': [x-1 for x in rdat.constructs[c].seqpos]})
for k in annotations:
if k in dct_to_add.keys():
if k=='modifier':
if not printed_modifier_warning:
print('Note: updating modifier for some')
printed_modifier_warning=True
dct_to_add.update({k: ', '.join(rdat.constructs[c].data[dat_ind].annotations[k][0])})
elif k=='MAPseq':
# parse out sequence-specific MAPseq tags: like project name, design name, etc
for field in rdat.constructs[c].data[dat_ind].annotations[k]:
key = field.split(':')[0]
subfield = ':'.join(field.split(':')[1:])
dct_to_add[key] = subfield
else:
dct_to_add[k].append(rdat.constructs[c].data[dat_ind].annotations[k][0])
else:
dct_to_add.update({k: rdat.constructs[c].data[dat_ind].annotations[k][0]})
dct_to_add.update({'reactivity': rdat.constructs[c].data[dat_ind].values,
'errors': rdat.constructs[c].data[dat_ind].errors})
df = df.append(dct_to_add, ignore_index=True)
return df
def filter_dataframe_with_cdhit(df, rdat_identifier):
#write fasta file for CD-HIT-EST
with open('%s.fasta' % rdat_identifier, 'w') as f:
for i,seq in enumerate(df['sequence']):
f.write(">%d\n" % i)
f.write("%s\n" % seq[:-1*len('AAAAGAAACAACAACAACAAC')]) # removing tail
print('Running CD-HIT-EST on %s...' % rdat_identifier)
p = sp.Popen(('%s/cd-hit-est -i %s.fasta -o %s_cdhit_output -c 0.8' % (os.environ['CDHIT_PATH'], rdat_identifier,rdat_identifier)).split(' '),
stdout=sp.PIPE, stderr=sp.PIPE)
p.wait()
clusters, local_clust=[],[]
df['passed_CDHIT_filter'] = False
print('Getting sequences from each cluster')
with open('%s_cdhit_output.clstr' % rdat_identifier,'r') as f:
for line in f.readlines():
if not line.startswith('>'):
if '>' in line:
local_clust.append(int(line.split('>')[1].split('.')[0]))
else:
clusters.append(local_clust)
local_clust=[]
for cluster in clusters:
if len(cluster) > 0:
if 'signal_to_noise' in df.keys():
clust_stn = [float(x.split(':')[-1]) for x in df.iloc[cluster]['signal_to_noise'].values]
df.loc[cluster[np.argmax(clust_stn)],['passed_CDHIT_filter']]=True
else:
df.loc[cluster[0],['passed_CDHIT_filter']]=True
return df
def filter_FMN_containing(df):
df['chem_string'] = [''.join(chem_list) for chem_list in df['chemical']]
filtered_df = df.loc[~df['chem_string'].str.contains('FMN:[1-9]', regex=True)]
filtered_df.drop(columns=['chem_string'],inplace=True)
return filtered_df
def get_polyA_indicator(row, polyA_len=3, DEBUG=False):
'''get indicator for which nucleotides are in polyA region.'''
N = len(row['trimmed_sequence'])
indicator=[0]*(polyA_len-1)
for i in range(polyA_len,N+1):
if ''.join(row['trimmed_sequence'][i-polyA_len:i]) == 'A'*polyA_len:
indicator.append(1)
else:
indicator.append(0)
if DEBUG:
if len(indicator) != N:
print(row['trimmed_sequence'])
print(row['sequence'])
print(indicator)
return indicator
def write_concatenated_dataframe(df, reactivity_field='reactivity'):
    '''Input is a dataframe with one row per design, optionally including fields "p_<package_identifier>"
    containing the predicted p_unp vector for each design.
    Returns a concatenated dataframe in which every nucleotide gets its own row.
'''
# cut out parts of sequence that don't have reactivity data
df['trimmed_sequence'] = df.apply(lambda row: [list(row['sequence'])[x] for x in row['seqpos'] if x < len(row['sequence'])], axis=1)
n_constructs=len(df)
df['length'] = [len(x) for x in df['sequence']]
df = df.loc[df.length>6] # only use seqs longer than length that will be trimmed at polyA stage
# doing this here to only apply to trimmed_sequence, but could be done earlier
df['in_polyA'] = df.apply(lambda row: get_polyA_indicator(row, polyA_len=6), axis=1)
    # bad hack (and that's saying something given the other data handling hacks in here)
# ... this is to get rid of reactivity values from constructs
# where the length of the sequence is shorter than the reactivities recorded .. some of these in R75
df[reactivity_field] = df.apply(lambda row: row[reactivity_field][:len(row['sequence'])], axis=1)
concat_data = {reactivity_field: np.concatenate([x for x in df[reactivity_field]]),
'in_polyA': np.concatenate([x for x in df['in_polyA']]),
'nucleotide': np.concatenate([x for x in df['trimmed_sequence']])}
if 'errors' in df.keys():
df['errors'] = df.apply(lambda row: row['errors'][:len(row['sequence'])], axis=1)
concat_data['errors'] = np.concatenate([x for x in df['errors']])
#propagate construct-level information from before
keys = [x for x in df.keys() if x not in [reactivity_field,'sequence','trimmed_sequence','seqpos','errors', 'in_polyA']]
print('n_constructs',n_constructs)
for key in keys:
dat_to_add = []
for i in df.index:
dat_to_add.extend([df[key][i]]*len(df['trimmed_sequence'][i]))
concat_data[key] = dat_to_add
package_list = [x for x in df.keys() if x.startswith('p_')]
for pkg in package_list:
concat_data[pkg] = np.concatenate([x for x in df[pkg]])
# for k, v in concat_data.items():
# print(k, len(v))
return pd.DataFrame(data=concat_data)
def bootstrap_inds(len_item):
return np.random.choice(range(len_item), len_item)
def filter_data(cdf, winsorize_cutoff=95):
'''Filter concatenated dataframe to remove reactivity values below zero and above percentile cutoff.'''
cdf_filt = pd.DataFrame()
orig_len = len(cdf)
cdf = cdf.loc[cdf['reactivity']>0]
cutoff = np.percentile(cdf['reactivity'].values,winsorize_cutoff)
cdf_filt = cdf.loc[cdf['reactivity']<cutoff]
new_len = len(cdf_filt)
print('%d of %d nucleotides (%.2f%%) removed, cutoff = %.2f' % (orig_len-new_len, orig_len, 100*(orig_len-new_len)/orig_len, cutoff))
return cdf_filt
| 40.569149
| 146
| 0.604301
|
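A worked example for get_polyA_indicator above: with polyA_len=3 the indicator starts with two zeros and then marks a 1 at every position that closes a run of three consecutive A's. The sequence is hypothetical, and the import assumes the module's own dependencies (RDATKit, arnie) are available.

# Worked example; values traced directly from the function above.
from eternabench.chemmapping_utils import get_polyA_indicator

row = {"trimmed_sequence": list("GAAAAC")}
# windows: i=3 'GAA'->0, i=4 'AAA'->1, i=5 'AAA'->1, i=6 'AAC'->0
assert get_polyA_indicator(row, polyA_len=3) == [0, 0, 0, 1, 1, 0]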
a3450751687fe343775d500b98cda79c690ede2c
| 5,378
|
py
|
Python
|
squid/squid.py
|
mrkksparrow/plugins
|
04362fd13ca1d6917f39be2809414c99fbe7c642
|
[
"BSD-2-Clause"
] | 36
|
2016-03-30T10:59:37.000Z
|
2022-02-28T21:46:42.000Z
|
squid/squid.py
|
mrkksparrow/plugins
|
04362fd13ca1d6917f39be2809414c99fbe7c642
|
[
"BSD-2-Clause"
] | 21
|
2016-10-07T06:20:56.000Z
|
2022-03-10T12:09:34.000Z
|
squid/squid.py
|
mrkksparrow/plugins
|
04362fd13ca1d6917f39be2809414c99fbe7c642
|
[
"BSD-2-Clause"
] | 65
|
2016-03-16T09:11:48.000Z
|
2022-03-09T13:10:08.000Z
|
#!/usr/bin/python
import json
import sys
import argparse
import os.path
import time
import urllib.request as urlconnection
# If you make any impacting changes to this plugin, kindly increment the plugin version here.
PLUGIN_VERSION = 1
# Setting this to true will alert you when there is a communication problem while posting plugin data to the server
HEARTBEAT = "true"
# Enter the host name configured for Squid
HOST_NAME = ""
# Enter the port configured for Squid
PORT = ""
# Check whether the OS is Windows or Linux to set the plugin data file path
if sys.platform == "linux":
FILE_PATH = "/opt/site24x7/monagent/plugins/squid/squid_metrics.json"
elif sys.platform == "win32":
FILE_PATH = "C:\Program Files (x86)\Site24x7\WinAgent\monitoring\Plugins\squid\squid_metrics.json"
URL = ""
result_json = {}
# previous_data and current_data hold timestamped counter samples used to calculate per-second values
output = []
previous_data = {}
current_data = {}
METRIC_UNITS = {
"client_http.requests" : "requests/second",
"client_http.hits" : "hits/second",
"client_http.errors" : "errors/second",
"client_http.kbytes_in" : "kibibytes/second",
"client_http.kbytes_out" : "kibibytes/second",
"client_http.hit_kbytes_out" : "kibibytes/second",
"server.all.requests" : "requests/second",
"server.all.errors" : "errors/second",
"server.all.kbytes_in" : "kibibytes/second",
"server.all.kbytes_out" : "kibibytes/second",
"server.http.requests" : "requests/second",
"server.http.errors" : "errors/second",
"server.http.kbytes_in" : "kibibytes/second",
"server.http.kbytes_out" : "kibibytes/second",
"server.ftp.requests" : "requests/second",
"server.ftp.errors" : "errors/second",
"server.ftp.kbytes_in" : "kibibytes/second",
"server.ftp.kbytes_out" : "kibibytes/second",
"server.other.requests" : "requests/second",
"server.other.errors" : "errors/second",
"server.other.kbytes_in" : "kibibytes/second",
"server.other.kbytes_out" : "kibibytes/second",
"unlink.requests" : "requests/second",
"page_faults" : "faults/second",
"aborted_requests" : "requests/second"
}
# Try connecting to the Squid counters URL and return the raw response lines
def get_squid_counter():
try:
URL = "http://" + HOST_NAME + ":" + PORT + "/squid-internal-mgr/counters"
response = urlconnection.urlopen(URL)
output = response.read()
output = output.strip()
output = output.decode("utf-8")
output = output.split('\n')
except Exception as e:
output = [False, str(e)]
return output
# Parse the monitored counters out of the HTTP response output
def get_output():
squid_counter = {}
try:
output = get_squid_counter()
for each in output:
counter, value = each.split(" = ")
if counter in METRIC_UNITS.keys():
squid_counter[counter] = value
squid_counter['time'] = time.time()
except Exception as e:
squid_counter["status"] = 0
squid_counter["msg"] = str(e)
return squid_counter
# Calculate per-second values from the difference between current_data and previous_data divided by the elapsed time
def calculate_persecond(previous_data, current_data):
result = {}
try:
time_diff = int(current_data['time'] - previous_data['time'])
current_data = get_output()
for each in METRIC_UNITS:
result[each] = format((float(current_data[each]) - float(previous_data[each])) / time_diff, '.2f')
except Exception as e:
result["status"] = 0
result["msg"] = str(e)
with open(FILE_PATH, 'w') as outfile:
json.dump(current_data, outfile)
return result
def collect_data():
result = {}
try:
output = get_squid_counter()
if not output[0]:
            if os.path.exists(FILE_PATH):  # drop the stale sample file when Squid is unreachable
os.remove(FILE_PATH)
result['status'] = 0
result['msg'] = output[1]
else:
if os.path.exists(FILE_PATH):
with open(FILE_PATH) as json_file:
previous_data = json.load(json_file)
else:
previous_data = get_output()
time.sleep(20)
current_data = get_output()
result = calculate_persecond(previous_data, current_data)
except Exception as e:
result["status"] = 0
result["msg"] = str(e)
return result
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--host_name', help="squid host_name", type=str)
parser.add_argument('--port', help="squid port", type=str)
args = parser.parse_args()
if args.host_name:
HOST_NAME = args.host_name
if args.port:
PORT = args.port
    # Check whether Squid is reachable; if not, delete the previous data file. Otherwise use the saved file as previous_data when it exists, else take two samples 20 seconds apart.
result_json = collect_data()
result_json['plugin_version'] = PLUGIN_VERSION
result_json['heartbeat_required'] = HEARTBEAT
result_json['units'] = METRIC_UNITS
print(json.dumps(result_json, indent=4, sort_keys=False))
| 31.635294
| 177
| 0.632577
|
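The per-second math in calculate_persecond above is simply the current counter minus the previous counter, divided by the elapsed seconds between the two samples. A tiny sketch with hypothetical counter values follows.

# Hypothetical samples taken 20 seconds apart, mirroring calculate_persecond.
previous_sample = {"client_http.requests": "1200", "time": 1700000000.0}
current_sample = {"client_http.requests": "1450", "time": 1700000020.0}

elapsed = int(current_sample["time"] - previous_sample["time"])  # 20 seconds
rate = (float(current_sample["client_http.requests"])
        - float(previous_sample["client_http.requests"])) / elapsed
print(format(rate, ".2f"))  # "12.50" requests/second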