Dataset columns: repo_name (string, length 5-92), path (string, length 4-221), copies (string, 19 classes), size (string, length 4-6), content (string, length 766-896k), license (string, 15 classes), hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B), line_mean (float64, 6.51-99.9), line_max (int64, 32-997), alpha_frac (float64, 0.25-0.96), autogenerated (bool, 1 class), ratio (float64, 1.5-13.6), config_test (bool, 2 classes), has_no_keywords (bool, 2 classes), few_assignments (bool, 1 class).

| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| Mzero2010/MaxZone | plugin.video.Mzero/core/downloader.py | 1 | 15031 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# Mzero 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/Mzero/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of Mzero 4.
#
# Mzero 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mzero 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mzero 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
"""
Clase Downloader
Downloader(url, path [, filename, headers, resume])
url : string - url para descargar
path : string - Directorio donde se guarda la descarga
filename : [opt] string - Nombre de archivo para guardar
headers : [opt] dict - Headers para usar en la descarga
resume : [opt] bool - continuar una descarga previa en caso de existir, por defecto True
metodos:
start() Inicia la descarga
stop(erase = False) Detiene la descarga, con erase = True elimina los datos descargados
"""
import sys
import os
import re
import urllib2
import urllib
import urlparse
import mimetypes
import time
from core import filetools
from threading import Thread, Lock
class Downloader:
#Information:
@property
def state(self):
return self._state
@property
def connections(self):
return len([c for c in self._download_info["parts"] if c["status"] in[self.states.downloading, self.states.connecting]]), self._max_connections
@property
def downloaded(self):
return self.__change_units__(sum([c["current"] - c["start"] for c in self._download_info["parts"]]))
@property
def average_speed(self):
return self.__change_units__(self._average_speed)
@property
def speed(self):
return self.__change_units__(self._speed)
@property
def remaining_time(self):
if self.speed[0] and self._file_size:
t = (self.size[0] - self.downloaded[0]) / self.speed[0]
else:
t = 0
return time.strftime("%H:%M:%S", time.gmtime(t))
@property
def download_url(self):
return self.url
@property
def size(self):
return self.__change_units__(self._file_size)
@property
def progress(self):
if self._file_size:
return float(self.downloaded[0]) * 100 / float(self._file_size)
elif self._state == self.states.completed:
return 100
else:
return 0
@property
def filename(self):
return self._filename
@property
def fullpath(self):
return os.path.abspath(filetools.join(self._path, self._filename))
#Functions
def start(self):
if self._state == self.states.error: return
self._start_time = time.time() -1
self._state = self.states.downloading
for t in self._threads: t.start()
self._speed_thread.start()
def stop(self, erase=False):
if self._state == self.states.downloading:
#Stop the download
self._state = self.states.stopped
for t in self._threads:
if t.isAlive(): t.join()
#Save the download info at the end of the file
self.file.seek(0,2)
offset = self.file.tell()
self.file.write(str(self._download_info))
self.file.write("%0.16d" % offset)
self.file.close()
if erase: os.remove(filetools.join(self._path, self._filename))
def __speed_metter__(self):
self._speed = 0
self._average_speed = 0
downloaded = self._start_downloaded
downloaded2 = self._start_downloaded
t = time.time()
t2 = time.time()
time.sleep(1)
while self.state == self.states.downloading:
self._average_speed = (self.downloaded[0] - self._start_downloaded) / (time.time() - self._start_time)
self._speed = (self.downloaded[0] - downloaded) / (time.time() - t)
if time.time() -t > 5:
t = t2
downloaded = downloaded2
t2 = time.time()
downloaded2 = self.downloaded[0]
time.sleep(0.5)
#Internal functions
def __init__(self, url, path, filename=None, headers=[], resume = True, max_connections = 10, part_size = 2097152):
#Parameters
self._resume = resume
self._path = path
self._filename = filename
self._max_connections = max_connections
self._part_size = part_size
self.states = type('states', (), {"stopped":0, "connecting": 1, "downloading": 2, "completed": 3, "error": 4})
self._block_size = 1024*100
self._state = self.states.stopped
self._write_lock = Lock()
self._download_lock = Lock()
self._headers = {"User-Agent":"Kodi/15.2 (Windows NT 10.0; WOW64) App_Bitness/32 Version/15.2-Git:20151019-02e7013"}
self._speed = 0
self._threads = [Thread(target= self.__start_part__) for x in range(self._max_connections)]
self._speed_thread = Thread(target= self.__speed_metter__)
#Update the headers
self._headers.update(dict(headers))
#Split the headers off the url
self.__url_to_headers__(url)
#Get the info from the server
self.__get_download_headers__()
self._file_size = int(self.response_headers.get("content-length", "0"))
if not self.response_headers.get("accept-ranges") == "bytes" or self._file_size == 0:
self._max_connections = 1
self._part_size = 0
self._resume = False
#Get the file name
self.__get_download_filename__()
#Open in "a+" mode so the file is created if it does not exist, then in "r+b" mode to be able to seek()
self.file = filetools.file_open(filetools.join(self._path, self._filename), "a+")
self.file = filetools.file_open(filetools.join(self._path, self._filename), "r+b")
self.__get_download_info__()
def __url_to_headers__(self, url):
#Split the url from the additional headers
self.url = url.split("|")[0]
#additional headers
if "|" in url:
self._headers.update(dict([[header.split("=")[0],urllib.unquote_plus(header.split("=")[1])] for header in url.split("|")[1].split("&")]))
def __get_download_headers__(self):
for x in range(3):
try:
if not sys.hexversion > 0x0204FFFF:
conn = urllib2.urlopen(urllib2.Request(self.url, headers=self._headers))
conn.fp._sock.close()
else:
conn = urllib2.urlopen(urllib2.Request(self.url, headers=self._headers), timeout=5)
except:
self.response_headers = dict()
self._state = self.states.error
else:
self.response_headers = conn.headers.dict
self._state = self.states.stopped
break
def __get_download_filename__(self):
#Get the file name and extension
if "filename" in self.response_headers.get("content-disposition","") and "attachment" in self.response_headers.get("content-disposition",""):
cd_filename, cd_ext = os.path.splitext(urllib.unquote_plus(re.compile("attachment; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(self.response_headers.get("content-disposition")).group(1)))
if "filename" in self.response_headers.get("content-disposition","") and "inline" in self.response_headers.get("content-disposition",""):
cd_filename, cd_ext = os.path.splitext(urllib.unquote_plus(re.compile("inline; filename ?= ?[\"|']?([^\"']+)[\"|']?").match(self.response_headers.get("content-disposition")).group(1)))
else:
cd_filename, cd_ext = "",""
url_filename, url_ext = os.path.splitext(urllib.unquote_plus(filetools.basename(urlparse.urlparse(self.url)[2])))
if self.response_headers.get("content-type","application/octet-stream") <> "application/octet-stream":
mime_ext = mimetypes.guess_extension(self.response_headers.get("content-type"))
else:
mime_ext = ""
#Pick the most suitable name
self.remote_filename = ""
if cd_filename:
self.remote_filename = cd_filename
if not self._filename:
self._filename = cd_filename
elif url_filename:
self.remote_filename = url_filename
if not self._filename:
self._filename = url_filename
#Pick the most suitable extension
if cd_ext:
if not cd_ext in self._filename: self._filename += cd_ext
if self.remote_filename: self.remote_filename += cd_ext
elif mime_ext:
if not mime_ext in self._filename: self._filename += mime_ext
if self.remote_filename: self.remote_filename += mime_ext
elif url_ext:
if not url_ext in self._filename: self._filename += url_ext
if self.remote_filename: self.remote_filename += url_ext
def __change_units__(self, value):
import math
units = ["B", "KB", "MB", "GB"]
if value <= 0:
return 0, 0, units[0]
else:
return value, value / 1024.0 ** int(math.log(value,1024)), units[int(math.log(value,1024))]
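# For example (illustrative): __change_units__(2097152) returns
# (2097152, 2.0, "MB"), i.e. the raw byte count, the scaled value and its unit.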
def __get_download_info__(self):
#Resume a download whose info is stored at the end of the file
self._download_info = {}
try:
assert self._resume
self.file.seek(-16,2)
offset = int(self.file.read())
self.file.seek(offset)
a = self.file.read()[:-16]
self._download_info = eval(a)
assert self._download_info["size"] == self._file_size
assert self._download_info["url"] == self.url
self.file.seek(offset)
self.file.truncate()
self._start_downloaded = sum([c["current"] - c["start"] for c in self._download_info["parts"]])
self.pending_parts = [x for x, a in enumerate(self._download_info["parts"]) if not a["status"] == self.states.completed]
#The info does not exist or is invalid, start from scratch
except:
self._download_info["parts"] = []
if self._file_size and self._part_size:
for x in range(0,self._file_size, self._part_size):
end = x + self._part_size -1
if end >= self._file_size: end = self._file_size -1
self._download_info["parts"].append({"start": x, "end": end, "current": x, "status": self.states.stopped})
else:
self._download_info["parts"].append({"start": 0, "end": self._file_size-1, "current": 0, "status": self.states.stopped})
self._download_info["size"] = self._file_size
self._download_info["url"] = self.url
self._start_downloaded = 0
self.pending_parts = [x for x in range(len(self._download_info["parts"]))]
self.file.seek(0)
self.file.truncate()
def __open_connection__(self, start, end):
headers = self._headers.copy()
if not end: end = ""
headers.update({"Range": "bytes=%s-%s" % (start, end)})
if not sys.hexversion > 0x0204FFFF:
conn = urllib2.urlopen(urllib2.Request(self.url, headers=headers))
else:
conn = urllib2.urlopen(urllib2.Request(self.url, headers=headers), timeout=5)
return conn
def __start_part__(self):
while self._state == self.states.downloading:
self._download_lock.acquire()
if len(self.pending_parts):
id = min(self.pending_parts)
self.pending_parts.remove(id)
self._download_lock.release()
#Otherwise, end the thread
else:
if len([x for x, a in enumerate(self._download_info["parts"]) if a["status"] in [self.states.downloading, self.states.connecting]]) == 0:
self._state = self.states.completed
self.file.close()
self._download_lock.release()
break
#Check whether this part is already complete; if so, move on to the next one
if self._download_info["parts"][id]["current"] > self._download_info["parts"][id]["end"] and self._download_info["parts"][id]["end"] > -1:
self._download_info["parts"][id]["status"] = self.states.completed
continue
#Mark the state as connecting
self._download_info["parts"][id]["status"] = self.states.connecting
#Try the connection; on error, put the part back in the pending list
try:
connection = self.__open_connection__(self._download_info["parts"][id]["current"], self._download_info["parts"][id]["end"])
except:
self._download_info["parts"][id]["status"] = self.states.error
self.pending_parts.append(id)
time.sleep(5)
continue
else:
self._download_info["parts"][id]["status"] = self.states.downloading
#Check that the received chunk is the one we need
if self._download_info["parts"][id]["current"] <> int(connection.info().get("content-range","bytes 0-").split(" ")[1].split("-")[0]):
self._download_info["parts"][id]["status"] = self.states.error
self.pending_parts.append(id)
continue
while self._state == self.states.downloading:
try:
buffer = connection.read(self._block_size)
except:
self._download_info["parts"][id]["status"] = self.states.error
self.pending_parts.append(id)
break
else:
if len(buffer):
self._write_lock.acquire()
self.file.seek(self._download_info["parts"][id]["current"])
self.file.write(buffer)
self._download_info["parts"][id]["current"] +=len(buffer)
self._write_lock.release()
else:
connection.fp._sock.close()
self._download_info["parts"][id]["status"] = self.states.completed
break
if self._download_info["parts"][id]["status"] == self.states.downloading:
self._download_info["parts"][id]["status"] = self.states.stopped
| gpl-3.0 | 5,840,123,317,750,125,000 | 36.297767 | 198 | 0.586361 | false | 3.72583 | false | false | false |
| googleads/google-ads-python | google/ads/googleads/v8/services/services/age_range_view_service/client.py | 1 | 18043 |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import age_range_view
from google.ads.googleads.v8.services.types import age_range_view_service
from .transports.base import AgeRangeViewServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AgeRangeViewServiceGrpcTransport
class AgeRangeViewServiceClientMeta(type):
"""Metaclass for the AgeRangeViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[AgeRangeViewServiceTransport]]
_transport_registry["grpc"] = AgeRangeViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[AgeRangeViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class AgeRangeViewServiceClient(metaclass=AgeRangeViewServiceClientMeta):
"""Service to manage age range views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
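# For example, "googleads.googleapis.com" becomes "googleads.mtls.googleapis.com",
# while an endpoint that is already mTLS (or not a googleapis.com domain) is
# returned unchanged.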
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
AgeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> AgeRangeViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
AgeRangeViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def age_range_view_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified age_range_view string."""
return "customers/{customer_id}/ageRangeViews/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_age_range_view_path(path: str) -> Dict[str, str]:
"""Parse a age_range_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/ageRangeViews/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, AgeRangeViewServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the age range view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.AgeRangeViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, AgeRangeViewServiceTransport):
# transport is an AgeRangeViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = AgeRangeViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_age_range_view(
self,
request: age_range_view_service.GetAgeRangeViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> age_range_view.AgeRangeView:
r"""Returns the requested age range view in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetAgeRangeViewRequest`):
The request object. Request message for
[AgeRangeViewService.GetAgeRangeView][google.ads.googleads.v8.services.AgeRangeViewService.GetAgeRangeView].
resource_name (:class:`str`):
Required. The resource name of the
age range view to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.AgeRangeView:
An age range view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a age_range_view_service.GetAgeRangeViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, age_range_view_service.GetAgeRangeViewRequest
):
request = age_range_view_service.GetAgeRangeViewRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_age_range_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("AgeRangeViewServiceClient",)
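# A minimal usage sketch (kept as comments; the key file and resource IDs below
# are placeholders, and real Google Ads API calls also need a developer token
# sent as request metadata):
#
#   client = AgeRangeViewServiceClient.from_service_account_file("key.json")
#   name = client.age_range_view_path("1234567890", "111", "503001")
#   view = client.get_age_range_view(resource_name=name)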
| apache-2.0 | -6,773,315,631,857,939,000 | 40.28833 | 124 | 0.619853 | false | 4.49166 | false | false | false |
| fnivek/Pop-a-Gator | scripts/game_sir_interface.py | 1 | 3921 |
#!/usr/bin/env python
from bluetooth import *
import sys
from collections import OrderedDict
import argparse
import serial
# Take in command line args
parser = argparse.ArgumentParser(description='Interface with a game sir remote')
parser.add_argument('--pass_to_serial', action='store_true',
help='Pass the bluetooth data over to the serial connection')
parser.add_argument('--print_log', action='store_true',
help='Print the log of all raw data')
parser.add_argument('--device', default="/dev/serial/by-id/usb-PopaGator_Toad-if00",
help='Name of the serial device to pass the bluetooth data to')
cmd_args = parser.parse_args()
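# Example invocation (the device path is illustrative):
#   ./game_sir_interface.py --pass_to_serial --print_log --device /dev/ttyACM0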
# Connect to serial interface
ser = serial.Serial()
if cmd_args.pass_to_serial:
print "Connecting to device:\t" + cmd_args.device + "..."
# Open a serial port
ser = serial.Serial("/dev/serial/by-id/usb-PopaGator_Toad-if00", 115200)
# Send data to start USB OTG
ser.write("start")
print "Connected to device:\t" + cmd_args.device
# Connect to bluetooth
print "Connecting to gamesir over bluetooth..."
services = find_service()
gamepad = None
for svc in services:
if svc['name'] == 'SPP Channel':
gamepad = svc
if gamepad is None:
print "Failed to find gamepad"
sys.exit(0)
protocol = gamepad['protocol']
if protocol == 'RFCOMM':
protocol = RFCOMM
elif protocol == 'L2CAP':
protocol = L2CAP
else:
print "Unkown service!"
sys.exit(0)
sock=BluetoothSocket( protocol )
sock.connect((gamepad['host'], int(gamepad['port'])))
print 'Connected to gamesir over bluetooth'
gamepad_map = OrderedDict()
gamepad_map['LEFT_STICK_LR'] = 2
gamepad_map['LEFT_STICK_UD'] = 3
gamepad_map['RIGHT_STICK_LR'] = 4
gamepad_map['RIGHT_STICK_UD'] = 5
gamepad_map['LEFT_TRIGGER'] = 6
gamepad_map['RIGHT_TRIGGER'] = 7
gamepad_map['ABXY_BUMPERS'] = 8
gamepad_map['SELECT_START_STICKS_?'] = 9 # The left and right triggers, if depressed far enough, will set a bit
gamepad_map['DPAD'] = 10
button_bitmask = {
'ABXY_BUMPERS' : [('A', 0x1), ('B', 0x2), ('X', 0x8), ('Y', 0x10), ('LEFT_BUMPER', 0x40), ('RIGHT_BUMPER', 0x80)],
'SELECT_START_STICKS_?' : [('SELECT', 0x4), ('START', 0x8), ('LEFT_STICK', 0x20), ('RIGHT_STICK', 0x40) ]#('LEFT_TRIGGER', 0x1), ('RIGHT_TRIGGER', 0x2)]
}
dpad_map = {
0 : 'NOT_PRESSED',
1 : 'UP',
2 : 'UP_RIGHT',
3 : 'RIGHT',
4 : 'DOWN_RIGHT',
5 : 'DOWN',
6 : 'DOWN_LEFT',
7 : 'LEFT',
8 : 'UP_LEFT',
}
raw_data = ''
state = ''
try:
while True:
data = sock.recv(1024)
if cmd_args.pass_to_serial:
ser.write(data)
print '-----------------'
formated_data = [ord(c) for c in data]
print formated_data
for d in formated_data:
raw_data += str(d)
raw_data += ', '
if len(formated_data) < 10:
print 'Home button'
continue
state += '{0, '
for name, position in gamepad_map.iteritems():
output = name + " : " + str(formated_data[position])
if name in button_bitmask.keys():
for mask in button_bitmask[name]:
value = mask[1] & formated_data[position]
state += str(value)
state += ', '
output += "\n\t" + mask[0] + ": " + ('1' if (value) else '0')
elif name == "DPAD":
state += str(formated_data[position] & 0xF)
output += "\n\tDirection: " + dpad_map[formated_data[position]]
state += ', '
else:
state += str(formated_data[position])
state += ', '
print output
state += '},\n'
finally:
if cmd_args.print_log:
print raw_data
print '\n'
print state
sock.close()
| mit | -7,996,774,728,194,808,000 | 28.488722 | 156 | 0.566692 | false | 3.306071 | false | false | false |
| openbmc/openbmc-test-automation | bin/event_notification_util.py | 1 | 1499 |
#!/usr/bin/env python
r"""
See help text for details.
"""
import sys
save_dir_path = sys.path.pop(0)
modules = ['gen_arg', 'gen_print', 'gen_valid', 'event_notification']
for module in modules:
exec("from " + module + " import *")
sys.path.insert(0, save_dir_path)
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS]',
description="%(prog)s will subscribe and receive event notifications when "
+ "properties change for the given dbus path.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prefix_chars='-+')
parser.add_argument(
'--host',
default='',
help='The host name or IP of the system to subscribe to.')
parser.add_argument(
'--username',
default='root',
help='The username for the host system.')
parser.add_argument(
'--password',
default='',
help='The password for the host system.')
parser.add_argument(
'--dbus_path',
default='',
help='The path to be monitored (e.g. "/xyz/openbmc_project/sensors").')
parser.add_argument(
'--enable_trace',
choices=[0, 1],
default=0,
help='Indicates that trace needs to be enabled.')
# Populate stock_list with options we want.
stock_list = [("test_mode", 0), ("quiet", 0), ("debug", 0)]
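# Example invocation (host and credentials are illustrative):
#   ./event_notification_util.py --host bmc.example.com --username root \
#       --password mypassword --dbus_path /xyz/openbmc_project/sensors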
def main():
gen_setup()
my_event = event_notification(host, username, password)
event_notifications = my_event.subscribe(dbus_path, enable_trace)
print_var(event_notifications, fmt=[no_header(), strip_brackets()])
main()
| apache-2.0 | -7,400,601,391,198,828,000 | 25.298246 | 79 | 0.651101 | false | 3.586124 | false | false | false |
| samupl/website | apps/blog/models.py | 1 | 2366 |
from django.conf import settings
from django.db import models
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from django.template.defaultfilters import slugify
class Post(models.Model):
author = models.ForeignKey(settings.AUTH_USER_MODEL)
date = models.DateTimeField(verbose_name='Date', auto_now_add=True)
title = models.CharField(verbose_name='Title', max_length=1024)
slug = models.SlugField(unique=True)
content = models.TextField(verbose_name='Content')
comment_count = models.PositiveIntegerField(verbose_name='Comment count', default=0, editable=False)
@models.permalink
def get_absolute_url(self):
return 'blog:view', [str(self.id), str(self.slug)]
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.title)
super().save(*args, **kwargs)
def __str__(self):
return '{title} ({date})'.format(
title=self.title,
date=self.date
)
class Label(models.Model):
post = models.ForeignKey(Post)
label = models.CharField(max_length=90)
def __str__(self):
return '{label} ({post})'.format(
label=self.label,
post=self.post
)
class Meta:
unique_together = ['post', 'label']
class Comment(models.Model):
post = models.ForeignKey(Post)
date = models.DateTimeField(verbose_name='Date', auto_now_add=True)
username = models.CharField(verbose_name='Username', max_length=256)
email = models.EmailField(verbose_name='E-mail address')
content = models.TextField(max_length=4096)
ip = models.CharField(max_length=4096)
host = models.CharField(max_length=4096)
ua = models.CharField(null=True, blank=True, max_length=4096)
ref = models.CharField(null=True, blank=True, max_length=4096)
def __str__(self):
return '{username} ({email}, {ip}, {date}, {post})'.format(
username=self.username,
email=self.email,
ip=self.ip,
date=self.date,
post=self.post
)
@receiver(post_delete, sender=Comment)
@receiver(post_save, sender=Comment)
def calculate_comments_count(sender, instance, **kwargs):
entry = instance.post
entry.comment_count = Comment.objects.filter(post=entry).count()
entry.save()
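# Illustrative sketch of the signal handlers above (kept as comments; assumes an
# existing `user`, and the field values are made up):
#
#   post = Post.objects.create(author=user, title="Hello", content="...")
#   Comment.objects.create(post=post, username="a", email="a@example.com",
#                          content="hi", ip="127.0.0.1", host="localhost")
#   post.refresh_from_db()
#   post.comment_count  # -> 1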
| mit | -5,252,156,682,347,990,000 | 32.323944 | 104 | 0.652578 | false | 3.731861 | false | false | false |
| quantifiedcode-bot/invenio-search | setup.py | 1 | 4305 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Invenio module for information retrieval."""
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
requirements = [
'Flask>=0.10.1',
'six>=1.7.2',
'invenio-access>=0.1.0',
'invenio-accounts>=0.1.2',
'invenio-base>=0.1.0',
'invenio-formatter>=0.2.1',
'invenio-knowledge>=0.1.0',
'invenio-query-parser>=0.2',
'invenio-upgrader>=0.1.0',
]
test_requirements = [
'unittest2>=1.1.0',
'Flask_Testing>=0.4.1',
'pytest>=2.7.0',
'pytest-cov>=1.8.0',
'pytest-pep8>=1.0.6',
'coverage>=3.7.1',
]
class PyTest(TestCommand):
"""PyTest Test."""
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
"""Init pytest."""
TestCommand.initialize_options(self)
self.pytest_args = []
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
config = ConfigParser()
config.read('pytest.ini')
self.pytest_args = config.get('pytest', 'addopts').split(' ')
def finalize_options(self):
"""Finalize pytest."""
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
"""Run tests."""
# import here, cause outside the eggs aren't loaded
import pytest
import _pytest.config
pm = _pytest.config.get_plugin_manager()
pm.consider_setuptools_entrypoints()
errno = pytest.main(self.pytest_args)
sys.exit(errno)
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio_search', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='invenio-search',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
keywords='invenio TODO',
license='GPLv2',
author='CERN',
author_email='info@invenio-software.org',
url='https://github.com/inveniosoftware/invenio-search',
packages=[
'invenio_search',
],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=requirements,
extras_require={
'docs': [
'Sphinx>=1.3',
'sphinx_rtd_theme>=0.1.7'
],
'tests': test_requirements
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2',
# 'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.3',
# 'Programming Language :: Python :: 3.4',
'Development Status :: 1 - Planning',
],
tests_require=test_requirements,
cmdclass={'test': PyTest},
)
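# Typical developer invocations (illustrative):
#   python setup.py test          # runs pytest via the PyTest command above,
#                                 # using addopts read from pytest.ini
#   pip install -e .[docs,tests]  # editable install with the extras declared above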
| gpl-2.0 | 5,711,932,758,240,129,000 | 29.75 | 76 | 0.628339 | false | 3.809735 | true | false | false |
| svebk/DeepSentiBank_memex | workflows/images-incremental-update/images-incremental-update.py | 1 | 58831 |
import os
import json
import time
import calendar
import datetime
import dateutil.parser
import sys
print(sys.version)
import subprocess
dev = False
if dev:
dev_release_suffix = "_dev"
base_incremental_path = '/user/skaraman/data/images_incremental_update_dev/'
else:
dev_release_suffix = "_release"
base_incremental_path = '/user/worker/dig2/incremental/'
from optparse import OptionParser
from pyspark import SparkContext, SparkConf, StorageLevel
from elastic_manager import ES
from hbase_manager import HbaseManager
# deprecated, now uptonow option
#query_ts_minmax = True # Otherwise get everything after es_ts_start
day_gap = 86400000 # One day
ts_gap = day_gap
time_sleep_update_out = 10
#ts_gap = 10000000
#ts_gap = 10000
# default settings
#fields_cdr = ["obj_stored_url", "obj_parent", "obj_original_url", "timestamp", "crawl_data.image_id", "crawl_data.memex_ht_id"]
max_ts = 9999999999999
fields_cdr = ["obj_stored_url", "obj_parent"]
fields_list = [("info","all_cdr_ids"), ("info","s3_url"), ("info","all_parent_ids"), ("info","image_discarded"), ("info","cu_feat_id")]
##-- General RDD I/O
##------------------
def get_list_value(json_x,field_tuple):
return [x["value"] for x in json_x if x["columnFamily"]==field_tuple[0] and x["qualifier"]==field_tuple[1]]
def check_hdfs_file(hdfs_file_path):
proc = subprocess.Popen(["hdfs", "dfs", "-ls", hdfs_file_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if "Filesystem closed" in err:
print("[check_hdfs_file: WARNING] Beware got error '{}' when checking for file: {}.".format(err, hdfs_file_path))
sys.stdout.flush()
print "[check_hdfs_file] out: {}, err: {}".format(out, err)
return out, err
def hdfs_file_exist(hdfs_file_path):
out, err = check_hdfs_file(hdfs_file_path)
# too restrictive as even log4j error would be interpreted as non existing file
#hdfs_file_exist = "_SUCCESS" in out and not "_temporary" in out and not err
hdfs_file_exist = "_SUCCESS" in out
return hdfs_file_exist
def hdfs_file_failed(hdfs_file_path):
out, err = check_hdfs_file(hdfs_file_path)
hdfs_file_failed = "_temporary" in out
return hdfs_file_failed
def load_rdd_json(basepath_save, rdd_name):
rdd_path = basepath_save + "/" + rdd_name
rdd = None
try:
if hdfs_file_exist(rdd_path):
print("[load_rdd_json] trying to load rdd from {}.".format(rdd_path))
rdd = sc.sequenceFile(rdd_path).mapValues(json.loads)
except Exception as inst:
print("[load_rdd_json: caught error] could not load rdd from {}. Error was {}.".format(rdd_path, inst))
return rdd
def save_rdd_json(basepath_save, rdd_name, rdd, incr_update_id, hbase_man_update_out):
rdd_path = basepath_save + "/" + rdd_name
if not rdd.isEmpty():
try:
if not hdfs_file_exist(rdd_path):
print("[save_rdd_json] saving rdd to {}.".format(rdd_path))
rdd.mapValues(json.dumps).saveAsSequenceFile(rdd_path)
else:
print("[save_rdd_json] skipped saving rdd to {}. File already exists.".format(rdd_path))
save_info_incremental_update(hbase_man_update_out, incr_update_id, rdd_path, rdd_name+"_path")
except Exception as inst:
print("[save_rdd_json: caught error] could not save rdd at {}, error was {}.".format(rdd_path, inst))
else:
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
# is this inducing respawn when called twice within short timespan?
# should we reinstantiate a different hbase_man_update_out every time?
def save_info_incremental_update(hbase_man_update_out, incr_update_id, info_value, info_name):
print("[save_info_incremental_update] saving update info {}: {}".format(info_name, info_value))
incr_update_infos_list = []
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", info_name, str(info_value)]))
incr_update_infos_rdd = sc.parallelize(incr_update_infos_list)
hbase_man_update_out.rdd2hbase(incr_update_infos_rdd)
##------------------
##-- END General RDD I/O
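# Usage sketch for the RDD I/O helpers above (the base path and rdd name are
# illustrative; `sc`, `incr_update_id` and `hbase_man_update_out` are assumed to
# be set up as elsewhere in this script):
#   save_rdd_json("/user/worker/base", "my_rdd", some_rdd, incr_update_id, hbase_man_update_out)
#   rdd = load_rdd_json("/user/worker/base", "my_rdd")  # returns None if absent or unreadable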
##-- S3 URL functions
##-------------------
def clean_up_s3url_sha1(data):
try:
s3url = unicode(data[0]).strip()
json_x = [json.loads(x) for x in data[1].split("\n")]
sha1 = get_list_value(json_x,("info","sha1"))[0].strip()
return [(s3url, sha1)]
except:
print("[clean_up_s3url_sha1] failed, data was: {}".format(data))
return []
def get_SHA1_from_URL(URL):
import image_dl
sha1hash = image_dl.get_SHA1_from_URL_StringIO(URL,1) # 1 is verbose level
return sha1hash
def get_row_sha1(URL_S3,verbose=False):
row_sha1 = None
if type(URL_S3) == unicode and URL_S3 != u'None' and URL_S3.startswith('https://s3'):
row_sha1 = get_SHA1_from_URL(URL_S3)
if row_sha1 and verbose:
print "Got new SHA1 {} from_url {}.".format(row_sha1,URL_S3)
return row_sha1
def check_get_sha1_s3url(data):
URL_S3 = data[0]
row_sha1 = get_row_sha1(unicode(URL_S3),0)
if row_sha1:
return [(URL_S3, (list(data[1][0]), row_sha1))]
return []
def get_s3url_sha1(data):
sha1 = data[0]
json_x = data[1]
try:
s3url_list = get_list_value(json_x,("info","obj_stored_url"))
sha1_list = get_list_value(json_x,("info","sha1"))
if s3url_list and sha1_list:
s3url = s3url_list[0].strip()
sha1 = sha1_list[0].strip()
if not s3url.startswith('https://s3'):
raise ValueError('s3url is not stored in S3.')
else:
if not sha1_list:
raise ValueError('sha1 is not computed.')
if not s3url_list:
raise ValueError('s3url is absent.')
except Exception as inst:
print "[get_s3url_sha1: error] Could not get sha1 or s3url for row {}. {}".format(key, inst)
return []
if sha1 and s3url:
return [(s3url, [s3url, "info", "sha1", sha1.upper()])]
return []
def reduce_s3url_infos(a,b):
a.extend(b)
return a
def reduce_s3_keep_one_sha1(a,b):
if a != b:
raise ValueError("[reduce_s3_keep_one_sha1: error] one s3url has two differnet sha1 values {} and {}.".format(a, b))
return a
def hbase_out_s3url_sha1(data):
s3_url = data[0]
sha1 = data[1]
if sha1 and s3_url:
return [(s3_url, [s3_url, "info", "sha1", sha1.upper()])]
return []
def to_s3_url_key_dict_list(data):
doc_id = data[0]
v = data[1]
tup_list = []
if "info:obj_stored_url" in v:
s3url = v["info:obj_stored_url"]
if s3url.startswith('https://s3'):
v["info:doc_id"] = doc_id
tup_list = [(s3url, [v])]
return tup_list
def s3url_dict_list_to_cdr_id_wsha1(data):
if len(data[1]) != 2 or data[1][1] is None or data[1][1] == 'None' or data[1][1] == u'None':
print("[s3url_dict_list_to_cdr_id_wsha1] incorrect data: {}".format(data))
return []
s3_url = data[0]
list_v = data[1][0]
sha1 = data[1][1]
tup_list = []
for v in list_v:
if sha1:
doc_id = v["info:doc_id"]
if type(sha1) == list and len(sha1)==1:
v["info:sha1"] = sha1[0]
else:
v["info:sha1"] = sha1
tup_list.append((doc_id, v))
return tup_list
def dump_s3url_info_list_dict(x):
v = dict()
v["left"] = dict()
i = 0
#for w in list(x[0]):
for w in x[0]:
if w:
v["left"][str(i)] = json.dumps(w)
i += 1
if x[1]:
v["right"] = x[1]
return json.dumps(v)
def load_s3url_info_list_dict(x):
v = json.loads(x)
x0 = []
x1 = []
for w in v["left"]:
x0.append(json.loads(v["left"][w]))
#x0.append(json.loads(w))
if "right" in v:
x1 = v["right"]
return (x0, x1)
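# Round-trip sketch for the two helpers above (values are illustrative):
#   s = dump_s3url_info_list_dict(([{"info:doc_id": "abc"}], "SOMESHA1"))
#   load_s3url_info_list_dict(s)  # -> ([{"info:doc_id": "abc"}], "SOMESHA1")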
def load_s3url_infos_rdd_join(s3url_infos_rdd_join_path):
s3url_infos_rdd_join = None
try:
if hdfs_file_exist(s3url_infos_rdd_join_path):
s3url_infos_rdd_join = sc.sequenceFile(s3url_infos_rdd_join_path).mapValues(load_s3url_info_list_dict)
print("[load_s3url_infos_rdd_join: info] first samples of s3url_infos_rdd_join looks like: {}".format(s3url_infos_rdd_join.take(5)))
except Exception as inst:
print("[load_s3url_infos_rdd_join: caught error] Could not load rdd at {}. Error was {}.".format(s3url_infos_rdd_join_path, inst))
return s3url_infos_rdd_join
def save_s3url_infos_rdd_join(s3url_infos_rdd_join, hbase_man_update_out, incr_update_id, s3url_infos_rdd_join_path, s3url_infos_rdd_join_path_str):
try:
if not hdfs_file_exist(s3url_infos_rdd_join_path):
print("[save_s3url_infos_rdd_join: info] saving 's3url_infos_rdd_join' to {}.".format(s3url_infos_rdd_join_path))
s3url_infos_rdd_join_tosave = s3url_infos_rdd_join.mapValues(dump_s3url_info_list_dict)
print("[save_s3url_infos_rdd_join: info] first samples of s3url_infos_rdd_join_tosave looks like: {}".format(s3url_infos_rdd_join_tosave.take(5)))
s3url_infos_rdd_join_tosave.saveAsSequenceFile(s3url_infos_rdd_join_path)
else:
print("[save_s3url_infos_rdd_join] skipped saving rdd to {}. File already exists.".format(s3url_infos_rdd_join_path))
save_info_incremental_update(hbase_man_update_out, incr_update_id, s3url_infos_rdd_join_path, s3url_infos_rdd_join_path_str)
except Exception as inst:
print("[save_s3url_infos_rdd_join: caught error] could not save rdd at {}, error was {}.".format(s3url_infos_rdd_join_path, inst))
def get_s3url_infos_rdd_join(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time):
rdd_name = "s3url_infos_rdd_join"
s3url_infos_rdd_join_path = basepath_save + "/" + rdd_name
# always try to load from disk
s3url_infos_rdd_join = load_s3url_infos_rdd_join(s3url_infos_rdd_join_path)
if s3url_infos_rdd_join is not None:
print("[get_s3url_infos_rdd_join: info] loaded rdd from {}.".format(s3url_infos_rdd_join_path))
return s3url_infos_rdd_join
# get dependency cdr_ids_infos_rdd
cdr_ids_infos_rdd = get_cdr_ids_infos_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time)
if cdr_ids_infos_rdd is None:
print("[get_s3url_infos_rdd_join] cdr_ids_infos_rdd is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
print("[get_s3url_infos_rdd_join: info] computing rdd s3url_infos_rdd_join.")
# there could be duplicates cdr_id near indices boundary or corrections might have been applied...
cdr_ids_infos_rdd_red = cdr_ids_infos_rdd.reduceByKey(reduce_cdrid_infos)
# invert cdr_ids_infos_rdd (k,v) into s3url_infos_rdd (v[s3_url],[v,v['cdr_id']=k])
s3url_infos_rdd = cdr_ids_infos_rdd_red.flatMap(to_s3_url_key_dict_list)
s3url_infos_rdd_red = s3url_infos_rdd.reduceByKey(reduce_s3url_infos)
# get some stats
s3url_infos_rdd_count = s3url_infos_rdd.count()
print("[get_s3url_infos_rdd_join: info] s3url_infos_rdd_count is: {}".format(s3url_infos_rdd_count))
s3url_infos_rdd_red_count = s3url_infos_rdd_red.count()
print("[get_s3url_infos_rdd_join: info] s3url_infos_rdd_red_count is: {}".format(s3url_infos_rdd_red_count))
print("[get_s3url_infos_rdd_join: info] s3url_infos_rdd_red first samples looks like: {}".format(s3url_infos_rdd_red.take(10)))
if c_options.join_s3url:
try:
# try to reload from disk
s3url_sha1_rdd = sc.sequenceFile(basepath_save + "/s3url_sha1_rdd")
except Exception as inst:
# read s3url_sha1 table into s3url_sha1 to get sha1 here without downloading images
print("[get_s3url_infos_rdd_join] starting to read from s3url_sha1 HBase table.")
s3url_sha1_rdd = hbase_man_s3url_sha1_in.read_hbase_table().flatMap(clean_up_s3url_sha1)
# never save that anymore, too big.
# try:
# s3url_sha1_rdd.saveAsSequenceFile(basepath_save + "/s3url_sha1_rdd")
# except Exception as inst:
# pass
s3url_sha1_rdd_count = s3url_sha1_rdd.count()
print("[get_s3url_infos_rdd_join: info] s3url_sha1_rdd_count is: {}".format(s3url_sha1_rdd_count))
s3url_sha1_rdd_partitioned = s3url_sha1_rdd.partitionBy(nb_partitions).persist(StorageLevel.MEMORY_AND_DISK)
s3url_infos_rdd_red_partitioned = s3url_infos_rdd_red.partitionBy(nb_partitions).persist(StorageLevel.MEMORY_AND_DISK)
#print("[get_s3url_infos_rdd_join] start running 's3url_infos_rdd_red.cogroup(s3url_sha1_rdd)'.")
#s3url_infos_rdd_join = s3url_infos_rdd_red_partitioned.cogroup(s3url_sha1_rdd_partitioned)
print("[get_s3url_infos_rdd_join] start running 's3url_infos_rdd_red.leftOuterJoin(s3url_sha1_rdd)'.")
s3url_infos_rdd_join = s3url_infos_rdd_red_partitioned.leftOuterJoin(s3url_sha1_rdd_partitioned)
s3url_infos_rdd_join_count = s3url_infos_rdd_join.count()
print("[get_s3url_infos_rdd_join: info] s3url_infos_rdd_join_count is: {}".format(s3url_infos_rdd_join_count))
else:
print("[get_s3url_infos_rdd_join: info] skipping join with s3url_sha1 table as requested from options.")
# Fake a join so everything after run the same way.
# The real would have a SHA1 has right side value for already existing s3 URLs
s3url_infos_rdd_join = s3url_infos_rdd_red.mapValues(lambda x: (x, None))
# Save rdd
if c_options.save_inter_rdd:
save_s3url_infos_rdd_join(s3url_infos_rdd_join, hbase_man_update_out, incr_update_id, s3url_infos_rdd_join_path, "s3url_infos_rdd_join_path")
return s3url_infos_rdd_join
def save_new_s3_url(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
## save out newly computed s3url
cdr_ids_infos_rdd_new_sha1 = get_cdr_ids_infos_rdd_new_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if cdr_ids_infos_rdd_new_sha1 is None:
print("[save_new_s3_url] cdr_ids_infos_rdd_new_sha1 is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, "new_s3url_sha1_rdd_count")
return
# invert cdr_ids_infos_rdd_new_sha1 to (s3url, sha1) and apply reduceByKey() selecting any sha1
new_s3url_sha1_rdd = cdr_ids_infos_rdd_new_sha1.flatMap(cdrid_key_to_s3url_key_sha1_val)
out_new_s3url_sha1_rdd = new_s3url_sha1_rdd.reduceByKey(reduce_s3_keep_one_sha1).flatMap(hbase_out_s3url_sha1)
print("[save_new_s3_url: info] out_new_s3url_sha1_rdd first samples look like: {}".format(out_new_s3url_sha1_rdd.take(5)))
print("[save_new_s3_url] saving 'out_new_s3url_sha1_rdd' to HBase.")
hbase_man_s3url_sha1_out.rdd2hbase(out_new_s3url_sha1_rdd)
## save new images update infos
new_s3url_sha1_rdd_count = out_new_s3url_sha1_rdd.count()
print("[save_new_s3_url] new_s3url_sha1_rdd_count count: {}".format(new_s3url_sha1_rdd_count))
save_info_incremental_update(hbase_man_update_out, incr_update_id, new_s3url_sha1_rdd_count, "new_s3url_sha1_rdd_count")
##-------------------
##-- END S3 URL functions
## SHA1 and CDR ids related functions
def expand_info(data):
key = data[0]
json_x = data[1]
out = []
for field in json_x:
fs = field.split(':')
out.append((key, [key, fs[0], fs[1], json_x[field]]))
return out
def create_images_tuple(data):
doc_id = data[0]
json_x = json.loads(data[1])
ts = json_x["_metadata"]["_timestamp"]
key = str(max_ts-ts)+"_"+doc_id
tup_list=[ (key, [key, "info", "doc_id", doc_id])]
for field in fields_cdr:
try:
field_value = json_x[field][0]
if field.endswith("url"):
str_field_value = unicode(field_value)
else:
str_field_value = str(field_value)
tup_list.append( (key, [key, "info", field, str_field_value]) )
except Exception as inst:
pass
return tup_list
def cdrid_key_to_sha1_key(data):
cdr_id = data[0]
json_x = data[1]
sha1 = None
obj_stored_url = None
obj_parent = None
try:
sha1_val = json_x["info:sha1"]
if type(sha1_val)==list and len(sha1_val)==1:
sha1 = sha1_val[0].strip()
else:
sha1 = sha1_val.strip()
obj_stored_url = unicode(json_x["info:obj_stored_url"].strip())
obj_parent = json_x["info:obj_parent"].strip()
except Exception as inst2:
print("[cdrid_key_to_sha1_key] could not read sha1, obj_stored_url or obj_parent for cdr_id {}".format(cdr_id))
pass
if cdr_id and sha1 and obj_stored_url and obj_parent:
return [(sha1, {"info:all_cdr_ids": [cdr_id], "info:s3_url": [obj_stored_url], "info:all_parent_ids": [obj_parent]})]
return []
def cdrid_key_to_s3url_key_sha1_val(data):
json_x = data[1]
sha1 = None
obj_stored_url = None
try:
sha1_val = json_x["info:sha1"]
if type(sha1_val)==list and len(sha1_val)==1:
sha1 = sha1_val[0].strip()
else:
sha1 = sha1_val.strip()
obj_stored_url = unicode(json_x["info:obj_stored_url"].strip())
except Exception as inst2:
pass
if obj_stored_url and sha1:
return [(obj_stored_url, sha1)]
return []
def sha1_key_json(data):
sha1 = data[0]
json_x = [json.loads(x) for x in data[1].split("\n")]
v = dict()
for field in fields_list:
try:
if field[1]!='s3_url':
v[':'.join(field)] = list(set([x for x in get_list_value(json_x,field)[0].strip().split(',')]))
else:
v[':'.join(field)] = [unicode(get_list_value(json_x,field)[0].strip())]
except: # field not in row
pass
return [(sha1, v)]
def reduce_cdrid_infos(a,b):
''' If we have two samples with the same cdr_id we want to keep the newest
that may be a correction of the older one.
'''
c = dict()
if a["info:insert_ts"] > b["info:insert_ts"]:
c = a
else:
c = b
return c
def safe_reduce_infos(a, b, c, field):
try:
c[field] = list(set(a[field]+b[field]))
except Exception as inst:
try:
c[field] = a[field]
print("[safe_reduce_infos: error] key error for '{}' for b".format(field))
except Exception as inst2:
try:
c[field] = b[field]
print("[safe_reduce_infos: error] key error for '{}' for a".format(field))
except Exception as inst3:
c[field] = []
print("[safe_reduce_infos: error] key error for '{}' for both a and b".format(field))
return c
def safe_assign(a, c, field, fallback):
if field in a:
c[field] = a[field]
else:
print("[safe_assign: error] we have no {}.".format(field))
c[field] = fallback
return c
def test_info_s3_url(dict_img):
return "info:s3_url" in dict_img and dict_img["info:s3_url"] and dict_img["info:s3_url"][0]!=u'None' and dict_img["info:s3_url"][0].startswith('https://s3')
def reduce_sha1_infos_discarding(a,b):
c = dict()
if b: # sha1 already existed
if "info:image_discarded" in a or "info:image_discarded" in b:
c["info:all_cdr_ids"] = []
c["info:all_parent_ids"] = []
c["info:image_discarded"] = 'discarded because has more than {} cdr_ids'.format(max_images_reduce)
else:
# KeyError: 'info:all_cdr_ids'. How could an image not have this field?
c = safe_reduce_infos(a, b, c, "info:all_cdr_ids")
c = safe_reduce_infos(a, b, c, "info:all_parent_ids")
#if "info:s3_url" in a and a["info:s3_url"] and a["info:s3_url"].startswith('https://s3') and a["info:s3_url"][0]!=u'None':
if test_info_s3_url(a):
c["info:s3_url"] = a["info:s3_url"]
else:
if test_info_s3_url(b):
c["info:s3_url"] = b["info:s3_url"]
else:
print("[reduce_sha1_infos_discarding: error] both a and b have no s3 url.")
c["info:s3_url"] = [None]
# need to keep info:cu_feat_id if it exists
if "info:cu_feat_id" in b:
c["info:cu_feat_id"] = b["info:cu_feat_id"]
else: # brand new image
c = safe_assign(a, c, "info:s3_url", [None])
c = safe_assign(a, c, "info:all_cdr_ids", [])
c = safe_assign(a, c, "info:all_parent_ids", [])
# should discard if bigger than max(max_images_hbase, max_images_dig)...
if len(c["info:all_cdr_ids"]) > max_images_reduce or len(c["info:all_parent_ids"]) > max_images_reduce:
print("Discarding image with URL: {}".format(c["info:s3_url"][0]))
c["info:all_cdr_ids"] = []
c["info:all_parent_ids"] = []
c["info:image_discarded"] = 'discarded because has more than {} cdr_ids'.format(max_images_reduce)
return c
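# Illustrative example for reduce_sha1_infos_discarding (hypothetical values, a minimal sketch): merging
#   a = {"info:all_cdr_ids": ["c1"], "info:all_parent_ids": ["p1"], "info:s3_url": [u"https://s3/..."]}
#   b = {"info:all_cdr_ids": ["c2"], "info:all_parent_ids": ["p1"], "info:s3_url": [u"https://s3/..."]}
# yields c with all_cdr_ids ["c1", "c2"] and all_parent_ids ["p1"] (set union, so order is not guaranteed)
# and a's s3_url; if either merged list grows beyond max_images_reduce, the cdr_id/parent lists are
# emptied and "info:image_discarded" is set instead.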
def split_sha1_kv_images_discarded(x):
# this prepares data to be saved in HBase
tmp_fields_list = [("info","all_cdr_ids"), ("info","s3_url"), ("info","all_parent_ids")]
out = []
if "info:image_discarded" in x[1] or len(x[1]["info:all_cdr_ids"]) > max_images_hbase or len(x[1]["info:all_parent_ids"]) > max_images_hbase:
if "info:image_discarded" not in x[1]:
x[1]["info:image_discarded"] = 'discarded because has more than {} cdr_ids'.format(max_images_hbase)
out.append((x[0], [x[0], "info", "image_discarded", x[1]["info:image_discarded"]]))
str_s3url_value = None
s3url_value = x[1]["info:s3_url"][0]
str_s3url_value = unicode(s3url_value)
out.append((x[0], [x[0], "info", "s3_url", str_s3url_value]))
out.append((x[0], [x[0], "info", "all_cdr_ids", x[1]["info:image_discarded"]]))
out.append((x[0], [x[0], "info", "all_parent_ids", x[1]["info:image_discarded"]]))
else:
for field in tmp_fields_list:
if field[1]=="s3_url":
out.append((x[0], [x[0], field[0], field[1], unicode(x[1][field[0]+":"+field[1]][0])]))
else:
out.append((x[0], [x[0], field[0], field[1], ','.join(x[1][field[0]+":"+field[1]])]))
return out
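# Illustrative output of split_sha1_kv_images_discarded (hypothetical sha1 "ABC"): for a kept image it
# emits one (row_key, [row_key, column_family, column, value]) tuple per column, e.g.
#   ("ABC", ["ABC", "info", "all_cdr_ids", "c1,c2"]),
#   ("ABC", ["ABC", "info", "s3_url", u"https://s3/..."]),
#   ("ABC", ["ABC", "info", "all_parent_ids", "p1"])
# while a discarded image gets an "image_discarded" column, its s3_url, and the discard message written
# into the all_cdr_ids / all_parent_ids columns.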
def flatten_leftjoin(x):
out = []
# at this point value is a tuple of two lists with a single or empty dictionary
c = reduce_sha1_infos_discarding(x[1][0],x[1][1])
out.append((x[0], c))
return out
def to_cdr_id_dict(data):
doc_id = data[0]
v = dict()
json_x = json.loads(data[1])
insert_ts = str(json_x["_metadata"]["_timestamp"])
v["info:insert_ts"] = insert_ts
v["info:doc_id"] = doc_id
del json_x["_metadata"]
for field in json_x:
try:
v["info:"+field] = str(json_x[field][0])
except Exception as inst:
print("[to_cdr_id_dict: error] {} for doc: {}. Assuming it is an encoding issue.".format(inst, doc_id))
try:
v["info:"+field] = json_x[field][0].encode('utf-8')
except Exception as inst2:
print("[to_cdr_id_dict: error] failed again ({}) for doc: {}.".format(inst2, doc_id))
pass
tup_list = [(doc_id, v)]
#print("[to_cdr_id_dict] {}".format(tup_list))
return tup_list
def get_existing_joined_sha1(data):
if len(data[1]) == 2 and data[1][1] and data[1][1] is not None and data[1][1] != 'None' and data[1][1] != u'None':
return True
return False
##-- New images for features computation functions
##---------------
def build_batch_out(batch_update, incr_update_id, batch_id):
update_id = "index_update_"+incr_update_id+'_'+str(batch_id)
list_key = []
for x in batch_update:
list_key.append(x)
return [(update_id, [update_id, "info", "list_sha1s", ','.join(list_key)])]
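# Illustrative output of build_batch_out (hypothetical values): for incr_update_id "2017-01-01",
# batch_id 0 and a batch of two sha1s, it returns
#   [("index_update_2017-01-01_0",
#     ["index_update_2017-01-01_0", "info", "list_sha1s", "SHA1A,SHA1B"])]
# i.e. a single HBase row listing the sha1s of that batch.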
def save_new_sha1s_for_index_update_batchwrite(new_sha1s_rdd, hbase_man_update_out, batch_update_size, incr_update_id, total_batches, nb_batchwrite=32):
start_save_time = time.time()
# use toLocalIterator if new_sha1s_rdd would be really big and won't fit in the driver's memory
#iterator = new_sha1s_rdd.toLocalIterator()
iterator = new_sha1s_rdd.collect()
batch_update = []
batch_out = []
batch_id = 0
push_batches = False
for x in iterator:
batch_update.append(x)
if len(batch_update)==batch_update_size:
if batch_id > 0 and batch_id % nb_batchwrite == 0:
push_batches = True
try:
print("[save_new_sha1s_for_index_update_batchwrite] preparing batch {}/{} starting with: {}".format(batch_id+1, total_batches, batch_update[:10]))
batch_out.extend(build_batch_out(batch_update, incr_update_id, batch_id))
batch_id += 1
except Exception as inst:
print("[save_new_sha1s_for_index_update_batchwrite] Could not create/save batch {}. Error was: {}".format(batch_id, inst))
batch_update = []
if push_batches:
batch_out_rdd = sc.parallelize(batch_out)
print("[save_new_sha1s_for_index_update_batchwrite] saving {} batches of {} new images to HBase.".format(len(batch_out), batch_update_size))
hbase_man_update_out.rdd2hbase(batch_out_rdd)
batch_out = []
push_batches = False
# last batch
if batch_update:
try:
print("[save_new_sha1s_for_index_update_batchwrite] will prepare and save last batch {}/{} starting with: {}".format(batch_id+1, total_batches, batch_update[:10]))
batch_out.extend(build_batch_out(batch_update, incr_update_id, batch_id))
batch_out_rdd = sc.parallelize(batch_out)
print("[save_new_sha1s_for_index_update_batchwrite] saving {} batches of {} new images to HBase.".format(len(batch_out), batch_update_size))
hbase_man_update_out.rdd2hbase(batch_out_rdd)
#batch_rdd.unpersist()
except Exception as inst:
print("[save_new_sha1s_for_index_update_batchwrite] Could not create/save batch {}. Error was: {}".format(batch_id, inst))
print("[save_new_sha1s_for_index_update_batchwrite] DONE in {}s".format(time.time() - start_save_time))
def save_new_images_for_index(basepath_save, out_rdd, hbase_man_update_out, incr_update_id, batch_update_size, c_options, new_images_to_index_str):
# save images without cu_feat_id that have not been discarded for indexing
new_images_to_index = out_rdd.filter(lambda x: "info:image_discarded" not in x[1] and "info:cu_feat_id" not in x[1])
new_images_to_index_count = new_images_to_index.count()
print("[save_new_images_for_index] {}_count count: {}".format(new_images_to_index_str, new_images_to_index_count))
save_info_incremental_update(hbase_man_update_out, incr_update_id, new_images_to_index_count, new_images_to_index_str+"_count")
import numpy as np
total_batches = int(np.ceil(np.float32(new_images_to_index_count)/batch_update_size))
# partition to the number of batches?
# 'save_new_sha1s_for_index_update' uses toLocalIterator()
new_images_to_index_partitioned = new_images_to_index.partitionBy(total_batches)
# save to HDFS too
if c_options.save_inter_rdd:
try:
new_images_to_index_out_path = basepath_save + "/" + new_images_to_index_str
if not hdfs_file_exist(new_images_to_index_out_path):
print("[save_new_images_for_index] saving rdd to {}.".format(new_images_to_index_out_path))
new_images_to_index_partitioned.keys().saveAsTextFile(new_images_to_index_out_path)
else:
print("[save_new_images_for_index] skipped saving rdd to {}. File already exists.".format(new_images_to_index_out_path))
save_info_incremental_update(hbase_man_update_out, incr_update_id, new_images_to_index_out_path, new_images_to_index_str+"_path")
except Exception as inst:
print("[save_new_images_for_index] could not save rdd 'new_images_to_index' at {}, error was {}.".format(new_images_to_index_out_path, inst))
# save by batch in HBase to let the API know it needs to index these images
print("[save_new_images_for_index] start saving by batches of {} new images.".format(batch_update_size))
# crashes in 'save_new_sha1s_for_index_update'?
#save_new_sha1s_for_index_update(new_images_to_index_partitioned.keys(), hbase_man_update_out, batch_update_size, incr_update_id, total_batches)
save_new_sha1s_for_index_update_batchwrite(new_images_to_index_partitioned.keys(), hbase_man_update_out, batch_update_size, incr_update_id, total_batches)
##---------------
##-- END New images for features computation functions
##-- Amandeep RDDs I/O
##---------------
def out_to_amandeep_dict_str(x):
# this is called with map()
sha1 = x[0]
# keys should be: "image_sha1", "all_parent_ids", "s3_url", "all_cdr_ids"
# keep "cu_feat_id" to be able to push images to be indexed
out_dict = dict()
out_dict["image_sha1"] = sha1
for field in ["all_parent_ids", "s3_url", "all_cdr_ids", "cu_feat_id"]:
if "info:"+field in x[1]:
out_dict[field] = x[1]["info:"+field]
return (sha1, json.dumps(out_dict))
def amandeep_dict_str_to_out(x):
# this is called with mapValues()
# keys should be: "image_sha1", "all_parent_ids", "s3_url", "all_cdr_ids"
# keep "cu_feat_id" to be able to push images to be indexed
tmp_dict = json.loads(x)
out_dict = dict()
#sha1 = tmp_dict["image_sha1"]
for field in ["all_parent_ids", "s3_url", "all_cdr_ids", "cu_feat_id"]:
if field in tmp_dict:
out_dict["info:"+field] = tmp_dict[field]
return out_dict
def filter_out_rdd(x):
return "info:image_discarded" not in x[1] and len(x[1]["info:all_cdr_ids"]) <= max_images_dig and len(x[1]["info:all_parent_ids"]) <= max_images_dig
##-- END Amandeep RDDs I/O
##---------------
##-- Incremental update get RDDs main functions
##---------------
def get_cdr_ids_infos_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time):
rdd_name = "cdr_ids_infos_rdd"
# always try to load from disk
cdr_ids_infos_rdd = load_rdd_json(basepath_save, rdd_name)
if cdr_ids_infos_rdd is not None:
print("[get_cdr_ids_infos_rdd: info] cdr_ids_infos_rdd loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
return cdr_ids_infos_rdd
if not c_options.uptonow and es_ts_end is not None:
query = "{\"fields\": [\""+"\", \"".join(fields_cdr)+"\"], \"query\": {\"filtered\": {\"query\": {\"match\": {\"content_type\": \"image/jpeg\"}}, \"filter\": {\"range\" : {\"_timestamp\" : {\"gte\" : "+str(es_ts_start)+", \"lt\": "+str(es_ts_end)+"}}}}}, \"sort\": [ { \"_timestamp\": { \"order\": \"asc\" } } ] }"
print("[get_cdr_ids_infos_rdd] query CDR for one day with: {}".format(query))
else:
query = "{\"fields\": [\""+"\", \"".join(fields_cdr)+"\"], \"query\": {\"filtered\": {\"query\": {\"match\": {\"content_type\": \"image/jpeg\"}}, \"filter\": {\"range\" : {\"_timestamp\" : {\"gte\" : "+str(es_ts_start)+"}}}}}, \"sort\": [ { \"_timestamp\": { \"order\": \"asc\" } } ] }"
print("[get_cdr_ids_infos_rdd] query CDR UP TO NOW with: {}".format(query))
# get incremental update
es_rdd_nopart = es_man.es2rdd(query)
if es_rdd_nopart.isEmpty():
print("[get_cdr_ids_infos_rdd] empty incremental update when querying from timestamp {}".format(es_ts_start))
return None
# es_rdd_nopart is likely to be underpartitioned
es_rdd = es_rdd_nopart.partitionBy(nb_partitions)
# save incremental update infos
incr_update_infos_list = []
es_rdd_count = es_rdd.count()
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", "start_time", str(start_time)]))
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", "es_rdd_count", str(es_rdd_count)]))
incr_update_infos_rdd = sc.parallelize(incr_update_infos_list)
hbase_man_update_out.rdd2hbase(incr_update_infos_rdd)
# save to hbase
images_ts_cdrid_rdd = es_rdd.flatMap(create_images_tuple)
print("[get_cdr_ids_infos_rdd: info] images_ts_cdrid_rdd first samples look like: {}".format(images_ts_cdrid_rdd.take(5)))
print("[get_cdr_ids_infos_rdd] saving 'images_ts_cdrid_rdd' to HBase.")
hbase_man_ts.rdd2hbase(images_ts_cdrid_rdd)
min_ts_cdrid = images_ts_cdrid_rdd.min()[0].strip()
max_ts_cdrid = images_ts_cdrid_rdd.max()[0].strip()
# save incremental update infos
incr_update_infos_list = []
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", "min_ts_cdrid", min_ts_cdrid]))
incr_update_infos_list.append((incr_update_id, [incr_update_id, "info", "max_ts_cdrid", max_ts_cdrid]))
incr_update_infos_rdd = sc.parallelize(incr_update_infos_list)
print("[get_cdr_ids_infos_rdd] saving incremental update infos: id {}, min_ts_cdrid {}, max_ts_cdrid {}".format(incr_update_id, min_ts_cdrid, max_ts_cdrid))
hbase_man_update_out.rdd2hbase(incr_update_infos_rdd)
cdr_ids_infos_rdd = es_rdd.flatMap(to_cdr_id_dict)
# save rdd
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, rdd_name, cdr_ids_infos_rdd, incr_update_id, hbase_man_update_out)
return cdr_ids_infos_rdd
def get_cdr_ids_infos_rdd_join_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
rdd_name = "cdr_ids_infos_rdd_join_sha1"
# always try to load from disk
cdr_ids_infos_rdd_join_sha1 = load_rdd_json(basepath_save, rdd_name)
if cdr_ids_infos_rdd_join_sha1 is not None:
print("[get_cdr_ids_infos_rdd_join_sha1: info] cdr_ids_infos_rdd_join_sha1 loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
return cdr_ids_infos_rdd_join_sha1
# get dependency s3url_infos_rdd_join
s3url_infos_rdd_join = get_s3url_infos_rdd_join(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time)
if s3url_infos_rdd_join is None:
print("[get_cdr_ids_infos_rdd_join_sha1] s3url_infos_rdd_join is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
# invert s3url_infos_rdd_join (s3_url, ([v], sha1)) into cdr_ids_infos_rdd_join_sha1 (k, [v]) adding info:sha1 in each v dict
s3url_infos_rdd_with_sha1 = s3url_infos_rdd_join.filter(get_existing_joined_sha1)
cdr_ids_infos_rdd_join_sha1 = s3url_infos_rdd_with_sha1.flatMap(s3url_dict_list_to_cdr_id_wsha1)
print("[get_cdr_ids_infos_rdd_join_sha1: info] cdr_ids_infos_rdd_join_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_join_sha1.take(5)))
# save infos to hbase update table
cdr_ids_infos_rdd_join_sha1_count = cdr_ids_infos_rdd_join_sha1.count()
save_info_incremental_update(hbase_man_update_out, incr_update_id, cdr_ids_infos_rdd_join_sha1_count, rdd_name+"_count")
# save rdd content to hbase
print("[get_cdr_ids_infos_rdd_join_sha1: info] cdr_ids_infos_rdd_join_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_join_sha1.take(5)))
print("[get_cdr_ids_infos_rdd_join_sha1] saving 'cdr_ids_infos_rdd_join_sha1' to HBase.")
hbase_man_cdrinfos_out.rdd2hbase(cdr_ids_infos_rdd_join_sha1.flatMap(expand_info))
# save rdd to disk
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, rdd_name, cdr_ids_infos_rdd_join_sha1, incr_update_id, hbase_man_update_out)
return cdr_ids_infos_rdd_join_sha1
def get_update_join_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
rdd_name = "update_join_rdd"
# always try to load from disk
update_join_rdd = load_rdd_json(basepath_save, rdd_name)
if update_join_rdd is not None:
print("[get_update_join_rdd: info] update_join_rdd loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
return update_join_rdd
# we need cdr_ids_infos_rdd_join_sha1
cdr_ids_infos_rdd_join_sha1 = get_cdr_ids_infos_rdd_join_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if cdr_ids_infos_rdd_join_sha1 is None:
print("[get_update_join_rdd] cdr_ids_infos_rdd_join_sha1 is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
# transform cdr_id rdd into sha1 rdd
print("[get_update_join_rdd] cdr_ids_infos_rdd_join_sha1 first samples are: {}".format(cdr_ids_infos_rdd_join_sha1.take(5)))
sha1_infos_rdd_from_join = cdr_ids_infos_rdd_join_sha1.flatMap(cdrid_key_to_sha1_key)
update_join_rdd = sha1_infos_rdd_from_join.reduceByKey(reduce_sha1_infos_discarding)
# save rdd infos
update_join_rdd_count = update_join_rdd.count()
save_info_incremental_update(hbase_man_update_out, incr_update_id, update_join_rdd_count, rdd_name+"_count")
# save to disk
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, rdd_name, update_join_rdd, incr_update_id, hbase_man_update_out)
return update_join_rdd
def compute_out_join_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, hbase_man_sha1infos_out, incr_update_id, nb_partitions, c_options, start_time):
## check if we not already have computed this join step of this update
out_join_rdd_path = basepath_save + "/out_join_rdd"
out_join_rdd_amandeep = None
update_join_rdd_partitioned = None
sha1_infos_rdd_json = None
if c_options.restart:
try:
if hdfs_file_exist(out_join_rdd_path):
out_join_rdd_amandeep = sc.sequenceFile(out_join_rdd_path).mapValues(amandeep_dict_str_to_out)
except Exception as inst:
pass
if out_join_rdd_amandeep is not None:
# consider already processed
print("[compute_out_join_rdd] out_join_rdd already computed for update {}.".format(incr_update_id))
# if we are re-running this, it might mean we did not manage to save to HBase. Retrying
save_out_rdd_to_hbase(out_join_rdd_amandeep, hbase_man_sha1infos_out)
return out_join_rdd_amandeep
## try to reload rdds that could have already been computed, compute chain of dependencies if needed
# propagate down es_ts_end
update_join_rdd = get_update_join_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if update_join_rdd is None:
print("[compute_out_join_rdd] update_join_rdd is empty.")
else:
## update cdr_ids, and parents cdr_ids for the existing sha1s (if any)
print("[compute_out_join_rdd] Reading from sha1_infos HBase table.")
sha1_infos_rdd = hbase_man_sha1infos_join.read_hbase_table()
if not sha1_infos_rdd.isEmpty():
update_join_rdd_partitioned = update_join_rdd.partitionBy(nb_partitions).persist(StorageLevel.MEMORY_AND_DISK)
sha1_infos_rdd_json = sha1_infos_rdd.flatMap(sha1_key_json).partitionBy(nb_partitions).persist(StorageLevel.MEMORY_AND_DISK)
update_join_sha1_rdd = update_join_rdd_partitioned.leftOuterJoin(sha1_infos_rdd_json).flatMap(flatten_leftjoin)
out_join_rdd_amandeep = update_join_sha1_rdd
else: # first update
out_join_rdd_amandeep = update_join_rdd
## save rdd
if c_options.save_inter_rdd:
if out_join_rdd_amandeep is None or out_join_rdd_amandeep.isEmpty():
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", "out_join_rdd_path")
else:
try:
if not hdfs_file_exist(out_join_rdd_path):
out_join_rdd_amandeep.filter(filter_out_rdd).map(out_to_amandeep_dict_str).saveAsSequenceFile(out_join_rdd_path)
else:
print("[compute_out_join_rdd] Skipped saving out_join_rdd. File already exists at {}.".format(out_join_rdd_path))
save_info_incremental_update(hbase_man_update_out, incr_update_id, out_join_rdd_path, "out_join_rdd_path")
except Exception as inst:
print("[compute_out_join_rdd] could not save rdd at {}, error was {}.".format(out_join_rdd_path, inst))
save_out_rdd_to_hbase(out_join_rdd_amandeep, hbase_man_sha1infos_out)
# if out_join_rdd_amandeep is not None:
# ## save sha1 infos for these joined images in HBase
# out_join_rdd = out_join_rdd_amandeep.flatMap(split_sha1_kv_images_discarded)
# print("[compute_out_join_rdd] saving 'out_join_rdd' to sha1_infos HBase table.")
# hbase_man_sha1infos_out.rdd2hbase(out_join_rdd)
return out_join_rdd_amandeep
def get_cdr_ids_infos_rdd_new_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
## for not matching s3url i.e. missing sha1
rdd_name = "cdr_ids_infos_rdd_new_sha1"
# always try to load from disk
cdr_ids_infos_rdd_new_sha1 = load_rdd_json(basepath_save, rdd_name)
if cdr_ids_infos_rdd_new_sha1 is not None:
print("[get_cdr_ids_infos_rdd_new_sha1: info] cdr_ids_infos_rdd_new_sha1 loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
print("[get_cdr_ids_infos_rdd_new_sha1: info] cdr_ids_infos_rdd_new_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_new_sha1.take(5)))
return cdr_ids_infos_rdd_new_sha1
# get joined (actually all s3 urls of current update if not c_options.join_s3url), subtract, download images
s3url_infos_rdd_join = get_s3url_infos_rdd_join(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_update_out, incr_update_id, nb_partitions, c_options, start_time)
if s3url_infos_rdd_join is not None:
s3url_infos_rdd_with_sha1 = s3url_infos_rdd_join.filter(get_existing_joined_sha1)
if not s3url_infos_rdd_with_sha1.isEmpty():
s3url_infos_rdd_no_sha1 = s3url_infos_rdd_join.subtractByKey(s3url_infos_rdd_with_sha1)
else: # when all new s3 urls or not c_options.join_s3url
s3url_infos_rdd_no_sha1 = s3url_infos_rdd_join
s3url_infos_rdd_no_sha1_count = s3url_infos_rdd_no_sha1.count()
print("[get_cdr_ids_infos_rdd_new_sha1: info] starting to download images to get new sha1s for {} URLs.".format(s3url_infos_rdd_no_sha1_count))
s3url_infos_rdd_new_sha1 = s3url_infos_rdd_no_sha1.partitionBy(nb_partitions).flatMap(check_get_sha1_s3url)
cdr_ids_infos_rdd_new_sha1 = s3url_infos_rdd_new_sha1.flatMap(s3url_dict_list_to_cdr_id_wsha1)
else:
cdr_ids_infos_rdd_new_sha1 = None
if cdr_ids_infos_rdd_new_sha1 is None or cdr_ids_infos_rdd_new_sha1.isEmpty():
print("[get_cdr_ids_infos_rdd_new_sha1] cdr_ids_infos_rdd_new_sha1 is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
else:
# save rdd
print("[get_cdr_ids_infos_rdd_new_sha1: info] cdr_ids_infos_rdd_new_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_new_sha1.take(5)))
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, "cdr_ids_infos_rdd_new_sha1", cdr_ids_infos_rdd_new_sha1, incr_update_id, hbase_man_update_out)
# save infos
cdr_ids_infos_rdd_new_sha1_count = cdr_ids_infos_rdd_new_sha1.count()
save_info_incremental_update(hbase_man_update_out, incr_update_id, cdr_ids_infos_rdd_new_sha1_count, "cdr_ids_infos_rdd_new_sha1_count")
print("[get_cdr_ids_infos_rdd_new_sha1: info] cdr_ids_infos_rdd_new_sha1 first samples look like: {}".format(cdr_ids_infos_rdd_new_sha1.take(5)))
print("[get_cdr_ids_infos_rdd_new_sha1] saving 'cdr_ids_infos_rdd_new_sha1' to HBase.")
hbase_man_cdrinfos_out.rdd2hbase(cdr_ids_infos_rdd_new_sha1.flatMap(expand_info))
return cdr_ids_infos_rdd_new_sha1
def get_update_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options):
rdd_name = "update_rdd"
# always try to load from disk
update_rdd = load_rdd_json(basepath_save, rdd_name)
if update_rdd is not None:
print("[get_update_rdd: info] update_rdd loaded rdd from {}.".format(basepath_save + "/" + rdd_name))
return update_rdd
cdr_ids_infos_rdd_new_sha1 = get_cdr_ids_infos_rdd_new_sha1(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if cdr_ids_infos_rdd_new_sha1 is None:
print("[get_update_rdd] cdr_ids_infos_rdd_new_sha1 is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
# here new sha1s means we did not see the corresponding s3url before, but the sha1 may still be in the sha1_infos table
# so we still need to merge potentially
update_rdd = cdr_ids_infos_rdd_new_sha1.flatMap(cdrid_key_to_sha1_key).reduceByKey(reduce_sha1_infos_discarding)
update_rdd_count = update_rdd.count()
save_info_incremental_update(hbase_man_update_out, incr_update_id, update_rdd_count, "update_rdd_count")
# save to disk
if c_options.save_inter_rdd:
save_rdd_json(basepath_save, rdd_name, update_rdd, incr_update_id, hbase_man_update_out)
    # also return update_rdd_count to allow dynamic partitioning?
return update_rdd
def compute_out_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, hbase_man_sha1infos_out, incr_update_id, nb_partitions, c_options, start_time):
## check if we not already have computed this join step of this update
rdd_name = "out_rdd"
out_rdd_path = basepath_save + "/" + rdd_name
out_rdd_amandeep = None
update_rdd_partitioned = None
sha1_infos_rdd_json = None
if c_options.restart:
print "[compute_out_rdd] Looking for:",out_rdd_path
try:
if hdfs_file_exist(out_rdd_path):
out_rdd_amandeep = sc.sequenceFile(out_rdd_path).mapValues(amandeep_dict_str_to_out)
except Exception as inst:
# would mean file existed but corrupted?
pass
if out_rdd_amandeep is not None:
# consider already processed
print("[compute_out_rdd] out_rdd already computed for update {}.".format(incr_update_id))
# we should try to check if saving to hbase_man_sha1infos_out has completed
save_out_rdd_to_hbase(out_rdd_amandeep, hbase_man_sha1infos_out)
return out_rdd_amandeep
## try to reload rdds that could have already been computed, compute chain of dependencies if needed
# propagate down es_ts_end
update_rdd = get_update_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
if update_rdd is None:
print("[compute_out_rdd] update_rdd is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, 0, rdd_name+"_count")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
return None
## update cdr_ids, and parents cdr_ids for these new sha1s
print("[compute_out_rdd] reading from hbase_man_sha1infos_join to get sha1_infos_rdd.")
sha1_infos_rdd = hbase_man_sha1infos_join.read_hbase_table()
# we may need to merge some 'all_cdr_ids' and 'all_parent_ids'
if not sha1_infos_rdd.isEmpty():
print("[compute_out_rdd] partitioning update_rdd.")
update_rdd_partitioned = update_rdd.partitionBy(nb_partitions)
print("[compute_out_rdd] partitioning sha1_infos_rdd.")
sha1_infos_rdd_json = sha1_infos_rdd.flatMap(sha1_key_json).partitionBy(nb_partitions)
print("[compute_out_rdd] joining sha1_infos_rdd and update_rdd.")
join_rdd = update_rdd_partitioned.leftOuterJoin(sha1_infos_rdd_json).flatMap(flatten_leftjoin)
out_rdd_amandeep = join_rdd
else: # first update
out_rdd_amandeep = update_rdd
# save rdd
if c_options.save_inter_rdd:
try:
if not hdfs_file_exist(out_rdd_path):
# we should discard based on c_options.max_images_dig here actually
out_rdd_save = out_rdd_amandeep.filter(filter_out_rdd).map(out_to_amandeep_dict_str)
if not out_rdd_save.isEmpty():
out_rdd_save.saveAsSequenceFile(out_rdd_path)
save_info_incremental_update(hbase_man_update_out, incr_update_id, out_rdd_path, rdd_name+"_path")
else:
print("[compute_out_rdd] 'out_rdd_save' is empty.")
save_info_incremental_update(hbase_man_update_out, incr_update_id, "EMPTY", rdd_name+"_path")
else:
print("[compute_out_rdd] Skipped saving out_rdd. File already exists at {}.".format(out_rdd_path))
#return None
# org.apache.hadoop.mapred.FileAlreadyExistsException
except Exception as inst:
print("[compute_out_rdd] could not save rdd at {}, error was {}.".format(out_rdd_path, inst))
# save to HBase
save_out_rdd_to_hbase(out_rdd_amandeep, hbase_man_sha1infos_out)
# ## write out rdd of new images
# out_rdd = out_rdd_amandeep.flatMap(split_sha1_kv_images_discarded)
# if not out_rdd.isEmpty():
# print("[compute_out_rdd] saving 'out_rdd' to sha1_infos HBase table.")
# hbase_man_sha1infos_out.rdd2hbase(out_rdd)
    # # how to be sure this has completed?
# else:
# print("[compute_out_rdd] 'out_rdd' is empty.")
return out_rdd_amandeep
def save_out_rdd_to_hbase(out_rdd_amandeep, hbase_man_sha1infos_out):
if out_rdd_amandeep is not None:
# write out rdd of new images
out_rdd = out_rdd_amandeep.flatMap(split_sha1_kv_images_discarded)
if not out_rdd.isEmpty():
print("[save_out_rdd_to_hbase] saving 'out_rdd' to sha1_infos HBase table.")
hbase_man_sha1infos_out.rdd2hbase(out_rdd)
            # how to be sure this has completed?
else:
print("[save_out_rdd_to_hbase] 'out_rdd' is empty.")
else:
print("[save_out_rdd_to_hbase] 'out_rdd_amandeep' is None.")
##-------------
def incremental_update(es_man, hbase_man_ts, hbase_man_cdrinfos_out, hbase_man_sha1infos_join, hbase_man_sha1infos_out, hbase_man_s3url_sha1_in, hbase_man_s3url_sha1_out, hbase_man_update_out, nb_partitions, c_options):
# We should query to get all data from LAST day
print("Will process full day before {}".format(c_options.day_to_process))
start_date = dateutil.parser.parse(c_options.day_to_process)
# es_ts_end could be set to None if uptonow was set to True
# ES timestamp in milliseconds
es_ts_end = calendar.timegm(start_date.utctimetuple())*1000
es_ts_start = es_ts_end - day_gap
print("Will query CDR from {} to {}".format(es_ts_start, es_ts_end))
# We should propagate down es_ts_start AND es_ts_end
restart = c_options.restart
save_inter_rdd = c_options.save_inter_rdd
identifier = c_options.identifier
day_to_process = c_options.day_to_process
batch_update_size = c_options.batch_update_size
start_time = time.time()
## set incr_update_id
if c_options.restart:
if not c_options.identifier:
raise ValueError('[incremental_update: error] Trying to restart without specifying update identifier.')
incr_update_id = c_options.identifier
else:
if c_options.day_to_process:
incr_update_id = datetime.date.fromtimestamp((es_ts_start)/1000).isoformat()
else:
incr_update_id = 'incremental_update_'+str(max_ts-int(start_time*1000))
#basepath_save = '/user/skaraman/data/images_incremental_update/'+incr_update_id
basepath_save = base_incremental_path+incr_update_id+'/images/info'
if c_options.join_s3url:
## compute update for s3 urls we already now
out_join_rdd = compute_out_join_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, hbase_man_sha1infos_out, incr_update_id, nb_partitions, c_options, start_time)
## save potential new images in out_join_rdd by batch of 10000 to be indexed?
        # They should have been indexed the first time they were seen... but the download could have failed etc.
        # Might be good to retry images without a cu_feat_id here once indexing has caught up
#save_new_images_for_index(out_join_rdd, hbase_man_update_out, incr_update_id, batch_update_size, "new_images_to_index_join")
## compute update for new s3 urls
out_rdd = compute_out_rdd(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, hbase_man_sha1infos_out, incr_update_id, nb_partitions, c_options, start_time)
if out_rdd is not None and not out_rdd.isEmpty():
save_new_images_for_index(basepath_save, out_rdd, hbase_man_update_out, incr_update_id, batch_update_size, c_options, "new_images_to_index")
save_new_s3_url(basepath_save, es_man, es_ts_start, es_ts_end, hbase_man_cdrinfos_out, hbase_man_update_out, incr_update_id, nb_partitions, c_options)
update_elapsed_time = time.time() - start_time
save_info_incremental_update(hbase_man_update_out, incr_update_id, str(update_elapsed_time), "update_elapsed_time")
## MAIN
if __name__ == '__main__':
start_time = time.time()
# Parse options
parser = OptionParser()
parser.add_option("-r", "--restart", dest="restart", default=False, action="store_true")
parser.add_option("-i", "--identifier", dest="identifier") # redudant with day to process now...
parser.add_option("-d", "--day_to_process", dest="day_to_process", default=datetime.date.today().isoformat())
parser.add_option("-s", "--save", dest="save_inter_rdd", default=False, action="store_true")
parser.add_option("-j", "--join_s3url", dest="join_s3url", default=False, action="store_true")
parser.add_option("-u", "--uptonow", dest="uptonow", default=False, action="store_true")
parser.add_option("-b", "--batch_update_size", dest="batch_update_size", default=10000)
# expose max_images_dig so Amandeep can change that on the fly if needed
parser.add_option("-m", "--max_images_dig", dest="max_images_dig", default=50000)
# we could add options for uptonow, auto join based on number of s3_urls to download
(c_options, args) = parser.parse_args()
print "Got options:", c_options
# Read job_conf
job_conf = json.load(open("job_conf_notcommited"+dev_release_suffix+".json","rt"))
    print(job_conf)
sc = SparkContext(appName="images_incremental_update"+dev_release_suffix)
conf = SparkConf()
log4j = sc._jvm.org.apache.log4j
log4j.LogManager.getRootLogger().setLevel(log4j.Level.ERROR)
# Set parameters job_conf
# should this be estimated from RDD counts actually?
nb_partitions = job_conf["nb_partitions"]
# HBase Conf
hbase_host = job_conf["hbase_host"]
tab_ts_name = job_conf["tab_ts_name"]
hbase_man_ts = HbaseManager(sc, conf, hbase_host, tab_ts_name)
tab_cdrid_infos_name = job_conf["tab_cdrid_infos_name"]
tab_sha1_infos_name = job_conf["tab_sha1_infos_name"]
tab_s3url_sha1_name = job_conf["tab_s3url_sha1_name"]
tab_update_name = job_conf["tab_update_name"]
# this is the maximum number of cdr_ids for an image to be saved to HBase
max_images_hbase = job_conf["max_images"]
# this is the maximum number of cdr_ids for an image to be saved to HDFS for dig
max_images_dig = c_options.max_images_dig
max_images_reduce = max(max_images_hbase, max_images_dig)
# Setup HBase managers
join_columns_list = [':'.join(x) for x in fields_list]
hbase_man_sha1infos_join = HbaseManager(sc, conf, hbase_host, tab_sha1_infos_name, columns_list=join_columns_list)
hbase_man_sha1infos_out = HbaseManager(sc, conf, hbase_host, tab_sha1_infos_name)
hbase_man_cdrinfos_out = HbaseManager(sc, conf, hbase_host, tab_cdrid_infos_name)
hbase_man_update_out = HbaseManager(sc, conf, hbase_host, tab_update_name, time_sleep=time_sleep_update_out)
# actually only needed if join_s3url is True
hbase_man_s3url_sha1_in = HbaseManager(sc, conf, hbase_host, tab_s3url_sha1_name)
hbase_man_s3url_sha1_out = HbaseManager(sc, conf, hbase_host, tab_s3url_sha1_name)
# ES conf
es_index = job_conf["es_index"]
es_domain = job_conf["es_domain"]
es_host = job_conf["es_host"]
es_port = job_conf["es_port"]
es_user = job_conf["es_user"]
es_pass = job_conf["es_pass"]
# deprecated
#es_ts_start = job_conf["query_timestamp_start"]
# Setup ES manager
es_man = ES(sc, conf, es_index, es_domain, es_host, es_port, es_user, es_pass)
es_man.set_output_json()
es_man.set_read_metadata()
# Run update
incremental_update(es_man, hbase_man_ts, hbase_man_cdrinfos_out, hbase_man_sha1infos_join, hbase_man_sha1infos_out, hbase_man_s3url_sha1_in, hbase_man_s3url_sha1_out, hbase_man_update_out, nb_partitions, c_options)
print("[DONE] Update for day {} done in {}s.".format(c_options.day_to_process, time.time() - start_time))
|
bsd-2-clause
| 2,145,436,833,996,540,200
| 48.189799
| 322
| 0.640768
| false
| 2.949662
| false
| false
| false
|
twitter/pants
|
src/python/pants/base/worker_pool.py
|
1
|
8243
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import multiprocessing
import threading
from builtins import next, object
from multiprocessing.pool import ThreadPool
from future.moves import _thread
from pants.reporting.report import Report
class Work(object):
"""Represents multiple concurrent calls to the same callable."""
def __init__(self, func, args_tuples, workunit_name=None):
# A callable.
self.func = func
# A list of tuples of args. func will be called once per tuple, concurrently.
# The length of this list is the cardinality of the work.
self.args_tuples = args_tuples
# If specified, each invocation will be executed in a workunit of this name.
self.workunit_name = workunit_name
class WorkerPool(object):
"""A pool of workers.
Workers are threads, and so are subject to GIL constraints. Submitting CPU-bound work
may not be effective. Use this class primarily for IO-bound work.
"""
def __init__(self, parent_workunit, run_tracker, num_workers):
self._run_tracker = run_tracker
# All workers accrue work to the same root.
self._pool = ThreadPool(processes=num_workers,
initializer=self._run_tracker.register_thread,
initargs=(parent_workunit, ))
# We mustn't shutdown when there are pending workchains, as they may need to submit work
# in the future, and the pool doesn't know about this yet.
self._pending_workchains = 0
self._pending_workchains_cond = threading.Condition() # Protects self._pending_workchains.
self._shutdown_hooks = []
self.num_workers = num_workers
def add_shutdown_hook(self, hook):
self._shutdown_hooks.append(hook)
def submit_async_work(self, work, workunit_parent=None, on_success=None, on_failure=None):
"""Submit work to be executed in the background.
:param work: The work to execute.
:param workunit_parent: If specified, work is accounted for under this workunit.
:param on_success: If specified, a callable taking a single argument, which will be a list
of return values of each invocation, in order. Called only if all work succeeded.
:param on_failure: If specified, a callable taking a single argument, which is an exception
thrown in the work.
:return: `multiprocessing.pool.MapResult`
Don't do work in on_success: not only will it block the result handling thread, but
that thread is not a worker and doesn't have a logging context etc. Use it just to
submit further work to the pool.
"""
if work is None or len(work.args_tuples) == 0: # map_async hangs on 0-length iterables.
if on_success:
on_success([])
else:
def do_work(*args):
self._do_work(work.func, *args, workunit_name=work.workunit_name,
workunit_parent=workunit_parent, on_failure=on_failure)
return self._pool.map_async(do_work, work.args_tuples, chunksize=1, callback=on_success)
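  # Minimal usage sketch (fetch_url, urls, parent and on_done are hypothetical, not part of this module):
  #   work = Work(fetch_url, [(url,) for url in urls], workunit_name='fetch')
  #   pool.submit_async_work(work, workunit_parent=parent, on_success=on_done)
  # on_done is called with the list of fetch_url return values once all invocations succeed.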
def submit_async_work_chain(self, work_chain, workunit_parent, done_hook=None):
"""Submit work to be executed in the background.
- work_chain: An iterable of Work instances. Will be invoked serially. Each instance may
have a different cardinality. There is no output-input chaining: the argument
tuples must already be present in each work instance. If any work throws an
exception no subsequent work in the chain will be attempted.
- workunit_parent: Work is accounted for under this workunit.
- done_hook: If not None, invoked with no args after all work is done, or on error.
"""
def done():
if done_hook:
done_hook()
with self._pending_workchains_cond:
self._pending_workchains -= 1
self._pending_workchains_cond.notify()
def error(e):
done()
self._run_tracker.log(Report.ERROR, '{}'.format(e))
# We filter out Nones defensively. There shouldn't be any, but if a bug causes one,
# Pants might hang indefinitely without this filtering.
work_iter = (_f for _f in work_chain if _f)
def submit_next():
try:
self.submit_async_work(next(work_iter), workunit_parent=workunit_parent,
on_success=lambda x: submit_next(), on_failure=error)
except StopIteration:
done() # The success case.
with self._pending_workchains_cond:
self._pending_workchains += 1
try:
submit_next()
except Exception as e: # Handles errors in the submission code.
done()
self._run_tracker.log(Report.ERROR, '{}'.format(e))
raise
def submit_work_and_wait(self, work, workunit_parent=None):
"""Submit work to be executed on this pool, but wait for it to complete.
- work: The work to execute.
- workunit_parent: If specified, work is accounted for under this workunit.
Returns a list of return values of each invocation, in order. Throws if any invocation does.
"""
if work is None or len(work.args_tuples) == 0: # map hangs on 0-length iterables.
return []
else:
def do_work(*args):
return self._do_work(work.func, *args, workunit_name=work.workunit_name,
workunit_parent=workunit_parent)
# We need to specify a timeout explicitly, because otherwise python ignores SIGINT when waiting
# on a condition variable, so we won't be able to ctrl-c out.
return self._pool.map_async(do_work, work.args_tuples, chunksize=1).get(timeout=1000000000)
def _do_work(self, func, args_tuple, workunit_name, workunit_parent, on_failure=None):
try:
if workunit_name:
with self._run_tracker.new_workunit_under_parent(name=workunit_name, parent=workunit_parent):
return func(*args_tuple)
else:
return func(*args_tuple)
except KeyboardInterrupt:
# If a worker thread intercepts a KeyboardInterrupt, we want to propagate it to the main
# thread.
_thread.interrupt_main()
raise
except Exception as e:
if on_failure:
# Note that here the work's workunit is closed. So, e.g., it's OK to use on_failure()
# to close an ancestor workunit.
on_failure(e)
raise
def shutdown(self):
with self._pending_workchains_cond:
while self._pending_workchains > 0:
self._pending_workchains_cond.wait()
self._pool.close()
self._pool.join()
for hook in self._shutdown_hooks:
hook()
def abort(self):
self._pool.terminate()
class SubprocPool(object):
"""Singleton for managing multiprocessing.Pool instances
Subprocesses (including multiprocessing.Pool workers) can inherit locks in poorly written
libraries (eg zlib) if other threads in the parent process happen to be holding them at the
moment the worker is fork()'ed. Thus it is important to create any subprocesses BEFORE
starting any threads, or they may deadlock mysteriously when sent a particular piece of work.
  This is accomplished in pants by initializing these pools early, when creating the RunTracker.
However, in tests, RunTrackers are created repeatedly, as part of creating Contexts that
are used briefly and discarded. Creating a new subprocess pool every time is expensive, and will
lead to os.fork failing once too many processes are spawned.
To avoid this, the pools themselves are kept in this singleton and new RunTrackers re-use them.
"""
_pool = None
_lock = threading.Lock()
_num_processes = multiprocessing.cpu_count()
@classmethod
def set_num_processes(cls, num_processes):
cls._num_processes = num_processes
@classmethod
def foreground(cls):
with cls._lock:
if cls._pool is None:
cls._pool = ThreadPool(processes=cls._num_processes)
return cls._pool
@classmethod
def shutdown(cls, force):
with cls._lock:
old = cls._pool
cls._pool = None
if old:
if force:
old.terminate()
else:
old.close()
old.join()
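# Minimal usage sketch (a sketch, not pants API documentation): the shared pool returned by
# SubprocPool.foreground() is a multiprocessing.pool.ThreadPool, so IO-bound callables can be
# mapped onto it directly; do_io_task and items below are hypothetical.
#   pool = SubprocPool.foreground()
#   results = pool.map(do_io_task, items)
#   SubprocPool.shutdown(force=False)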
|
apache-2.0
| -2,454,205,278,918,337,000
| 37.339535
| 101
| 0.678151
| false
| 3.978282
| false
| false
| false
|
hycis/Pynet
|
pynet/cost.py
|
1
|
3580
|
__author__ = "Zhenzhou Wu"
__copyright__ = "Copyright 2012, Zhenzhou Wu"
__credits__ = ["Zhenzhou Wu"]
__license__ = "3-clause BSD"
__email__ = "hyciswu@gmail.com"
__maintainer__ = "Zhenzhou Wu"
import theano.tensor as T
import theano
from pynet.utils.utils import theano_unique
floatX = theano.config.floatX
class Cost(object):
"""
    Collection of cost functions and evaluation measures computed from targets y and predictions y_pred.
"""
def __init__(self, type = 'nll'):
self.type = type
def get_accuracy(self, y, y_pred):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', y_pred.type))
rval = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1)).sum() / y.shape[0]
return rval.astype(floatX)
def positives(self, y, y_pred):
"""
return the number of correctly predicted examples in a batch
"""
rval = T.eq(y_pred.argmax(axis=1), y.argmax(axis=1)).sum()
return rval.astype(floatX)
def get_batch_cost(self, y, y_pred):
return getattr(self, '_batch_cost_' + self.type)(y, y_pred)
def _batch_cost_nll(self, y, y_pred):
"""
return the total cost of all the examples in a batch
"""
rval = T.sum(T.log(y_pred)[T.arange(y.shape[0]), y.argmin(axis=1)])
return rval.astype(floatX)
def confusion_matrix(self, y, y_pred):
#TODO
pass
def get_cost(self, y, y_pred):
return getattr(self, '_cost_' + self.type)(y, y_pred)
def _cost_mse(self, y, y_pred):
L = T.sum(T.sqr(y - y_pred), axis=1)
rval = T.mean(L)
return rval.astype(floatX)
def _cost_entropy(self, y, y_pred):
L = - T.sum(y * T.log(y_pred) + (1-y) * T.log(1-y_pred), axis=1)
rval = T.mean(L)
return rval.astype(floatX)
def _cost_error(self, y, y_pred):
L = T.neq(y_pred.argmax(axis=1), y.argmax(axis=1))
rval = T.mean(L)
return rval.astype(floatX)
def _cost_f1(self, y, y_pred):
#TODO
pass
def _cost_binary_misprecision(self, y, y_pred):
'''
This cost function is only for binary classifications
'''
# assert(theano_unique(y).size == 2)
y_pred = y_pred.argmax(axis=1)
y = y.argmax(axis=1)
        # element-wise multiplication acts as a logical AND; Python's 'and' is not defined for Theano tensors
        TP = (y_pred * y).astype(floatX)
        y0 = T.eq(y, 0)
        FP = (y0 * y_pred).astype(floatX)
TP = T.sum(TP)
FP = T.sum(FP)
rval = FP / (TP + FP)
return rval
def _cost_FP_minus_TP(self, y, y_pred):
'''
This cost function is only for binary classifications
'''
# assert(theano_unique(y).size == 2)
y_pred = y_pred.argmax(axis=1)
y = y.argmax(axis=1)
        # element-wise multiplication as logical AND (see _cost_binary_misprecision)
        TP = (y_pred * y).astype(floatX)
        y0 = T.eq(y, 0)
        FP = (y0 * y_pred).astype(floatX)
TP = T.mean(TP)
FP = T.mean(FP)
return FP - TP
def _cost_recall(self, y, y_pred):
#TODO
pass
def _cost_abs(self, y, y_pred):
        L = T.sum(T.abs_(y - y_pred), axis=1)
rval = T.mean(L)
return rval.astype(floatX)
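# Minimal usage sketch (assumes one-hot targets y and predictions y_pred in (0, 1) as Theano
# matrices; variable names are illustrative only):
#   y, y_pred = T.matrix('y'), T.matrix('y_pred')
#   cost = Cost(type='entropy')
#   loss = cost.get_cost(y, y_pred)          # dispatches to _cost_entropy
#   acc = cost.get_accuracy(y, y_pred)       # fraction of correct argmax predictions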
|
apache-2.0
| -6,236,967,485,249,839,000
| 26.96875
| 79
| 0.550279
| false
| 3.176575
| false
| false
| false
|
3liz/QuickOSM
|
QuickOSM/core/api/connexion_oapi.py
|
1
|
6275
|
"""Manage Overpass API connexion."""
import logging
import os
import re
from typing import List
from qgis.core import QgsFileDownloader
from qgis.PyQt.QtCore import QDir, QEventLoop, QFileInfo, QTemporaryFile, QUrl
from QuickOSM.core.exceptions import (
NetWorkErrorException,
OverpassBadRequestException,
OverpassManyRequestException,
OverpassMemoryException,
OverpassRuntimeError,
OverpassTimeoutException,
)
__copyright__ = 'Copyright 2019, 3Liz'
__license__ = 'GPL version 3'
__email__ = 'info@3liz.org'
LOGGER = logging.getLogger('QuickOSM')
class ConnexionOAPI:
"""
Manage connexion to the overpass API.
"""
def __init__(self, url: str, convert: bool = False):
"""Constructor of query.
:param url:Full URL of OverPass Query with the query encoded in it.
:type url:str
"""
self._url = QUrl(url)
if convert:
temporary = QTemporaryFile(
os.path.join(QDir.tempPath(), 'request-XXXXXX.txt'))
else:
temporary = QTemporaryFile(
os.path.join(QDir.tempPath(), 'request-XXXXXX.osm'))
temporary.open()
self.result_path = temporary.fileName()
temporary.close()
self.errors = []
def error(self, messages):
self.errors = messages
@staticmethod
def canceled():
LOGGER.info('Request canceled')
# TODO, need to handle this to stop the process.
@staticmethod
def completed():
LOGGER.info('Request completed')
def run_convert(self):
loop = QEventLoop()
downloader = QgsFileDownloader(
self._url, self.result_path, delayStart=True)
downloader.downloadExited.connect(loop.quit)
downloader.downloadError.connect(self.error)
downloader.downloadCanceled.connect(self.canceled)
downloader.downloadCompleted.connect(self.completed)
downloader.startDownload()
loop.exec_()
with open(self.result_path, encoding='utf8') as txt_file:
text = txt_file.read()
query = re.findall("<pre>\\n(.*?)</pre>", text)[0]
return query
def run(self):
"""Run the query.
@raise OverpassBadRequestException,NetWorkErrorException,
OverpassTimeoutException
@return: The result of the query.
@rtype: str
"""
loop = QEventLoop()
downloader = QgsFileDownloader(
self._url, self.result_path, delayStart=True)
downloader.downloadExited.connect(loop.quit)
downloader.downloadError.connect(self.error)
downloader.downloadCanceled.connect(self.canceled)
downloader.downloadCompleted.connect(self.completed)
downloader.startDownload()
loop.exec_()
for message in self.errors:
self.is_query_timed_out(message)
self.too_many_request(message)
self.is_bad_request(message)
LOGGER.error(message)
if len(self.errors):
raise NetWorkErrorException('Overpass API', ', '.join(self.errors))
osm_file = QFileInfo(self.result_path)
if not osm_file.exists() and not osm_file.isFile():
# Do not raise a QuickOSM exception here
# It must be a bug from QuickOSM
raise FileNotFoundError
self.check_file(self.result_path)
# Everything went fine
return self.result_path
@staticmethod
def check_file(path: str):
        # The download is done; check for an incomplete OSM file.
        # Overpass might have aborted the request while still returning HTTP 200.
LOGGER.info('Checking OSM file content {}'.format(path))
def last_lines(file_path: str, line_count: int) -> List[str]:
bufsize = 8192
fsize = os.stat(file_path).st_size
iteration = 0
with open(file_path, encoding='utf8') as f:
if bufsize > fsize:
bufsize = fsize - 1
data = []
while True:
iteration += 1
seek_size = fsize - bufsize * iteration
if seek_size < 0:
seek_size = 0
f.seek(seek_size)
data.extend(f.readlines())
if len(data) >= line_count or f.tell() == 0:
line_content = data[-line_count:]
return line_content
else:
return list(f.readlines())
lines = last_lines(path, 10)
# Check if we can use the static method below
timeout = (
'<remark> runtime error: Query timed out in "[a-z]+" at line '
'[\d]+ after ([\d]+) seconds. </remark>')
if re.search(timeout, ''.join(lines)):
raise OverpassTimeoutException
memory = (
'<remark> runtime error: Query ran out of memory in "query" at '
'line [\d]+. It would need at least ([\d]+) (.*) of RAM to '
'continue. </remark>')
search = re.search(memory, ''.join(lines))
if search:
raise OverpassMemoryException(search.group(1), search.group(2))
generic = (
'<remark> runtime error: (.*)</remark>')
search = re.search(generic, ''.join(lines))
if search:
raise OverpassRuntimeError(search.group(1))
@staticmethod
def is_query_timed_out(string: str):
text = 'Network request (.*) timed out'
search = re.search(text, string)
if search:
raise OverpassTimeoutException
@staticmethod
def too_many_request(string: str):
text = '(.*)server replied: Too Many Requests'
search = re.search(text, string)
if search:
raise OverpassManyRequestException
@staticmethod
def is_bad_request(string: str):
text = '(.*)server replied: Bad Request'
search = re.search(text, string)
if search:
raise OverpassBadRequestException
text = '(.*)server replied: Forbidden'
search = re.search(text, string)
if search:
raise OverpassBadRequestException
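# Minimal usage sketch (hypothetical Overpass URL; requires a Qt event loop, e.g. inside QGIS):
#   connexion = ConnexionOAPI('https://overpass-api.de/api/interpreter?data=...')
#   osm_path = connexion.run()   # raises one of the Overpass* exceptions on failure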
|
gpl-2.0
| 4,600,529,764,808,338,400
| 31.179487
| 79
| 0.578327
| false
| 4.214238
| false
| false
| false
|
OpenMined/PySyft
|
packages/syft/src/syft/proto/core/node/common/action/exception_action_pb2.py
|
1
|
6244
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/core/node/common/action/exception_action.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# syft absolute
from syft.proto.core.common import (
common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
from syft.proto.core.io import address_pb2 as proto_dot_core_dot_io_dot_address__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name="proto/core/node/common/action/exception_action.proto",
package="syft.core.node.common.service",
syntax="proto3",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n4proto/core/node/common/action/exception_action.proto\x12\x1dsyft.core.node.common.service\x1a%proto/core/common/common_object.proto\x1a\x1bproto/core/io/address.proto"\xc9\x01\n\x10\x45xceptionMessage\x12%\n\x06msg_id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12&\n\x07\x61\x64\x64ress\x18\x02 \x01(\x0b\x32\x15.syft.core.io.Address\x12\x37\n\x18msg_id_causing_exception\x18\x03 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x16\n\x0e\x65xception_type\x18\x04 \x01(\t\x12\x15\n\rexception_msg\x18\x05 \x01(\tb\x06proto3',
dependencies=[
proto_dot_core_dot_common_dot_common__object__pb2.DESCRIPTOR,
proto_dot_core_dot_io_dot_address__pb2.DESCRIPTOR,
],
)
_EXCEPTIONMESSAGE = _descriptor.Descriptor(
name="ExceptionMessage",
full_name="syft.core.node.common.service.ExceptionMessage",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="msg_id",
full_name="syft.core.node.common.service.ExceptionMessage.msg_id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="syft.core.node.common.service.ExceptionMessage.address",
index=1,
number=2,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="msg_id_causing_exception",
full_name="syft.core.node.common.service.ExceptionMessage.msg_id_causing_exception",
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="exception_type",
full_name="syft.core.node.common.service.ExceptionMessage.exception_type",
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="exception_msg",
full_name="syft.core.node.common.service.ExceptionMessage.exception_msg",
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=156,
serialized_end=357,
)
_EXCEPTIONMESSAGE.fields_by_name[
"msg_id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
_EXCEPTIONMESSAGE.fields_by_name[
"address"
].message_type = proto_dot_core_dot_io_dot_address__pb2._ADDRESS
_EXCEPTIONMESSAGE.fields_by_name[
"msg_id_causing_exception"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
DESCRIPTOR.message_types_by_name["ExceptionMessage"] = _EXCEPTIONMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ExceptionMessage = _reflection.GeneratedProtocolMessageType(
"ExceptionMessage",
(_message.Message,),
{
"DESCRIPTOR": _EXCEPTIONMESSAGE,
"__module__": "proto.core.node.common.action.exception_action_pb2"
# @@protoc_insertion_point(class_scope:syft.core.node.common.service.ExceptionMessage)
},
)
_sym_db.RegisterMessage(ExceptionMessage)
# @@protoc_insertion_point(module_scope)
|
apache-2.0
| 2,134,787,039,885,428,000
| 34.477273
| 547
| 0.618834
| false
| 3.630233
| false
| true
| false
|
MG-RAST/Shock
|
shock-server/plug-ins/boto-s3-download.py
|
1
|
2570
|
#!/usr/bin/python
# boto3 python client to download files from S3 and check md5
# AWS_ACCESS_KEY_ID .. The access key for your AWS account.
# AWS_SECRET_ACCESS_KEY .. The secret key for your AWS account.
# folker@anl.gov
import sys, getopt, boto3, hashlib, io
import argparse
def md5sum(src, length=io.DEFAULT_BUFFER_SIZE):
md5 = hashlib.md5()
with io.open(src, mode="rb") as fd:
for chunk in iter(lambda: fd.read(length), b''):
md5.update(chunk)
return md5.hexdigest()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-a","--keyid", default=None, help=" aws_access_key_id")
parser.add_argument("-b","--bucket", default=None, help="AWS bucket")
parser.add_argument("-t","--tmpfile", default=None,help="filename to create")
parser.add_argument("-o","--objectname", default=None,help="object to download")
parser.add_argument("-k","--accesskey", default=None, help="aws_secret_access_key")
parser.add_argument("-v", "--verbose", action="count", default=0, help="increase output verbosity")
parser.add_argument("-r","--region", default=None, help="AWS region")
parser.add_argument("-s","--s3endpoint", default="https://s3.it.anl.gov:18082")
args = parser.parse_args()
# if args.region is '':
# args.region=' '
if args.verbose:
print ('keyId is =', args.keyid)
print ('accessKey is =', args.accesskey)
print ('bucket is =', args.bucket)
print ('tmpfile is =', args.tmpfile)
print ('region is=', args.region)
print ('object is =', args.objectname)
if args.tmpfile is None:
print ('we need a filename')
sys.exit(2)
# if passed use credentials to establish connection
if args.accesskey is None:
if args.verbose:
print ('using existing credentials from ENV vars or files')
s3 = boto3.client('s3',
endpoint_url=args.s3endpoint,
region_name=args.region
)
else:
# use env. default for connection details --> see https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
if args.verbose:
print ('using credentials from cmd-line')
s3 = boto3.client('s3',
endpoint_url=args.s3endpoint,
region_name=args.region,
aws_access_key_id=args.keyid,
aws_secret_access_key=args.accesskey
)
with open(args.tmpfile, 'wb') as f:
s3.download_fileobj(args.bucket, args.objectname, f)
md5_new = md5sum(args.tmpfile)
print(md5_new)
sys.exit(0)
if __name__ == '__main__':
    main()
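# --- Illustrative usage (not part of the original script; bucket, object and endpoint names are made up) ---
# Credentials taken from the environment (AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY):
#   ./boto-s3-download.py -b my-bucket -o path/to/object.dat -t /tmp/object.dat -v
# Credentials and a custom S3 endpoint passed explicitly:
#   ./boto-s3-download.py -a AKIA... -k SECRET... -s https://s3.example.org:18082 \
#       -b my-bucket -o path/to/object.dat -t /tmp/object.dat
# The script prints the md5 of the downloaded file, which can be compared against the expected checksum.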
|
bsd-2-clause
| 3,889,921,259,526,281,700
| 32.38961
| 134
| 0.638911
| false
| 3.422104
| false
| false
| false
|
laughingman7743/PyAthena
|
tests/test_sqlalchemy_athena.py
|
1
|
20111
|
# -*- coding: utf-8 -*-
import re
import unittest
import uuid
from datetime import date, datetime
from decimal import Decimal
from urllib.parse import quote_plus
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy import String
from sqlalchemy.engine import create_engine
from sqlalchemy.exc import NoSuchTableError, OperationalError, ProgrammingError
from sqlalchemy.sql import expression
from sqlalchemy.sql.schema import Column, MetaData, Table
from sqlalchemy.sql.sqltypes import (
BIGINT,
BINARY,
BOOLEAN,
DATE,
DECIMAL,
FLOAT,
INTEGER,
STRINGTYPE,
TIMESTAMP,
)
from tests.conftest import ENV, SCHEMA
from tests.util import with_engine
class TestSQLAlchemyAthena(unittest.TestCase):
"""Reference test case is following:
https://github.com/dropbox/PyHive/blob/master/pyhive/tests/sqlalchemy_test_case.py
https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_hive.py
https://github.com/dropbox/PyHive/blob/master/pyhive/tests/test_sqlalchemy_presto.py
"""
def create_engine(self, **kwargs):
conn_str = (
"awsathena+rest://athena.{region_name}.amazonaws.com:443/"
+ "{schema_name}?s3_staging_dir={s3_staging_dir}&s3_dir={s3_dir}"
+ "&compression=snappy"
)
if "verify" in kwargs:
conn_str += "&verify={verify}"
if "duration_seconds" in kwargs:
conn_str += "&duration_seconds={duration_seconds}"
if "poll_interval" in kwargs:
conn_str += "&poll_interval={poll_interval}"
if "kill_on_interrupt" in kwargs:
conn_str += "&kill_on_interrupt={kill_on_interrupt}"
return create_engine(
conn_str.format(
region_name=ENV.region_name,
schema_name=SCHEMA,
s3_staging_dir=quote_plus(ENV.s3_staging_dir),
s3_dir=quote_plus(ENV.s3_staging_dir),
**kwargs
)
)
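    # Illustrative note (assumed values, not part of the original tests): with
    # region_name="us-west-2" and schema "test_schema", the rendered URL looks roughly like
    #   awsathena+rest://athena.us-west-2.amazonaws.com:443/test_schema
    #     ?s3_staging_dir=s3%3A%2F%2Fbucket%2Fstaging%2F&s3_dir=s3%3A%2F%2Fbucket%2Fstaging%2F&compression=snappy
    # i.e. the S3 locations must be URL-quoted, which is why quote_plus() is applied above.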
@with_engine()
def test_basic_query(self, engine, conn):
rows = conn.execute("SELECT * FROM one_row").fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0].number_of_rows, 1)
self.assertEqual(len(rows[0]), 1)
@with_engine()
def test_reflect_no_such_table(self, engine, conn):
self.assertRaises(
NoSuchTableError,
lambda: Table("this_does_not_exist", MetaData(bind=engine), autoload=True),
)
self.assertRaises(
NoSuchTableError,
lambda: Table(
"this_does_not_exist",
MetaData(bind=engine),
schema="also_does_not_exist",
autoload=True,
),
)
@with_engine()
def test_reflect_table(self, engine, conn):
one_row = Table("one_row", MetaData(bind=engine), autoload=True)
self.assertEqual(len(one_row.c), 1)
self.assertIsNotNone(one_row.c.number_of_rows)
@with_engine()
def test_reflect_table_with_schema(self, engine, conn):
one_row = Table("one_row", MetaData(bind=engine), schema=SCHEMA, autoload=True)
self.assertEqual(len(one_row.c), 1)
self.assertIsNotNone(one_row.c.number_of_rows)
@with_engine()
def test_reflect_table_include_columns(self, engine, conn):
one_row_complex = Table("one_row_complex", MetaData(bind=engine))
version = float(
re.search(r"^([\d]+\.[\d]+)\..+", sqlalchemy.__version__).group(1)
)
if version <= 1.2:
engine.dialect.reflecttable(
conn, one_row_complex, include_columns=["col_int"], exclude_columns=[]
)
elif version == 1.3:
# https://docs.sqlalchemy.org/en/13/changelog/changelog_13.html
# #change-64ac776996da1a5c3e3460b4c0f0b257
engine.dialect.reflecttable(
conn,
one_row_complex,
include_columns=["col_int"],
exclude_columns=[],
resolve_fks=True,
)
else: # version >= 1.4
# https://docs.sqlalchemy.org/en/14/changelog/changelog_14.html
# #change-0215fae622c01f9409eb1ba2754f4792
# https://docs.sqlalchemy.org/en/14/core/reflection.html
# #sqlalchemy.engine.reflection.Inspector.reflect_table
insp = sqlalchemy.inspect(engine)
insp.reflect_table(
one_row_complex,
include_columns=["col_int"],
exclude_columns=[],
resolve_fks=True,
)
self.assertEqual(len(one_row_complex.c), 1)
self.assertIsNotNone(one_row_complex.c.col_int)
self.assertRaises(AttributeError, lambda: one_row_complex.c.col_tinyint)
@with_engine()
def test_unicode(self, engine, conn):
unicode_str = "密林"
one_row = Table("one_row", MetaData(bind=engine))
returned_str = sqlalchemy.select(
[expression.bindparam("あまぞん", unicode_str, type_=String())],
from_obj=one_row,
).scalar()
self.assertEqual(returned_str, unicode_str)
@with_engine()
def test_reflect_schemas(self, engine, conn):
insp = sqlalchemy.inspect(engine)
schemas = insp.get_schema_names()
self.assertIn(SCHEMA, schemas)
self.assertIn("default", schemas)
@with_engine()
def test_get_table_names(self, engine, conn):
meta = MetaData()
meta.reflect(bind=engine)
print(meta.tables)
self.assertIn("one_row", meta.tables)
self.assertIn("one_row_complex", meta.tables)
insp = sqlalchemy.inspect(engine)
self.assertIn(
"many_rows",
insp.get_table_names(schema=SCHEMA),
)
@with_engine()
def test_has_table(self, engine, conn):
insp = sqlalchemy.inspect(engine)
self.assertTrue(insp.has_table("one_row", schema=SCHEMA))
self.assertFalse(insp.has_table("this_table_does_not_exist", schema=SCHEMA))
@with_engine()
def test_get_columns(self, engine, conn):
insp = sqlalchemy.inspect(engine)
actual = insp.get_columns(table_name="one_row", schema=SCHEMA)[0]
self.assertEqual(actual["name"], "number_of_rows")
self.assertTrue(isinstance(actual["type"], INTEGER))
self.assertTrue(actual["nullable"])
self.assertIsNone(actual["default"])
self.assertEqual(actual["ordinal_position"], 1)
self.assertIsNone(actual["comment"])
@with_engine()
def test_char_length(self, engine, conn):
one_row_complex = Table("one_row_complex", MetaData(bind=engine), autoload=True)
result = (
sqlalchemy.select(
[sqlalchemy.func.char_length(one_row_complex.c.col_string)]
)
.execute()
.scalar()
)
self.assertEqual(result, len("a string"))
@with_engine()
def test_reflect_select(self, engine, conn):
one_row_complex = Table("one_row_complex", MetaData(bind=engine), autoload=True)
self.assertEqual(len(one_row_complex.c), 15)
self.assertIsInstance(one_row_complex.c.col_string, Column)
rows = one_row_complex.select().execute().fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(
list(rows[0]),
[
True,
127,
32767,
2147483647,
9223372036854775807,
0.5,
0.25,
"a string",
datetime(2017, 1, 1, 0, 0, 0),
date(2017, 1, 2),
b"123",
"[1, 2]",
"{1=2, 3=4}",
"{a=1, b=2}",
Decimal("0.1"),
],
)
self.assertIsInstance(one_row_complex.c.col_boolean.type, BOOLEAN)
self.assertIsInstance(one_row_complex.c.col_tinyint.type, INTEGER)
self.assertIsInstance(one_row_complex.c.col_smallint.type, INTEGER)
self.assertIsInstance(one_row_complex.c.col_int.type, INTEGER)
self.assertIsInstance(one_row_complex.c.col_bigint.type, BIGINT)
self.assertIsInstance(one_row_complex.c.col_float.type, FLOAT)
self.assertIsInstance(one_row_complex.c.col_double.type, FLOAT)
self.assertIsInstance(one_row_complex.c.col_string.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_timestamp.type, TIMESTAMP)
self.assertIsInstance(one_row_complex.c.col_date.type, DATE)
self.assertIsInstance(one_row_complex.c.col_binary.type, BINARY)
self.assertIsInstance(one_row_complex.c.col_array.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_map.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_struct.type, type(STRINGTYPE))
self.assertIsInstance(one_row_complex.c.col_decimal.type, DECIMAL)
@with_engine()
def test_reserved_words(self, engine, conn):
"""Presto uses double quotes, not backticks"""
fake_table = Table(
"select", MetaData(bind=engine), Column("current_timestamp", STRINGTYPE)
)
query = str(fake_table.select(fake_table.c.current_timestamp == "a"))
self.assertIn('"select"', query)
self.assertIn('"current_timestamp"', query)
self.assertNotIn("`select`", query)
self.assertNotIn("`current_timestamp`", query)
@with_engine()
def test_retry_if_data_catalog_exception(self, engine, conn):
dialect = engine.dialect
exc = OperationalError(
"", None, "Database does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
exc = OperationalError(
"", None, "Namespace does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
exc = OperationalError(
"", None, "Table does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertTrue(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
exc = OperationalError("", None, "foobar.")
self.assertTrue(
dialect._retry_if_data_catalog_exception(exc, "foobar", "foobar")
)
exc = ProgrammingError(
"", None, "Database does_not_exist not found. Please check your query."
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "does_not_exist", "this_does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "does_not_exist"
)
)
self.assertFalse(
dialect._retry_if_data_catalog_exception(
exc, "this_does_not_exist", "this_does_not_exist"
)
)
@with_engine()
def test_get_column_type(self, engine, conn):
dialect = engine.dialect
self.assertEqual(dialect._get_column_type("boolean"), "boolean")
self.assertEqual(dialect._get_column_type("tinyint"), "tinyint")
self.assertEqual(dialect._get_column_type("smallint"), "smallint")
self.assertEqual(dialect._get_column_type("integer"), "integer")
self.assertEqual(dialect._get_column_type("bigint"), "bigint")
self.assertEqual(dialect._get_column_type("real"), "real")
self.assertEqual(dialect._get_column_type("double"), "double")
self.assertEqual(dialect._get_column_type("varchar"), "varchar")
self.assertEqual(dialect._get_column_type("timestamp"), "timestamp")
self.assertEqual(dialect._get_column_type("date"), "date")
self.assertEqual(dialect._get_column_type("varbinary"), "varbinary")
self.assertEqual(dialect._get_column_type("array(integer)"), "array")
self.assertEqual(dialect._get_column_type("map(integer, integer)"), "map")
self.assertEqual(dialect._get_column_type("row(a integer, b integer)"), "row")
self.assertEqual(dialect._get_column_type("decimal(10,1)"), "decimal")
@with_engine()
def test_contain_percents_character_query(self, engine, conn):
select = sqlalchemy.sql.text(
"""
SELECT date_parse('20191030', '%Y%m%d')
"""
)
table_expression = sqlalchemy.sql.selectable.TextAsFrom(select, []).cte()
query = sqlalchemy.select(["*"]).select_from(table_expression)
result = engine.execute(query)
self.assertEqual(result.fetchall(), [(datetime(2019, 10, 30),)])
query_with_limit = (
sqlalchemy.sql.select(["*"]).select_from(table_expression).limit(1)
)
result_with_limit = engine.execute(query_with_limit)
self.assertEqual(result_with_limit.fetchall(), [(datetime(2019, 10, 30),)])
@with_engine()
def test_query_with_parameter(self, engine, conn):
select = sqlalchemy.sql.text(
"""
SELECT :word
"""
)
table_expression = sqlalchemy.sql.selectable.TextAsFrom(select, []).cte()
query = sqlalchemy.select(["*"]).select_from(table_expression)
result = engine.execute(query, word="cat")
self.assertEqual(result.fetchall(), [("cat",)])
query_with_limit = (
sqlalchemy.select(["*"]).select_from(table_expression).limit(1)
)
result_with_limit = engine.execute(query_with_limit, word="cat")
self.assertEqual(result_with_limit.fetchall(), [("cat",)])
@with_engine()
def test_contain_percents_character_query_with_parameter(self, engine, conn):
select1 = sqlalchemy.sql.text(
"""
SELECT date_parse('20191030', '%Y%m%d'), :word
"""
)
table_expression1 = sqlalchemy.sql.selectable.TextAsFrom(select1, []).cte()
query1 = sqlalchemy.select(["*"]).select_from(table_expression1)
result1 = engine.execute(query1, word="cat")
self.assertEqual(result1.fetchall(), [(datetime(2019, 10, 30), "cat")])
query_with_limit1 = (
sqlalchemy.select(["*"]).select_from(table_expression1).limit(1)
)
result_with_limit1 = engine.execute(query_with_limit1, word="cat")
self.assertEqual(
result_with_limit1.fetchall(), [(datetime(2019, 10, 30), "cat")]
)
select2 = sqlalchemy.sql.text(
"""
SELECT col_string, :param FROM one_row_complex
WHERE col_string LIKE 'a%' OR col_string LIKE :param
"""
)
table_expression2 = sqlalchemy.sql.selectable.TextAsFrom(select2, []).cte()
query2 = sqlalchemy.select(["*"]).select_from(table_expression2)
result2 = engine.execute(query2, param="b%")
self.assertEqual(result2.fetchall(), [("a string", "b%")])
query_with_limit2 = (
sqlalchemy.select(["*"]).select_from(table_expression2).limit(1)
)
result_with_limit2 = engine.execute(query_with_limit2, param="b%")
self.assertEqual(result_with_limit2.fetchall(), [("a string", "b%")])
@with_engine()
def test_nan_checks(self, engine, conn):
dialect = engine.dialect
self.assertFalse(dialect._is_nan("string"))
self.assertFalse(dialect._is_nan(1))
self.assertTrue(dialect._is_nan(float("nan")))
@with_engine()
def test_to_sql(self, engine, conn):
# TODO pyathena.error.OperationalError: SYNTAX_ERROR: line 1:305:
# Column 'foobar' cannot be resolved.
# def _format_bytes(formatter, escaper, val):
# return val.decode()
table_name = "to_sql_{0}".format(str(uuid.uuid4()).replace("-", ""))
df = pd.DataFrame(
{
"col_int": np.int32([1]),
"col_bigint": np.int64([12345]),
"col_float": np.float32([1.0]),
"col_double": np.float64([1.2345]),
"col_string": ["a"],
"col_boolean": np.bool_([True]),
"col_timestamp": [datetime(2020, 1, 1, 0, 0, 0)],
"col_date": [date(2020, 12, 31)],
# "col_binary": "foobar".encode(),
}
)
# Explicitly specify column order
df = df[
[
"col_int",
"col_bigint",
"col_float",
"col_double",
"col_string",
"col_boolean",
"col_timestamp",
"col_date",
# "col_binary",
]
]
df.to_sql(
table_name,
engine,
schema=SCHEMA,
index=False,
if_exists="replace",
method="multi",
)
table = Table(table_name, MetaData(bind=engine), autoload=True)
self.assertEqual(
table.select().execute().fetchall(),
[
(
1,
12345,
1.0,
1.2345,
"a",
True,
datetime(2020, 1, 1, 0, 0, 0),
date(2020, 12, 31),
# "foobar".encode(),
)
],
)
@with_engine(verify="false")
def test_conn_str_verify(self, engine, conn):
kwargs = conn.connection._kwargs
self.assertFalse(kwargs["verify"])
@with_engine(duration_seconds="1800")
def test_conn_str_duration_seconds(self, engine, conn):
kwargs = conn.connection._kwargs
self.assertEqual(kwargs["duration_seconds"], 1800)
@with_engine(poll_interval="5")
def test_conn_str_poll_interval(self, engine, conn):
self.assertEqual(conn.connection.poll_interval, 5)
@with_engine(kill_on_interrupt="false")
def test_conn_str_kill_on_interrupt(self, engine, conn):
self.assertFalse(conn.connection.kill_on_interrupt)
|
mit
| -7,513,084,211,599,442,000
| 36.568224
| 88
| 0.565501
| false
| 3.862221
| true
| false
| false
|
rohanpm/qingfanyi
|
qingfanyi/process/translate.py
|
1
|
3333
|
# coding=utf-8
# qingfanyi - Chinese to English translation tool
# Copyright (C) 2016 Rohan McGovern <rohan@mcgovern.id.au>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import traceback
from gi.repository import GLib
from gi.repository import Gtk
from pyatspi import Registry
import qingfanyi.styles
from qingfanyi import debug
from qingfanyi.dict import Dict
from qingfanyi.popup_manager import PopupManager
from qingfanyi.snapshot import Snapshot
from qingfanyi.snapshot_matcher import SnapshotMatcher
from qingfanyi.translate_window import TranslateWindow
from qingfanyi.wm import active_window
class Translate(object):
def __init__(self):
self.dic = Dict()
self.dic.open()
self.condvar = threading.Condition()
self.error = None
def run_in_other_thread(self):
self.condvar.acquire()
GLib.idle_add(self.run_in_this_thread)
self.condvar.wait()
debug('run in other thread done')
if self.error:
raise self.error
def run_in_this_thread(self):
self.condvar.acquire()
self.error = None
try:
self.run()
except Exception as e:
traceback.print_exc()
self.error = e
finally:
self.condvar.notify()
self.condvar.release()
def run(self):
debug('translate running...')
(accessible_window, gdk_window) = active_window()
if not accessible_window:
debug('No active window. Do nothing.')
return
debug('active: %s' % accessible_window)
qingfanyi.styles.init()
debug('taking snapshot')
snapshot = Snapshot(accessible_window, gdk_window)
snapshot_matcher = SnapshotMatcher(snapshot, self.dic)
debug('creating translate window')
translate_win = TranslateWindow(snapshot, snapshot_matcher)
translate_win.show()
snapshot_matcher.start()
PopupManager(translate_win)
# nested loop to make run() blocking
translate_win.connect('hide', lambda *_: Gtk.main_quit())
Gtk.main()
def run_event_loop(self):
debug('starting at-spi loop')
Registry.start(gil=False)
def __del__(self):
debug('closing.')
self.dic.close()
self.dic = None
_INSTANCE = None
def run():
if not _INSTANCE:
raise ValueError('run() called before init()')
_INSTANCE.run_in_other_thread()
debug('run complete')
def init():
global _INSTANCE
if _INSTANCE:
raise ValueError('init() called more than once')
_INSTANCE = Translate()
thread = threading.Thread(target=_INSTANCE.run_event_loop, daemon=True)
thread.start()
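# Illustrative usage sketch (an assumption -- the real caller lives elsewhere in qingfanyi):
#   from qingfanyi.process import translate
#   translate.init()   # once at startup: creates the Translate singleton and starts the AT-SPI loop
#   translate.run()    # per request, from another thread: blocks until the popup window is hidden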
|
gpl-3.0
| -3,197,915,771,804,864,500
| 27.487179
| 75
| 0.660066
| false
| 3.949052
| false
| false
| false
|
fivethreeo/django-dragoman-blog
|
dragoman_blog/templatetags/dragoman_blog_tags.py
|
1
|
1443
|
from django import template
register = template.Library()
@register.inclusion_tag('admin/dragoman_blog/submit_line.html', takes_context=True)
def submit_row(context):
"""
Displays the row of buttons for delete and save.
"""
opts = context['opts']
change = context['change']
is_popup = context['is_popup']
save_as = context['save_as']
ctx = {
'opts': opts,
'onclick_attrib': (opts.get_ordered_objects() and change
and 'onclick="submitOrderForm();"' or ''),
'show_delete_link': (not is_popup and context['has_delete_permission']
and change and context.get('show_delete', True)),
'show_save_as_new': not is_popup and change and save_as,
'show_save_and_add_another': context['has_add_permission'] and
not is_popup and (not save_as or context['add']),
'show_save_and_continue': not is_popup and context['has_change_permission'],
'is_popup': is_popup,
'show_save': True
}
if context.get('original') is not None:
ctx['original'] = context['original']
if context.get('translation_language_code') is not None:
ctx['translation_language_code'] = context['translation_language_code']
if context.get('translation_language_field') is not None:
ctx['translation_language_field'] = context['translation_language_field']
return ctx
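# Illustrative template usage (an assumption, not shipped with this tag library): in an
# admin change_form override this tag replaces Django's stock submit row, e.g.
#   {% load dragoman_blog_tags %}
#   {% submit_row %}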
|
bsd-3-clause
| -6,141,228,518,920,310,000
| 42.757576
| 84
| 0.616078
| false
| 3.910569
| false
| false
| false
|
nbstr/demineur
|
functions.py
|
1
|
4297
|
#=========================================#
# IMPORTS #
#=========================================#
from beautiful import *
from random import randint
#=========================================#
# CONFIG #
#=========================================#
# DIFFICULTY
nb_cases = 9 # !! MAX 26
nb_bombes = 9 # !! MAX nb_cases**2
# SAFETY CHECK
if(nb_bombes >= nb_cases**2):
nb_bombes = nb_cases**2 - 1
# COORDINATES
alpha_maj = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
alpha_num = list(range(1, nb_cases + 1))
#=========================================#
# FUNCTIONS #
#=========================================#
def generer_bombes(hasard=False):
"""
    Generate the bombs: random positions when hasard=True, otherwise a fixed test layout.
"""
if(hasard):
grille = {}
while len(grille) < (nb_bombes):
x = randint(0, nb_cases-1)
y = randint(0, nb_cases-1)
grille[(x, y)] = "B"
return grille
else:
grille = {}
grille [(0 ,7)] = "B"
grille [(1 ,5)] = "B"
grille [(1 ,6)] = "B"
grille [(1 ,8)] = "B"
grille [(2 ,4)] = "B"
grille [(3 ,4)] = "B"
grille [(5 ,5)] = "B"
grille [(5 ,7)] = "B"
grille [(7 ,0)] = "B"
grille [(7 ,5)] = "B"
return grille
def init_champ():
"""
    Initialize the minefield with every square hidden ("*").
"""
champ = []
for i in range(nb_cases):
champ.append(["*"] * nb_cases)
return champ
def print_champ(g):
"""
    Display the minefield.
"""
print ("\n\n " + " ".join(str(col) for col in alpha_num))
print (" " + "-"*37)
for i, ligne in enumerate(g):
print (alpha_maj[i] + " | " + " | ".join(ligne) + " |\n " + "-"*37)
print ("\n")
def bombe(coord, grille):
"""
    Check whether there is a bomb at the given coordinates.
    """
    return coord in grille
def input_coordonnees():
"""
    Ask the player to select a square and return its (x, y) coordinates.
"""
    # VALIDATE THE ROW LETTER (Y)
while True:
y = input("• Veuillez entrer la lettre d’une ligne: ")
try:
y = int(alpha_maj[:nb_cases].index(y.upper()))
break
except ValueError:
print("!! La lettre de la ligne est invalide\n")
    # VALIDATE THE COLUMN NUMBER (X)
while True:
x = input_int("• Veuillez entrer le numéro d’une colonne: ")
if(x < 1 or x > nb_cases):
print ("!! Le numéro de la colonne est invalide\n")
else:
x -= 1
break
return (x, y)
def compte_bombes(x, y, grille):
"""
    Count the number of bombs in the squares surrounding (x, y).
"""
nombre_bombes = 0
for ligne in range(y-1, y+2):
for colonne in range(x-1, x+2):
            # CHECK THAT WE ARE STILL INSIDE THE MINEFIELD
if(colonne >= 0 and colonne < nb_cases and ligne >= 0 and ligne < nb_cases and (ligne != y or colonne != x)):
if(bombe((colonne, ligne), grille)):
nombre_bombes += 1
    return nombre_bombes
def afficher_case(champ, x, y, grille):
"""
    Reveal the square: show the count of adjacent bombs and flood-fill empty regions recursively.
"""
nombre_bombes = compte_bombes(x, y, grille)
    # FILL THE SELECTED SQUARE
if(nombre_bombes == 0):
champ[y][x] = " "
        # REVEAL THE ADJACENT SQUARES
for l in range(y-1, y+2):
for c in range(x-1, x+2):
                # CHECK THAT WE ARE STILL INSIDE THE MINEFIELD
if(c >= 0 and c < nb_cases and l >= 0 and l < nb_cases and (l != y or c != x) and champ[l][c] == "*"):
sous_compte = compte_bombes(c, l, grille)
if(sous_compte == 0):
champ[l][c] = " "
champ = afficher_case(champ, c, l, grille)
else:
champ[l][c] = str(compte_bombes(c, l, grille))
else:
champ[y][x] = str(nombre_bombes)
return champ
def fin(champ, grille):
    """
    Return True when the only hidden squares ("*") left are the bombs, i.e. the player has won.
    """
    count = 0
for ligne in champ:
for element in ligne:
if(element == "*"):
count += 1
return count == len(grille)
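# Illustrative game loop (an assumption -- the real entry point lives outside functions.py,
# and the end-of-game messages below are made up):
if __name__ == "__main__":
    grille = generer_bombes(hasard=True)
    champ = init_champ()
    perdu = False
    while not fin(champ, grille) and not perdu:
        print_champ(champ)
        x, y = input_coordonnees()
        if bombe((x, y), grille):
            perdu = True
        else:
            champ = afficher_case(champ, x, y, grille)
    print_champ(champ)
    print("Perdu !" if perdu else "Gagné !")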
|
unlicense
| -421,806,785,374,238,600
| 24.622754
| 121
| 0.45291
| false
| 3.065186
| false
| false
| false
|
dbednarski/pyhdust
|
pyhdust/input.py
|
1
|
29469
|
# -*- coding:utf-8 -*-
"""
PyHdust *input* module: Hdust input tools.
:co-author: Rodrigo Vieira
:license: GNU GPL v3.0 (https://github.com/danmoser/pyhdust/blob/master/LICENSE)
"""
import os as _os
import numpy as _np
from glob import glob as _glob
from itertools import product as _product
import pyhdust.phc as _phc
import pyhdust as _hdt
__author__ = "Daniel Moser"
__email__ = "dmfaes@gmail.com"
def makeDiskGrid(modn='01', mhvals=[1.5], hvals=[.6], rdvals=[18.6], mvals=None,
sig0vals=None, doFVDD=False, sBdays=None, sBfiles=None, selsources='*',
alpha=.5, mu=.5, R0r=300, Mdot11=False, path=None):
"""
| ###CONFIG. OPTIONS
| #MODEL NUMBER
| modn = '02'
| #The following filter will be applied to the SOURCE selection (string fmt)
| selsources = '*'
|
| #SUPERFICIAL DENSITY PROFILE EXPONENT
| mvals = [1.5,2.0,2.5,3.0]
| #VERTICAL DENSITY PROFILE EXPONENT
| mhvals = [1.5]
| #FRACTION OF TEFF OF PRIMARY STAR
| #This parameter sets if it you be FIXED to OB=1.1 case
| hvals = [72.]
| #DISK RADIUS EQUATORIAL...
| rdvals = [30.]
| #SIGMA_0 VALUES
| sig0vals = _np.logspace(_np.log10(0.02),_np.log10(4.0),7)
|
| #Do the Full VDD model for the corresponding sig0?
| doFVDD = True
| alpha = 0.5
| mu = 0.5
| #WARNING: it only generates a single R0 value per batch. If you want to change
| # it, run it twice (or more)
| R0r = 300
| ###END CONFIG.
"""
G = _phc.G.cgs
Msun = _phc.Msun.cgs
Rsun = _phc.Rsun.cgs
kB = _phc.kB.cgs
mH = _phc.mH.cgs
yr = _phc.yr.cgs
def doPL(prodI):
'''
Given a prodI (i.e., src,sig0,rd,h,m,mh), generates the Power-Law model
input
'''
src,sig0,rd,h,m,mh = prodI
M,Req,Tp = _hdt.readscr(src)
Mstr = str(M)
M *= Msun
Req *= Rsun
Th = h*Tp/100.
#a0 = (kB*h/100.*Tp/mu/mH)**.5
a = (kB*Th/mu/mH)**.5
n0 = (G*M/2./_np.pi)**.5*sig0/mu/mH/a/Req**1.5
#Th = a**2*mu*mH/kB
srcname = src.replace('source/','').replace('.txt','')
suffix = '_PLn{0:.1f}_sig{1:.2f}_h{2:03.0f}_Rd{3:05.1f}_{4}'.format(\
(m+mh),sig0,h,rd,srcname)
wmod = mod[:]
wmod[13]=wmod[13].replace('18.6',('%.2f' % rd))
wmod[20]=wmod[20].replace('2.0',('%.2f' % m))
wmod[33]=wmod[33].replace('1.5',('%.2f' % mh))
wmod[40]=wmod[40].replace('18000.',('%.1f' % Th))
wmod[52]=wmod[52].replace('2.35E13',('%.2e' % n0))
f0=open('mod'+modn+'/mod'+modn+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
return
def doMdot(prodI):
'''
Given a prodI (i.e., src,sig0,rd,h,m,mh), generates the full VDD model
input
'''
src,sig0,rd,h,m,mh = prodI
M,Req,Tp = _hdt.readscr(src)
Mstr = str(M)
M *= Msun
Req *= Rsun
Th = h*Tp/100.
a = (kB*Th/mu/mH)**.5
#a0 = (kB*h/100*Tp/mu/mH)**.5
#a = a0*Req0*Req**.25/Req/Req**.25
R0 = R0r*Req
Mdot = sig0*Req**2*3*_np.pi*alpha*a**2/(G*M*R0)**.5 #SI units
Mdot = Mdot/Msun*yr
#Th = a**2*mu*mH/kB
srcname = src.replace('source/','').replace('.txt','')
#suffix = '_NI_Mdot{:.1e}_Rd{:.1f}_R0{:.1f}_alp{:.1f}_h{:.1f}_{}'.\
#format(Mdot,rd,R0/Req,alpha,h,srcname)
suffix = '_NIa{0:.1f}_sig{1:.2f}_h{2:03.0f}_Rd{3:05.1f}_{4}'.format(\
alpha,sig0,h,rd,srcname)
wmod = mod[:]
wmod[13]=wmod[13].replace('18.6',('%.2f' % rd))
wmod[18]=wmod[18].replace('1',('%d' % 2))
wmod[23]=wmod[23].replace('1.',('%.2f' % alpha))
wmod[24]=wmod[24].replace('= 0.',('= %.2f' % (R0/Req)))
wmod[25]=wmod[25].replace('= 0',('= %d' % 1))
wmod[31]=wmod[31].replace('0',('%d' % 1))
wmod[40]=wmod[40].replace('18000.','{0:.1f}'.format(Th))
wmod[49]=wmod[49].replace('2',('%d' % 3))
wmod[55]=wmod[55].replace('1.E-9',('%.2e' % Mdot))
f0=open('mod'+modn+'/mod'+modn+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
return
def doSB(prodI, hseq=False):
'''
Given a prodI (i.e., sources,rdvals,hvals,mhvals,sBdays,sBfiles),
generates the Single Be based model input
'''
src,rd,h,mh,day,sfile = prodI
M,Req,Tp = _hdt.readscr(src)
Mstr = str(M)
M *= Msun
Req *= Rsun
Th = h*Tp/100.
#a0 = (kB*h/100.*Tp/mu/mH)**.5
a = (kB*Th/mu/mH)**.5
#~ n0 = (G*M/2./_np.pi)**.5*sig0/mu/mH/a/Req**1.5
#Th = a**2*mu*mH/kB
srcname = src.replace('source/','').replace('.txt','')
wmod = mod[:]
wmod[13]=wmod[13].replace('18.6',('%.2f' % rd))
wmod[18]=wmod[18].replace('= 1','= 4')
wmod[28]=wmod[28].replace('deltasco/Atsuo/1D/data/dSco_a035_01',(sfile))
wmod[29]=wmod[29].replace('2.3',('%.2f' % (day/365.25)))
if not hseq:
wmod[33]=wmod[33].replace('1.5',('%.2f' % mh))
suffix = '_SB{0}_{1:.1f}d_h{2:03.0f}_Rd{3:05.1f}_{4}'.format(\
_phc.trimpathname(sfile)[1],day,h,rd,srcname)
else:
wmod[31]=wmod[31].replace('= 0','= 1')
wmod[36]=wmod[36].replace('1.5',('%.2f' % mh))
suffix = '_SB{0}_{1:.1f}d_hseq_Rd{2:05.1f}_{3}'.format(\
_phc.trimpathname(sfile)[1],day,rd,srcname)
wmod[40]=wmod[40].replace('18000.',('%.1f' % Th))
f0=open('mod'+modn+'/mod'+modn+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
return
###TODO Setup Tpole = REF of a (scale height)
#Tps = dict(zip(Ms, Tp11))
###PROGRAM BEGINS
path0 = _os.getcwd()
if path != None:
_os.chdir(path)
if path[-1] != '/':
path += '/'
else:
path = ''
#Check modN folder
if not _os.path.exists('mod{}'.format(modn)):
_os.system('mkdir mod{}'.format(modn))
#Select sources
sources = _glob('source/'+selsources)
#Load disk model
f0 = open('{0}/refs/REF_disco.txt'.format(_hdt.hdtpath()))
mod = f0.readlines()
f0.close()
if sBdays is None or sBfiles is None:
for prodI in _product(sources,sig0vals,rdvals,hvals,mvals,mhvals):
doPL(prodI)
i = 0
if doFVDD:
i = 1
doMdot(prodI)
print('# {0:.0f} arquivos foram gerados !!'.format(len(sources)*\
len(sig0vals)*len(rdvals)*len(hvals)*(len(mvals)+i)*len(mhvals)))
else:
for prodI in _product(sources,rdvals,hvals,mhvals,sBdays,sBfiles):
doSB(prodI)
i = 0
if doFVDD:
i = 1
doSB(prodI, hseq=True)
print('# {0:.0f} arquivos foram gerados !!'.format(len(sources)*\
len(rdvals)*len(hvals)*len(sBdays)*(len(mhvals)+i)*len(sBfiles)))
    if path != '':
_os.chdir(path0)
###END PROGRAM
return
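# Illustrative call (assumed values, not from the original module): build a small grid of
# power-law disk models for every source under ./source/, plus the corresponding full-VDD models.
#   makeDiskGrid(modn='02', mvals=[2.0, 2.5], mhvals=[1.5], hvals=[72.], rdvals=[30.],
#                sig0vals=[0.05, 0.5], doFVDD=True, alpha=0.5, mu=0.5, R0r=300,
#                selsources='*', path='/path/to/project')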
def makeInpJob(modn='01', nodes=512, simulations=['SED'],
docases=[1,3], sim1=['step1'], sim2=['step1_ref'], composition=['pureH'],
controls=['controls'], gridcells=['grid'], observers=['observers'],
images=[''], clusters=['job'], srcid='',
walltime='24:00:00', wcheck=False, email='$USER@localhost', chkout=False,
st1max=20, st1refmax=24, ctrM=False, touch=False, srcNf=None, path=None):
"""
Create INP+JOB files to run Hdust.
    All SOURCE files must start with "Be_"; otherwise `makeInpJob` will not
    work. This is required so that a specific disk model can be matched to
    each source star.
| ### Start edit here ###
| modn = '02'
|
| #clusters config
| # job = AlphaCrucis; oar = MesoCentre Licallo; ge = MesoCentre FRIPP
| clusters = ['job','oar','ge','bgp']
| clusters = ['oar']
| nodes = 48
| #if wcheck == True, walltime will be AUTOMATICALLY estimated
| walltime = '3:00:00'
| wcheck = True
| email = 'user@gmail.com'
|
| #Check if the outputs already exist
| chkout = True
| #Above the values below, the step1 will be considered done!
| st1max = 26
| st1refmax = 30
    | #Generate inp+job only for the source whose name contains '1.45'
    | #Special characters such as * or ? do not work here
| srcid = '1.45'
| srcid = ''
    | #If one of the 3 cases is not present, the corresponding input is written commented out.
| docases = [1,2,3]
    | #1 = step1 <> generates inp+job only for mod#/mod#.txt (NO source, disk only)
    | #enables ADDSUFFIX; removes OBSERVERS and IMAGES
| sim1 = 'step1'
| #2 = step1_refine
| sim2 = 'step1_refine'
    | #3 = others <> generates inp+job only for mod#/mod#SOURCE.txt (post-proc.)
    | #removes ADDSUFFIX; adds OBSERVERS (and possibly IMAGES)
| simulations = ['sed','h','brg','halpha','uv']
| simulations = ['sed_sig','brg_M','halpha_M','uv','j','h','k','l','m','n','q1','q2']
| simulations = ['SED','Ha']
| images = ['','h','brg','halpha','uv']
| images = simulations[:]
| composition = 'pureH'
| controls = 'no_op'
| controls = 'controls'
| ctrM = False
| gridcells = 'grid'
| observers = 'obs'
| touch = True
| ###stop edition here
"""
def isFloat(x):
try:
a = float(x)
except ValueError:
return False
else:
return True
def doCase1(inp,cases):
case1 = inp[:]
case1[0] = case1[0].replace('suffix',suf)
case1[1] = case1[1].replace('pureH',composition)
if ctrM:
i = suf.find('_M')
M = suf[i:i+7]
case1[2] = case1[2].replace('controls',controls+M)
else:
case1[2] = case1[2].replace('controls',controls)
case1[3] = case1[3].replace('grid',gridcells)
case1[4] = case1[4].replace('step1',sim1)
case1[5] = case1[5].replace('source',src)
if 1 not in cases:
for i in range(len(case1)):
case1[i] = '!~ '+case1[i]
return case1
def doCase2(inp,cases):
case1 = inp[:]
case1[0] = case1[0].replace('suffix',suf)
case1[1] = case1[1].replace('pureH',composition)
if ctrM:
i = suf.find('_M')
M = suf[i:i+7]
case1[2] = case1[2].replace('controls',controls+M)
else:
case1[2] = case1[2].replace('controls',controls)
case1[3] = case1[3].replace('grid',gridcells)
case1[4] = case1[4].replace('step1',sim2)
case1[5] = case1[5].replace('source',src)
if 2 not in cases:
for i in range(len(case1)):
case1[i] = '!~ '+case1[i]
return case1
def doCase3(inp,simchk):
case3 = []
for i in range(len(simulations)):
case1 = inp[:]
case1[0] = case1[0].replace('suffix',suf)
case1[1] = case1[1].replace('pureH',composition)
if ctrM:
j = suf.find('_M')
M = suf[j:j+7]
case1[2] = case1[2].replace('controls',controls+M)
else:
case1[2] = case1[2].replace('controls',controls)
case1[3] = case1[3].replace('grid',gridcells)
case1[5] = case1[5].replace('source',src)
if simulations[i] == 'SED':
sig = suf[suf.find('_sig')+4:suf.find('_sig')+8]
if isFloat(sig) and srcNf[i]:
case1[4] = case1[4].replace('step1','SED_sig{0}'.format(sig))
else:
case1[4] = case1[4].replace('step1',simulations[i])
elif srcNf[i]:
case1[4] = case1[4].replace('step1','{0}_{1}'.format(\
simulations[i],src))
else:
case1[4] = case1[4].replace('step1',simulations[i])
case1.append("OBSERVERS = '{0}'\n".format(observers))
if images[i] != '':
case1.append("IMAGES = '{0}'\n".format(images[i]))
case1.append('\n')
if not simchk[i]:
for i in range(len(case1)):
case1[i] = '!~ '+case1[i]
case3 += case1
return case3
def doJobs(mod, sel, nodes, addtouch='\n'):
#load Ref
f0 = open('{0}/refs/REF.{1}'.format(_hdt.hdtpath(),sel))
wout = f0.readlines()
f0.close()
outname = mod[mod.find('/')+1:].replace('txt',sel)
f0 = open('{0}s/{0}s_{1}_mod{2}.sh'.format(sel,proj,modn),'a')
if sel == 'job':
wout[4] = wout[4].replace('128','{0}'.format(nodes))
wout[4] = wout[4].replace('36:00:00','{0}'.format(walltime))
wout[8] = wout[8].replace('alexcarciofi@gmail.com','{0}'.format(email))
wout[11] = wout[11].replace('hdust_bestar2.02.inp','{0}/{1}'.\
format(proj,mod.replace('.txt','.inp')))
if touch:
wout[24] = addtouch
modchmod = _phc.trimpathname(mod)
modchmod[1] = modchmod[1].replace('.txt','*')
wout[31] = 'chmod 664 {0}/{1}/*{2}\nchmod 664 log/*\nchmod 664 ../../tmp/*\n'.\
format(proj, *modchmod)
f0.writelines('qsub {0}/{1}s/{2}\n'.format(proj,sel,outname))
elif sel == 'oar':
wout[2] = wout[2].replace('12','{0}'.format(nodes))
wout[2] = wout[2].replace('24:0:0','{0}'.format(walltime))
wout[10] = wout[10].replace('hdust_bestar2.02.inp','{0}/{1}'.\
format(proj,mod.replace('.txt','.inp')))
f0.writelines('chmod a+x {0}/{1}s/{2}\n'.format(proj,sel,outname))
f0.writelines('oarsub -S ./{0}/{1}s/{2}\n'.format(proj,sel,outname))
elif sel == 'ge':
wout[3] = wout[3].replace('48','{0}'.format(nodes))
wout[4] = wout[4].replace('45:00:00','{0}'.format(walltime))
wout[7] = wout[7].replace('dmfaes@gmail.com','{0}'.format(email))
wout[11] = wout[11].replace('hdust_bestar2.02.inp','{0}/{1}'.\
format(proj,mod.replace('.txt','.inp')))
f0.writelines('qsub -P hdust {0}/{1}s/{2}\n'.format(proj,sel,outname))
elif sel == 'bgp':
wout[14] = wout[14].replace('512','{0}'.format(nodes))
nodes = int(nodes)
if nodes%512 != 0:
nrsv = (nodes//512+1)*128
else:
nrsv = (nodes//512)*128
wout[10] = wout[10].replace('128','{0}'.format(nrsv))
wout[4] = wout[4].replace('24:00:00','{0}'.format(walltime))
wout[14] = wout[14].replace('hdust_bestar2.02.inp','{0}/{1}'.\
format(proj,mod.replace('.txt','.inp')))
f0.writelines('chmod +x {0}/{1}s/{2}\n'.format(proj,sel,outname))
f0.writelines('llsubmit ./{0}/{1}s/{2}\n'.format(proj,sel,outname))
f0.close()
f0 = open('{0}s/{1}'.format(sel,outname),'w')
f0.writelines(wout)
print('# Saved: {0}s/{1}'.format(sel,outname))
f0.close()
return
#PROGRAM START
if srcNf == None:
srcNf = len(simulations)*[False]
path0 = _os.getcwd()
if path != None:
_os.chdir(path)
if path[-1] != '/':
path += '/'
else:
path = ''
#obtain the actual directory
proj = _os.getcwd()
proj = proj[proj.rfind('/')+1:]
#Folder's checks
for sel in clusters:
if not _os.path.exists('{0}s'.format(sel)):
_os.system('mkdir {0}s'.format(sel))
elif _os.path.exists('{0}s/{0}s_{1}_mod{2}.sh'.format(sel,proj,modn)):
_os.system('rm {0}s/{0}s_{1}_mod{2}.sh'.format(sel,proj,modn))
#list of mods
mods = _glob('mod{0}/mod{0}*.txt'.format(modn))
#load REF_inp
f0 = open('{0}/refs/REF_inp.txt'.format(_hdt.hdtpath()))
inp = f0.readlines()
f0.close()
for mod in mods:
#Write inps
f0 = open(mod.replace('.txt','.inp'),'w')
f0.writelines('PROJECT = {0}\nMODEL = {1}\n\n'.format(proj,modn))
suf = mod[mod.find('_'):-4]
src = mod[mod.find('Be_'):-4]
if src.find(srcid) == -1:
continue
cases = docases[:]
#Do the touch thing
addtouch = '\n'
addtouch += 'chmod 664 ../../tmp/*\nchmod 664 {0}/mod{1}/*\n'.format(proj,modn)
if touch and ( (1 in cases) or (2 in cases) ):
addtouch += 'touch {0}/{1}\n'.format(proj, mod.replace('.txt','.log'))
if touch and 3 in cases:
for sim in simulations:
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','.chk')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','.err')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','.log')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','_SEI.chk')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','_SEI.err')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
addtouch += 'touch {0}/{1}\n'.format(proj,mod.replace('.txt','_SEI.log')).replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim))
err90a = '{0}/{1}'.format(proj,mod.replace('.txt','.err').replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim)))
err90b = '{0}/{1}'.format(proj,mod.replace('.txt','_SEI.err').replace('mod{0}/mod{0}'.format(modn), 'mod{0}/{1}_mod{0}'.format(modn, sim)))
addtouch += 'touch {0}\n'.format(err90a[:90])
addtouch += 'touch {0}\n'.format(err90b[:90])
addtouch += 'touch {0}\n'.format(err90a[:90].replace(".err",".chk").replace(".er",".ch").replace(".e",".c"))
addtouch += 'touch {0}\n'.format(err90b[:90].replace(".err",".chk").replace(".er",".ch").replace(".e",".c"))
modchmod = _phc.trimpathname(mod)
modchmod[1] = modchmod[1].replace('.txt','*')
#~ addtouch += 'chmod 664 {0}/{1}/*{2}\n'.format(proj, *modchmod)
#Set simulation check variable
if 3 in cases:
simchk = _np.ones(len(simulations), dtype=bool)
else:
simchk = _np.zeros(len(simulations), dtype=bool)
if _os.path.exists(mod.replace('.txt','{0:02d}.temp'.format(st1max))) \
and chkout and 1 in cases:
cases.remove(1)
case1 = doCase1(inp,cases)
f0.writelines(case1+['\n'])
if _os.path.exists(mod.replace('.txt','{0:02d}.temp'.format(st1refmax)))\
and chkout and 2 in cases:
cases.remove(2)
case2 = doCase2(inp,cases)
f0.writelines(case2+['\n'])
if chkout and 3 in cases:
for i in range(len(simulations)):
outs2a = 'mod{0}/{1}_mod{0}{2}.sed2'.format(modn,simulations[i],suf)
outs2b = 'mod{0}/{1}_mod{0}{2}_SEI.sed2'.format(modn,simulations[i],suf)
if _os.path.exists(outs2a) or _os.path.exists(outs2b):
simchk[i] = False
if True not in simchk:
cases.remove(3)
case3 = doCase3(inp,simchk)
f0.writelines(case3)
f0.close()
#Def automatic walltime:
if wcheck:
h = 0
if 1 in cases:
h+=1
if 2 in cases:
h+=1
idx = _np.where(simchk==True)
if len(idx[0])>0:
extra = 4+len(idx[0])
h = h+extra*48/nodes
walltime = '{0}:0:0'.format(h)
#Del old jobs
for sel in clusters:
outname = mod[mod.find('/')+1:].replace('txt',sel)
if _os.path.exists('{0}s/{1}'.format(sel,outname)):
_os.system('rm {0}s/{1}'.format(sel,outname))
#Write jobs (if necessary)
if len(cases)>0:
for sel in clusters:
doJobs(mod,sel,nodes,addtouch)
    if path != '':
_os.chdir(path0)
#PROGRAM END
return
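# Illustrative call (assumed values): write .inp files and 'oar' job scripts for the mod02
# grid, running the SED and Ha post-processing simulations on 48 nodes.
#   makeInpJob(modn='02', nodes=48, simulations=['SED', 'Ha'], images=['', 'Ha'],
#              docases=[1, 2, 3], clusters=['oar'], walltime='24:00:00',
#              email='user@example.com', chkout=True)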
def makeNoDiskGrid(modn, selsources, path=None):
"""
    Create the "no circumstellar disk" (noCS) model inputs for the selected sources.
INPUT: modn = '01'; selsources = '*' (filter that is applied to the SOURCE
selection).
OUTPUT: Files written
"""
def doNoCS(src):
'''
Given a src, generates the noCS model input
'''
srcname = src.replace('source/','').replace('.txt','')
suffix = '_noCS_{}'.format(srcname)
wmod = mod[:]
#Remove a disk does not work:
#wmod[9]=wmod[9].replace('1','0')
wmod[13]=wmod[13].replace('18.6','2.0')
f0=open('mod'+modn+'/mod'+modn+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
return
###PROGRAM BEGINS
path0 = _os.getcwd()
if path != None:
_os.chdir(path)
if path[-1] != '/':
path += '/'
else:
path = ''
#Check modN folder
if not _os.path.exists('mod{}'.format(modn)):
_os.system('mkdir mod{}'.format(modn))
#Select sources
sources = _glob('source/'+selsources)
#Load disk model
f0 = open('{0}/refs/REF_disco.txt'.format(_hdt.hdtpath()))
mod = f0.readlines()
f0.close()
for prodI in _product(sources):
prodI = prodI[0]
doNoCS(prodI)
print('# {:.0f} arquivos foram gerados !!'.format(len(sources)))
    if path != "":
_os.chdir(path0)
###END PROGRAM
return
def makeSimulLine(vrots, basesims, Rs, hwidth, Ms, Obs, suffix):
"""
| vrots = [[167.023,229.187,271.072,301.299,313.702],
| [177.998,244.636,290.596,324.272,338.298],
| [192.612,267.017,318.288,355.320,370.638],
| [202.059,281.667,335.158,373.716,389.782],
| [209.244,292.409,358.626,410.439,430.844],
| [214.407,297.661,357.799,402.628,420.683]]
| vrots = [[259.759,354.834,417.792,464.549,483.847],
| [252.050,346.163,406.388,449.818,468.126],
| [245.127,336.834,399.983,448.076,467.806],
| [239.522,329.496,388.734,432.532,450.806],
| [234.301,321.139,379.297,423.241,441.122],
| [228.538,313.797,370.343,412.488,429.914],
| [219.126,299.656,354.547,395.821,413.008],
| [211.544,288.840,341.081,380.426,396.978],
| [203.438,279.328,328.666,365.697,380.660],
| [197.823,268.964,316.901,353.568,368.506],
| [192.620,262.688,308.208,341.963,356.410],
| [187.003,255.125,299.737,332.511,346.043]]
|
| basesims = ['simulation/Brg.txt','simulation/Ha.txt']
| Rs = [12000, 20000]
|
| Ms = [4.00,5.00,7.00,9.00,12.00,15.00]
| Ms = [14.6, 12.5, 10.8, 9.6, 8.6, 7.7, 6.4, 5.5, 4.8, 4.2, 3.8, 3.4]
| Obs = [1.1,1.2,1.3,1.4,1.45]
| suffix = 'H0.30_Z0.014_bE_Ell'
"""
c = _phc.c.cgs
for prodI in _product(Ms,Obs,basesims):
M,Ob,basesim = prodI
f0 = open(basesim)
mod = f0.readlines()
f0.close()
srcid = 'Be_M{0:05.2f}_ob{1:.2f}'.format(M,Ob)
i = Ms.index(M)
j = Obs.index(Ob)
k = basesims.index(basesim)
R = Rs[k]
nmod = mod[:]
vel = '{0:.1f}'.format(hwidth+vrots[i][j])
nmod[103] = nmod[103].replace('1020.',vel)
n = str(int(round(2*(hwidth+vrots[i][j])*R/c*1e5)))
print(srcid, n)
nmod[100] = nmod[100].replace('100',n)
f0 = open(basesim.replace('.txt','_{0}_{1}.txt'.format(srcid, suffix)),'w')
f0.writelines(nmod)
f0.close()
return
def makeStarGrid(oblats, Hfs, path=None):
"""
| INPUT: oblats = [1.1,1.2,1.3,1.4,1.45] (example)
| Hfs = [0.3] (example)
    The list of masses and the Z value are defined inside the `geneve_par.pro` file.
"""
path0 = _os.getcwd()
if path != None:
_os.chdir(path)
if path[-1] != '/':
path += '/'
else:
path = ''
if not _os.path.exists('stmodels'):
_os.system('mkdir stmodels')
try:
runIDL = True
import pidly
except ImportError:
        print('# This system does not have pIDLy installed...')
runIDL = False
if runIDL:
key = raw_input('# Do you want to run "geneve_par" (y/other):')
if key != 'y':
runIDL = False
if runIDL:
import pidly
idl = pidly.IDL()
propath = _hdt.hdtpath()+'/refs/'
idl('cd,"{0}"'.format(propath))
idl('.r geneve_par')
for ob in oblats:
for H in Hfs:
idl('geneve_par, {}, {}, /oblat,/makeeps'.format(ob,H))
_os.system('mv {}/geneve_lum.eps stmodels/geneve_lum_{:.2f}_{:.2f}.eps'.format(propath,ob,H))
_os.system('mv {}/geneve_rp.eps stmodels/geneve_rp_{:.2f}_{:.2f}.eps'.format(propath,ob,H))
_os.system('mv {}/geneve_par.txt stmodels/oblat{}_h{}.txt'.format(propath,ob,H))
idl.close()
f0 = open('{0}/refs/REF_estrela.txt'.format(_hdt.hdtpath()))
mod = f0.readlines()
f0.close()
if not _os.path.exists('source'):
_os.system('mkdir source')
for ob in oblats:
for H in Hfs:
f0 = open('stmodels/oblat{}_h{}.txt'.format(ob,H))
matriz = f0.readlines()
f0.close()
Omega,W,Beta = map(float, matriz[1].split())
m2 = []
for i in range(4,len(matriz)):
if len(matriz[i])>1:
m2 += [matriz[i].split()[1:]]
matriz = _np.array(m2, dtype=float)
M = matriz[:,0] #MASS (SOLAR MASSES)
M = list(M)
Rp = matriz[:,1] #POLAR RADIUS (SOLAR RADII)
Rp = list(Rp)
L = matriz[:,2] #LUMINOSITY (in solar lum.)
L = list(L)
Z = [0.014] #METALLICITY(=Zsolar)
#(other options: 0.006, 0.002)
            print('Omega = {0}; W = {1}; beta = {2}'.format(Omega, W, Beta))
            print('M = {0}; Rp = {1}; L = {2}'.format(M, Rp, L))
            print('%.0f arquivos gerados\n' % (len(M)*len(Hfs)))
#DEFINE ALL INDEX
for MI in M:
a = M.index(MI)
Raio = Rp[a]
Lum = L[a]
for RpI in Rp:
b = Rp.index(RpI)
for LI in L:
d = L.index(LI)
for ZI in Z:
g = Z.index(ZI)
suffix = '_M{:05.2f}_ob{:.2f}_H{:.2f}_Z{}_bE_Ell'. \
format(MI,ob,H,ZI,Beta,RpI,LI)
                            # STORE THE VALUES IN THE SOURCE TEMPLATE
wmod = mod[:]
wmod[3]=wmod[3].replace('10.3065',('%.2f' % MI))
wmod[4]=wmod[4].replace('5.38462',('%.2f' % Raio))
wmod[5]=wmod[5].replace('0.775',('%.4f' % W))
wmod[6]=wmod[6].replace('7500.',('%.2f' % Lum))
wmod[7]=wmod[7].replace('0.25',('%.5f' % Beta))
f0=open('source/Be'+suffix+'.txt', 'w')
f0.writelines(wmod)
f0.close()
#
    if path != "":
_os.chdir(path0)
return
def makeSimulDens(dbase, basesim):
"""
    Set the number of photons of the SED simulations so that the signal-to-noise
    level of the visible polarization is approximately constant across densities.
|dbase = _np.logspace(_np.log10(0.02),_np.log10(4.0),7)
|basesim = 'simulation/sed.txt'
"""
f0 = open(basesim)
mod = f0.readlines()
f0.close()
#fact = 2. Tempo execucao = d/1e13*fact
#Nf0 = 500000000
for d in dbase:
srcid = 'sig{0:.2f}'.format(d)
#alpha = .39794
#beta = 13.87219
alpha = 0.34588
beta = 8.50927
newd = int(10**(-alpha*_np.log10(d)+beta))
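        # Worked example of the scaling above (illustrative): for the lowest density d = 0.02,
        # newd = int(10**(-0.34588*log10(0.02) + 8.50927)) ~ 1.25e9 photons, while for d = 4.0
        # it drops to ~2.0e8, keeping the visible-polarization S/N roughly constant.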
print('{}, N_f = {:.2f}e+9'.format(srcid, newd/1e9))
nmod = mod[:]
nmod[9]=nmod[9].replace('500000000','{}'.format(newd))
f0 = open(basesim.replace('.txt','_{}.txt'.format(srcid)),'w')
f0.writelines(nmod)
f0.close()
#a = raw_input('asdads')
return
### MAIN ###
if __name__ == "__main__":
pass
|
gpl-3.0
| -8,789,998,822,184,297,000
| 36.208333
| 166
| 0.488683
| false
| 2.934283
| false
| false
| false
|
MyPureCloud/kafka
|
release.py
|
1
|
24941
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility for creating release candidates and promoting release candidates to a final release.
Usage: release.py
The utility is interactive; you will be prompted for basic release information and guided through the process.
This utility assumes you already have a local kafka git folder and that you
have added remotes corresponding to both:
(i) the github apache kafka mirror and
(ii) the apache kafka git repo.
"""
from __future__ import print_function
import datetime
from getpass import getpass
import json
import os
import subprocess
import sys
import tempfile
PROJECT_NAME = "kafka"
CAPITALIZED_PROJECT_NAME = "kafka".upper()
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
# Location of the local git repository
REPO_HOME = os.environ.get("%s_HOME" % CAPITALIZED_PROJECT_NAME, SCRIPT_DIR)
# Remote name, which points to Github by default
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "apache-github")
PREFS_FILE = os.path.join(SCRIPT_DIR, '.release-settings.json')
delete_gitrefs = False
work_dir = None
def fail(msg):
if work_dir:
cmd("Cleaning up work directory", "rm -rf %s" % work_dir)
if delete_gitrefs:
try:
cmd("Resetting repository working state to branch %s" % starting_branch, "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True)
cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True)
cmd("Deleting git tag %s" %rc_tag , "git tag -d %s" % rc_tag, shell=True)
except subprocess.CalledProcessError:
print("Failed when trying to clean up git references added by this script. You may need to clean up branches/tags yourself before retrying.")
print("Expected git branch: " + release_version)
print("Expected git tag: " + rc_tag)
print(msg)
sys.exit(1)
def print_output(output):
if output is None or len(output) == 0:
return
for line in output.split('\n'):
print(">", line)
def cmd(action, cmd, *args, **kwargs):
if isinstance(cmd, basestring) and not kwargs.get("shell", False):
cmd = cmd.split()
allow_failure = kwargs.pop("allow_failure", False)
stdin_log = ""
if "stdin" in kwargs and isinstance(kwargs["stdin"], basestring):
stdin_log = "--> " + kwargs["stdin"]
stdin = tempfile.TemporaryFile()
stdin.write(kwargs["stdin"])
stdin.seek(0)
kwargs["stdin"] = stdin
print(action, cmd, stdin_log)
try:
output = subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs)
print_output(output)
except subprocess.CalledProcessError as e:
print_output(e.output)
if allow_failure:
return
print("*************************************************")
print("*** First command failure occurred here. ***")
print("*** Will now try to clean up working state. ***")
print("*************************************************")
fail("")
def cmd_output(cmd, *args, **kwargs):
if isinstance(cmd, basestring):
cmd = cmd.split()
return subprocess.check_output(cmd, *args, stderr=subprocess.STDOUT, **kwargs)
def replace(path, pattern, replacement):
updated = []
with open(path, 'r') as f:
for line in f:
updated.append((replacement + '\n') if line.startswith(pattern) else line)
with open(path, 'w') as f:
for line in updated:
f.write(line)
def user_ok(msg):
ok = raw_input(msg)
return ok.lower() == 'y'
def sftp_mkdir(dir):
basedir, dirname = os.path.split(dir)
if not basedir:
basedir = "."
try:
cmd_str = """
cd %s
mkdir %s
""" % (basedir, dirname)
cmd("Creating '%s' in '%s' in your Apache home directory if it does not exist (errors are ok if the directory already exists)" % (dirname, basedir), "sftp -b - %s@home.apache.org" % apache_id, stdin=cmd_str, allow_failure=True)
except subprocess.CalledProcessError:
# This is ok. The command fails if the directory already exists
pass
def get_pref(prefs, name, request_fn):
"Get a preference from existing preference dictionary or invoke a function that can collect it from the user"
val = prefs.get(name)
if not val:
val = request_fn()
prefs[name] = val
return val
# Load saved preferences
prefs = {}
if os.path.exists(PREFS_FILE):
with open(PREFS_FILE, 'r') as prefs_fp:
prefs = json.load(prefs_fp)
if not user_ok("""Requirements:
1. Updated docs to reference the new release version where appropriate.
2. JDK7 and JDK8 compilers and libraries
3. Your Apache ID, already configured with SSH keys on id.apache.org and SSH keys available in this shell session
4. All issues in the target release resolved with valid resolutions (if not, this script will report the problematic JIRAs)
5. A GPG key used for signing the release. This key should have been added to public Apache servers and the KEYS file on the Kafka site
6. Standard toolset installed -- git, gpg, gradle, sftp, etc.
7. ~/.gradle/gradle.properties configured with the signing properties described in the release process wiki, i.e.
mavenUrl=https://repository.apache.org/service/local/staging/deploy/maven2
mavenUsername=your-apache-id
mavenPassword=your-apache-passwd
signing.keyId=your-gpgkeyId
signing.password=your-gpg-passphrase
signing.secretKeyRingFile=/Users/your-id/.gnupg/secring.gpg (if you are using GPG 2.1 and beyond, then this file will no longer exist anymore, and you have to manually create it from the new private key directory with "gpg --export-secret-keys -o ~/.gnupg/secring.gpg")
8. ~/.m2/settings.xml configured for pgp signing and uploading to apache release maven, i.e.,
<server>
<id>apache.releases.https</id>
<username>your-apache-id</username>
<password>your-apache-passwd</password>
</server>
<server>
<id>your-gpgkeyId</id>
<passphrase>your-gpg-passphase</passphrase>
</server>
<profile>
<id>gpg-signing</id>
<properties>
<gpg.keyname>your-gpgkeyId</gpg.keyname>
<gpg.passphraseServerId>your-gpgkeyId</gpg.passphraseServerId>
</properties>
</profile>
9. You may also need to update some gnupgp configs:
~/.gnupg/gpg-agent.conf
allow-loopback-pinentry
~/.gnupg/gpg.conf
use-agent
pinentry-mode loopback
echo RELOADAGENT | gpg-connect-agent
If any of these are missing, see https://cwiki.apache.org/confluence/display/KAFKA/Release+Process for instructions on setting them up.
Some of these may be used from these previous settings loaded from %s:
%s
Do you have all of these set up? (y/n): """ % (PREFS_FILE, json.dumps(prefs, indent=2))):
fail("Please try again once you have all the prerequisites ready.")
starting_branch = cmd_output('git rev-parse --abbrev-ref HEAD')
cmd("Verifying that you have no unstaged git changes", 'git diff --exit-code --quiet')
cmd("Verifying that you have no staged git changes", 'git diff --cached --exit-code --quiet')
release_version = raw_input("Release version (without any RC info, e.g. 1.0.0): ")
try:
release_version_parts = release_version.split('.')
if len(release_version_parts) != 3:
fail("Invalid release version, should have 3 version number components")
# Validate each part is a number
[int(x) for x in release_version_parts]
except ValueError:
fail("Invalid release version, should be a dotted version number")
rc = raw_input("Release candidate number: ")
dev_branch = '.'.join(release_version_parts[:2])
docs_version = ''.join(release_version_parts[:2])
# Validate that the release doesn't already exist and that the
cmd("Fetching tags from upstream", 'git fetch --tags %s' % PUSH_REMOTE_NAME)
tags = cmd_output('git tag').split()
if release_version in tags:
fail("The specified version has already been tagged and released.")
# TODO promotion
if not rc:
fail("Automatic Promotion is not yet supported.")
# Find the latest RC and make sure they want to promote that one
rc_tag = sorted([t for t in tags if t.startswith(release_version + '-rc')])[-1]
    if not user_ok("Found %s as latest RC for this release. Is this correct? (y/n): " % rc_tag):
fail("This script couldn't determine which RC tag to promote, you'll need to fix up the RC tags and re-run the script.")
sys.exit(0)
# Prereq checks
apache_id = get_pref(prefs, 'apache_id', lambda: raw_input("Enter your apache username: "))
jdk7_java_home = get_pref(prefs, 'jdk7', lambda: raw_input("Enter the path for JAVA_HOME for a JDK7 compiler (blank to use default JAVA_HOME): "))
jdk7_env = dict(os.environ) if jdk7_java_home.strip() else None
if jdk7_env is not None: jdk7_env['JAVA_HOME'] = jdk7_java_home
if "1.7.0" not in cmd_output("java -version", env=jdk7_env):
fail("You must be able to build artifacts with JDK7 for Scala 2.10 and 2.11 artifacts")
jdk8_java_home = get_pref(prefs, 'jdk8', lambda: raw_input("Enter the path for JAVA_HOME for a JDK8 compiler (blank to use default JAVA_HOME): "))
jdk8_env = dict(os.environ) if jdk8_java_home.strip() else None
if jdk8_env is not None: jdk8_env['JAVA_HOME'] = jdk8_java_home
if "1.8.0" not in cmd_output("java -version", env=jdk8_env):
fail("You must be able to build artifacts with JDK8 for Scala 2.12 artifacts")
def select_gpg_key():
print("Here are the available GPG keys:")
available_keys = cmd_output("gpg --list-secret-keys")
print(available_keys)
key_name = raw_input("Which user name (enter the user name without email address): ")
if key_name not in available_keys:
fail("Couldn't find the requested key.")
return key_name
key_name = get_pref(prefs, 'gpg-key', select_gpg_key)
gpg_passphrase = get_pref(prefs, 'gpg-pass', lambda: getpass("Passphrase for this GPG key: "))
# Do a quick validation so we can fail fast if the password is incorrect
with tempfile.NamedTemporaryFile() as gpg_test_tempfile:
gpg_test_tempfile.write("abcdefg")
cmd("Testing GPG key & passphrase", ["gpg", "--batch", "--pinentry-mode", "loopback", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", gpg_test_tempfile.name + ".asc", "--detach-sig", gpg_test_tempfile.name], stdin=gpg_passphrase)
# Save preferences
print("Saving preferences to %s" % PREFS_FILE)
with open(PREFS_FILE, 'w') as prefs_fp:
json.dump(prefs, prefs_fp)  # json.dump returns None, so don't rebind prefs
# Generate RC
try:
int(rc)
except ValueError:
fail("Invalid release candidate number: %s" % rc)
rc_tag = release_version + '-rc' + rc
delete_gitrefs = True # Since we are about to start creating new git refs, enable cleanup function on failure to try to delete them
cmd("Checking out current development branch", "git checkout -b %s %s" % (release_version, PUSH_REMOTE_NAME + "/" + dev_branch))
print("Updating version numbers")
replace("gradle.properties", "version", "version=%s" % release_version)
replace("tests/kafkatest/__init__.py", "__version__", "__version__ = '%s'" % release_version)
cmd("update streams quickstart pom", ["sed", "-i", ".orig"," s/-SNAPSHOT//", "streams/quickstart/pom.xml"])
cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/pom.xml"])
cmd("update streams quickstart java pom", ["sed", "-i", ".orig", "s/-SNAPSHOT//", "streams/quickstart/java/src/main/resources/archetype-resources/pom.xml"])
cmd("remove backup pom.xml", "rm streams/quickstart/pom.xml.orig")
cmd("remove backup java pom.xml", "rm streams/quickstart/java/pom.xml.orig")
cmd("remove backup java pom.xml", "rm streams/quickstart/java/src/main/resources/archetype-resources/pom.xml.orig")
# Command in explicit list due to messages with spaces
cmd("Commiting version number updates", ["git", "commit", "-a", "-m", "Bump version to %s" % release_version])
# Command in explicit list due to messages with spaces
cmd("Tagging release candidate %s" % rc_tag, ["git", "tag", "-a", rc_tag, "-m", rc_tag])
rc_githash = cmd_output("git show-ref --hash " + rc_tag)
cmd("Switching back to your starting branch", "git checkout %s" % starting_branch)
# Note that we don't use tempfile here because mkdtemp causes problems with sftp and being able to determine the absolute path to a file.
# Instead we rely on a fixed path, and if it already exists we treat that as leftover state from a failed attempt and bail out.
work_dir = os.path.join(REPO_HOME, ".release_work_dir")
if os.path.exists(work_dir):
fail("A previous attempt at a release left dirty state in the work directory. Clean up %s before proceeding. (This attempt will try to cleanup, simply retrying may be sufficient now...)" % work_dir)
os.makedirs(work_dir)
print("Temporary build working director:", work_dir)
kafka_dir = os.path.join(work_dir, 'kafka')
streams_quickstart_dir = os.path.join(kafka_dir, 'streams/quickstart')
print("Streams quickstart dir", streams_quickstart_dir)
cmd("Creating staging area for release artifacts", "mkdir kafka-" + rc_tag, cwd=work_dir)
artifacts_dir = os.path.join(work_dir, "kafka-" + rc_tag)
cmd("Cloning clean copy of repo", "git clone %s kafka" % REPO_HOME, cwd=work_dir)
cmd("Checking out RC tag", "git checkout -b %s %s" % (release_version, rc_tag), cwd=kafka_dir)
current_year = datetime.datetime.now().year
cmd("Verifying the correct year in NOTICE", "grep %s NOTICE" % current_year, cwd=kafka_dir)
with open(os.path.join(artifacts_dir, "RELEASE_NOTES.html"), 'w') as f:
print("Generating release notes")
try:
subprocess.check_call(["./release_notes.py", release_version], stdout=f)
except subprocess.CalledProcessError as e:
print_output(e.output)
print("*************************************************")
print("*** First command failure occurred here. ***")
print("*** Will now try to clean up working state. ***")
print("*************************************************")
fail("")
params = { 'release_version': release_version,
'rc_tag': rc_tag,
'artifacts_dir': artifacts_dir
}
cmd("Creating source archive", "git archive --format tar.gz --prefix kafka-%(release_version)s-src/ -o %(artifacts_dir)s/kafka-%(release_version)s-src.tgz %(rc_tag)s" % params)
cmd("Building artifacts", "gradle", cwd=kafka_dir, env=jdk7_env)
cmd("Building artifacts", "./gradlew clean releaseTarGzAll aggregatedJavadoc", cwd=kafka_dir, env=jdk7_env)
# we need extra cmd to build 2.12 with jdk8 specifically
cmd("Building artifacts for Scala 2.12", "./gradlew releaseTarGz -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env)
cmd("Copying artifacts", "cp %s/core/build/distributions/* %s" % (kafka_dir, artifacts_dir), shell=True)
cmd("Copying artifacts", "cp -R %s/build/docs/javadoc %s" % (kafka_dir, artifacts_dir))
for filename in os.listdir(artifacts_dir):
full_path = os.path.join(artifacts_dir, filename)
if not os.path.isfile(full_path):
continue
# Commands in explicit list due to key_name possibly containing spaces
cmd("Signing " + full_path, ["gpg", "--batch", "--passphrase-fd", "0", "-u", key_name, "--armor", "--output", full_path + ".asc", "--detach-sig", full_path], stdin=gpg_passphrase)
cmd("Verifying " + full_path, ["gpg", "--verify", full_path + ".asc", full_path])
# Note that for verification, we need to make sure only the filename is used with --print-md because the command line
# argument for the file is included in the output and verification uses a simple diff that will break if an absolute path
# is used.
dir, fname = os.path.split(full_path)
cmd("Generating MD5 for " + full_path, "gpg --print-md md5 %s > %s.md5" % (fname, fname), shell=True, cwd=dir)
cmd("Generating SHA1 for " + full_path, "gpg --print-md sha1 %s > %s.sha1" % (fname, fname), shell=True, cwd=dir)
cmd("Generating SHA512 for " + full_path, "gpg --print-md sha512 %s > %s.sha512" % (fname, fname), shell=True, cwd=dir)
cmd("Listing artifacts to be uploaded:", "ls -R %s" % artifacts_dir)
if not user_ok("Going to upload the artifacts in %s, listed above, to your Apache home directory. Ok (y/n)?): " % artifacts_dir):
fail("Quitting")
sftp_mkdir("public_html")
kafka_output_dir = "kafka-" + rc_tag
sftp_mkdir(os.path.join("public_html", kafka_output_dir))
public_release_dir = os.path.join("public_html", kafka_output_dir)
# The sftp -r option doesn't seem to work as would be expected, at least with the version shipping on OS X. To work around this we process all the files and directories manually...
sftp_cmds = ""
for root, dirs, files in os.walk(artifacts_dir):
assert root.startswith(artifacts_dir)
for file in files:
local_path = os.path.join(root, file)
remote_path = os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], file)
sftp_cmds += "\nput %s %s" % (local_path, remote_path)
for dir in dirs:
sftp_mkdir(os.path.join("public_html", kafka_output_dir, root[len(artifacts_dir)+1:], dir))
if sftp_cmds:
cmd("Uploading artifacts in %s to your Apache home directory" % root, "sftp -b - %s@home.apache.org" % apache_id, stdin=sftp_cmds)
with open(os.path.expanduser("~/.gradle/gradle.properties")) as f:
contents = f.read()
if not user_ok("Going to build and upload mvn artifacts based on these settings:\n" + contents + '\nOK (y/n)?: '):
fail("Retry again later")
cmd("Building and uploading archives", "./gradlew uploadArchivesAll", cwd=kafka_dir, env=jdk7_env)
cmd("Building and uploading archives", "./gradlew uploadCoreArchives_2_12 -PscalaVersion=2.12", cwd=kafka_dir, env=jdk8_env)
cmd("Building and uploading archives", "mvn deploy -Pgpg-signing", cwd=streams_quickstart_dir, env=jdk7_env)
release_notification_props = { 'release_version': release_version,
'rc': rc,
'rc_tag': rc_tag,
'rc_githash': rc_githash,
'dev_branch': dev_branch,
'docs_version': docs_version,
'apache_id': apache_id,
}
# TODO: Many of these suggested validation steps could be automated and would help pre-validate a lot of the stuff voters test
print("""
*******************************************************************************************************************************************************
Ok. We've built and staged everything for the %(rc_tag)s.
Now you should sanity check it before proceeding. All subsequent steps start making RC data public.
Some suggested steps:
* Grab the source archive and make sure it compiles: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz
* Grab one of the binary distros and run the quickstarts against them: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s.tgz
* Extract and verify one of the site docs jars: http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka_2.11-%(release_version)s-site-docs.tgz
* Build a sample against jars in the staging repo: (TODO: Can we get a temporary URL before "closing" the staged artifacts?)
* Validate GPG signatures on at least one file:
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.asc &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.md5 &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha1 &&
wget http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/kafka-%(release_version)s-src.tgz.sha512 &&
gpg --verify kafka-%(release_version)s-src.tgz.asc kafka-%(release_version)s-src.tgz &&
gpg --print-md md5 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.md5 &&
gpg --print-md sha1 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha1 &&
gpg --print-md sha512 kafka-%(release_version)s-src.tgz | diff - kafka-%(release_version)s-src.tgz.sha512 &&
rm kafka-%(release_version)s-src.tgz* &&
echo "OK" || echo "Failed"
* Validate the javadocs look ok. They are at http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/
*******************************************************************************************************************************************************
""" % release_notification_props)
if not user_ok("Have you sufficiently verified the release artifacts (y/n)?: "):
fail("Ok, giving up")
print("Next, we need to get the Maven artifacts we published into the staging repository.")
# TODO: Can we get this closed via a REST API since we already need to collect credentials for this repo?
print("Go to https://repository.apache.org/#stagingRepositories and hit 'Close' for the new repository that was created by uploading artifacts.")
if not user_ok("Have you successfully deployed the artifacts (y/n)?: "):
fail("Ok, giving up")
if not user_ok("Ok to push RC tag %s (y/n)?: " % rc_tag):
fail("Ok, giving up")
cmd("Pushing RC tag", "git push %s %s" % (PUSH_REMOTE_NAME, rc_tag))
# Move back to starting branch and clean out the temporary release branch (e.g. 1.0.0) we used to generate everything
cmd("Resetting repository working state", "git reset --hard HEAD && git checkout %s" % starting_branch, shell=True)
cmd("Deleting git branches %s" % release_version, "git branch -D %s" % release_version, shell=True)
email_contents = """
To: dev@kafka.apache.org, users@kafka.apache.org, kafka-clients@googlegroups.com
Subject: [VOTE] %(release_version)s RC%(rc)s
Hello Kafka users, developers and client-developers,
This is the first candidate for release of Apache Kafka %(release_version)s.
<DESCRIPTION OF MAJOR CHANGES, INCLUDE INDICATION OF MAJOR/MINOR RELEASE>
Release notes for the %(release_version)s release:
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/RELEASE_NOTES.html
*** Please download, test and vote by <VOTING DEADLINE, e.g. Monday, March 28, 9am PT>
Kafka's KEYS file containing PGP keys we use to sign the release:
http://kafka.apache.org/KEYS
* Release artifacts to be voted upon (source and binary):
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/
* Maven artifacts to be voted upon:
https://repository.apache.org/content/groups/staging/
* Javadoc:
http://home.apache.org/~%(apache_id)s/kafka-%(rc_tag)s/javadoc/
* Tag to be voted upon (off %(dev_branch)s branch) is the %(release_version)s tag:
https://git-wip-us.apache.org/repos/asf?p=kafka.git;a=tag;h=%(rc_githash)s
* Documentation:
http://kafka.apache.org/%(docs_version)s/documentation.html
* Protocol:
http://kafka.apache.org/%(docs_version)s/protocol.html
* Successful Jenkins builds for the %(dev_branch)s branch:
Unit/integration tests: https://builds.apache.org/job/kafka-%(dev_branch)s-jdk7/<BUILD NUMBER>/
System tests: https://jenkins.confluent.io/job/system-test-kafka-%(dev_branch)s/<BUILD_NUMBER>/
/**************************************
Thanks,
<YOU>
""" % release_notification_props
print()
print()
print("*****************************************************************")
print()
print(email_contents)
print()
print("*****************************************************************")
print()
print("All artifacts should now be fully staged. Use the above template to send the announcement for the RC to the mailing list.")
print("IMPORTANT: Note that there are still some substitutions that need to be made in the template:")
print(" - Describe major changes in this release")
print(" - Deadline for voting, which should be at least 3 days after you send out the email")
print(" - Jenkins build numbers for successful unit & system test builds")
print(" - Fill in your name in the signature")
print(" - Finally, validate all the links before shipping!")
print("Note that all substitutions are annotated with <> around them.")
|
apache-2.0
| -7,052,171,143,124,083,000
| 47.523346
| 275
| 0.670222
| false
| 3.489716
| true
| false
| false
|
googleapis/python-dialogflow
|
google/cloud/dialogflow_v2/services/versions/transports/base.py
|
1
|
8840
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.cloud.dialogflow_v2.types import version
from google.cloud.dialogflow_v2.types import version as gcd_version
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflow",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
_API_CORE_VERSION = google.api_core.__version__
class VersionsTransport(abc.ABC):
"""Abstract transport class for Versions."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
DEFAULT_HOST: str = "dialogflow.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes or self.AUTH_SCOPES
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): These two class methods are in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-api-core
# and google-auth are increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
# TODO: Remove this function once google-api-core >= 1.26.0 is required
@classmethod
def _get_self_signed_jwt_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Union[Optional[Sequence[str]], str]]:
"""Returns kwargs to pass to grpc_helpers.create_channel depending on the google-api-core version"""
self_signed_jwt_kwargs: Dict[str, Union[Optional[Sequence[str]], str]] = {}
if _API_CORE_VERSION and (
packaging.version.parse(_API_CORE_VERSION)
>= packaging.version.parse("1.26.0")
):
self_signed_jwt_kwargs["default_scopes"] = cls.AUTH_SCOPES
self_signed_jwt_kwargs["scopes"] = scopes
self_signed_jwt_kwargs["default_host"] = cls.DEFAULT_HOST
else:
self_signed_jwt_kwargs["scopes"] = scopes or cls.AUTH_SCOPES
return self_signed_jwt_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_versions: gapic_v1.method.wrap_method(
self.list_versions, default_timeout=None, client_info=client_info,
),
self.get_version: gapic_v1.method.wrap_method(
self.get_version, default_timeout=None, client_info=client_info,
),
self.create_version: gapic_v1.method.wrap_method(
self.create_version, default_timeout=None, client_info=client_info,
),
self.update_version: gapic_v1.method.wrap_method(
self.update_version, default_timeout=None, client_info=client_info,
),
self.delete_version: gapic_v1.method.wrap_method(
self.delete_version, default_timeout=None, client_info=client_info,
),
}
@property
def list_versions(
self,
) -> Callable[
[version.ListVersionsRequest],
Union[version.ListVersionsResponse, Awaitable[version.ListVersionsResponse]],
]:
raise NotImplementedError()
@property
def get_version(
self,
) -> Callable[
[version.GetVersionRequest], Union[version.Version, Awaitable[version.Version]]
]:
raise NotImplementedError()
@property
def create_version(
self,
) -> Callable[
[gcd_version.CreateVersionRequest],
Union[gcd_version.Version, Awaitable[gcd_version.Version]],
]:
raise NotImplementedError()
@property
def update_version(
self,
) -> Callable[
[gcd_version.UpdateVersionRequest],
Union[gcd_version.Version, Awaitable[gcd_version.Version]],
]:
raise NotImplementedError()
@property
def delete_version(
self,
) -> Callable[
[version.DeleteVersionRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
__all__ = ("VersionsTransport",)
|
apache-2.0
| -1,845,300,222,030,163,700
| 36.457627
| 108
| 0.637557
| false
| 4.154135
| false
| false
| false
|
JaDogg/__py_playground
|
reference/sketchbook/chess/chess.py
|
1
|
8066
|
"""
Chess board
No computer player yet
Sucks in other ways too
TO DO: look over http://home.hccnet.nl/h.g.muller/max-src2.html
"""
## b = InitialChessBoard()
## print str(b)
#. rnbqkbnr
#. pppppppp
#.
#.
#.
#.
#. PPPPPPPP
#. RNBQKBNR
## pw = HumanPlayer(white)
## pb = HumanPlayer(black)
## b.outcome
## ' '.join(sorted(map(str, b.get_moves())))
#. 'a2-a3 a2-a4 b1-a3 b1-c3 b2-b3 b2-b4 c2-c3 c2-c4 d2-d3 d2-d4 e2-e3 e2-e4 f2-f3 f2-f4 g1-f3 g1-h3 g2-g3 g2-g4 h2-h3 h2-h4 resign'
## m = b.parse_move('resign')
## b1 = m.update(b)
## b1.outcome
#. 'black'
def main():
print "(Moves look like 'e2-e3')"
play_chess(HumanPlayer, HumanPlayer)
def play_chess(white_strategy, black_strategy):
return play(InitialChessBoard(), [white_strategy, black_strategy])
def play(board, strategies):
players = [strategy(side)
for strategy, side in zip(strategies, board.get_sides())]
while board.get_outcome() is None:
board = board.play_turn(players)
for player in players:
player.on_game_over(board)
class HumanPlayer:
def __init__(self, side):
self.side = side
def pick_move(self, board):
board.show()
while True:
string = raw_input('%s, your move? ' % self.side.capitalize())
try:
move = board.parse_move(string)
except MoveIllegal:
print 'Illegal move.'
else:
return move
def on_game_over(self, board):
board.show()
if board.get_outcome() is None:
pass
elif board.get_outcome() == self.side:
print '%s, you win!' % self.side.capitalize()
elif board.get_outcome() == 'draw':
print 'You draw.'
else:
print '%s, you lose!' % self.side.capitalize()
def InitialChessBoard():
squares = ['----------',
'-rnbqkbnr-',
'-pppppppp-',
'-        -',
'-        -',
'-        -',
'-        -',
'-PPPPPPPP-',
'-RNBQKBNR-',
'----------',]
return ChessBoard(white, squares, (False, False), None)
class MoveIllegal(Exception):
pass
class ChessBoard:
def __init__(self, mover, squares, castled, outcome):
self.mover = mover
self.squares = squares
self.castled = castled
self.outcome = outcome
def __str__(self):
return '\n'.join(line[1:-1] for line in self.squares[1:-1])
def has_castled(self, player):
return self.castled[player == black]
def get_outcome(self):
"Return None, 'draw', black, or white (meaning the winner)."
return self.outcome
def resign(self):
return ChessBoard(opponent(self.mover),
self.squares,
self.castled,
opponent(self.mover))
def move_piece(self, (r0, c0), (r1, c1)):
squares = list(map(list, self.squares))
piece = squares[r0][c0]
squares[r0][c0] = ' '
squares[r1][c1] = piece
return ChessBoard(opponent(self.mover),
list(map(''.join, squares)),
self.castled,
None) # XXX check for checkmate or draw
def show(self):
print self
def get_sides(self):
return (white, black)
def play_turn(self, (white_player, black_player)):
player = white_player if self.mover == white else black_player
move = player.pick_move(self)
if move in self.get_moves():
return move.update(self)
raise Exception("Bad move")
def parse_move(self, string):
for move in self.get_moves():
if move.matches(string):
return move
raise MoveIllegal()
def get_moves(self):
return [ResignMove()] + self.get_piece_moves()
def get_piece_moves(self):
return sum(map(self.moves_from, self.army(self.mover)), [])
def army(self, player):
for r, row in enumerate(self.squares):
for c, piece in enumerate(row):
if piece.isalpha() and piece.isupper() == (player == white):
yield r, c
def moves_from(self, pos):
return list(self.gen_moves_from(pos))
def gen_moves_from(self, (r, c)):
piece = self.squares[r][c]
piece, white = piece.upper(), piece.isupper()
def is_takeable(r1, c1):
return is_empty(r1, c1) or has_opponent(r1, c1)
def is_empty(r1, c1):
return self.squares[r1][c1] == ' '
def has_opponent(r1, c1):
there = self.squares[r1][c1]
return there.isalpha() and there.isupper() != white
def move_to(r1, c1):
return PieceMove((r, c), (r1, c1))
def move_freely(dirs):
for dr, dc in dirs:
for i in range(1, 9):
if is_empty(r+dr*i, c+dc*i):
yield move_to(r+dr*i, c+dc*i)
else:
if has_opponent(r+dr*i, c+dc*i):
yield move_to(r+dr*i, c+dc*i)
break
if piece in ' -':
pass
elif piece == 'P':
# TODO: pawn promotion
# TODO: en passant
forward = -1 if white else 1
if is_empty(r+forward, c):
yield move_to(r+forward, c)
if r == (7 if white else 2): # initial 2 steps
if is_empty(r+forward*2, c): yield move_to(r+forward*2, c)
if has_opponent(r+forward, c-1): yield move_to(r+forward, c-1)
if has_opponent(r+forward, c+1): yield move_to(r+forward, c+1)
elif piece == 'K':
# TODO castling
# TODO forbid moving into check
# (and this can apply to moves of other pieces)
for dr, dc in queen_dirs:
if is_takeable(r+dr, c+dc):
yield move_to(r+dr, c+dc)
elif piece == 'Q':
for move in move_freely(queen_dirs): yield move
elif piece == 'R':
for move in move_freely(rook_dirs): yield move
elif piece == 'B':
for move in move_freely(bishop_dirs): yield move
elif piece == 'N':
for dr, dc in knight_jumps:
if 1 <= r+dr <= 8 and 1 <= c+dc <= 8:
if is_takeable(r+dr, c+dc):
yield move_to(r+dr, c+dc)
else:
assert False
rook_dirs = [( 0, 1), ( 0,-1), ( 1, 0), (-1, 0)]
bishop_dirs = [(-1,-1), (-1, 1), ( 1,-1), ( 1, 1)]
queen_dirs = rook_dirs + bishop_dirs
knight_jumps = [( 2, 1), ( 2,-1), ( 1, 2), ( 1,-2),
(-2, 1), (-2,-1), (-1, 2), (-1,-2)]
white, black = 'white', 'black'
def opponent(side):
return black if side == white else white
class ResignMove:
def __eq__(self, other):
return isinstance(other, ResignMove)
def update(self, board):
return board.resign()
def matches(self, string):
return string.lower() == str(self)
def __str__(self):
return 'resign'
class PieceMove:
def __init__(self, from_pos, to_pos):
self.from_pos = from_pos
self.to_pos = to_pos
def __eq__(self, other):
return (isinstance(other, PieceMove)
and self.from_pos == other.from_pos
and self.to_pos == other.to_pos)
def update(self, board):
return board.move_piece(self.from_pos, self.to_pos)
def matches(self, string):
return string.lower() == str(self)
def __str__(self):
# XXX 'a' is top of board for Black?
fr, fc = self.from_pos
tr, tc = self.to_pos
return '%s%d-%s%d' % ('abcdefgh'[fc-1], 9-fr,
'abcdefgh'[tc-1], 9-tr)
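# Sketch of a (very weak) computer player, since the module header notes there
# is none yet. Not part of the original file; the name RandomPlayer is made up.
# A strategy only needs pick_move() and on_game_over(), mirroring HumanPlayer,
# so it can be passed straight to play_chess().
import random

class RandomPlayer:
    def __init__(self, side):
        self.side = side
    def pick_move(self, board):
        # Prefer any piece move; resign only when nothing else is legal.
        moves = [m for m in board.get_moves() if not isinstance(m, ResignMove)]
        return random.choice(moves) if moves else ResignMove()
    def on_game_over(self, board):
        pass

# Usage: play_chess(HumanPlayer, RandomPlayer) puts the human on white.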
if __name__ == '__main__':
main()
|
mit
| -2,925,017,823,287,011,300
| 30.263566
| 131
| 0.508926
| false
| 3.376308
| false
| false
| false
|
vladiibine/whispy_lispy
|
src/whispy_lispy/cst.py
|
1
|
4255
|
# -*- coding: utf-8 -*-
"""
Concrete syntax tree stuff
Lexer should return tokens that are instances of classes found here
"""
from __future__ import unicode_literals
import six
from whispy_lispy import keywords
class CSTError(Exception):
pass
class Token(object):
"""Concrete syntax tree node.
Can represent a literal, operator, a name, or an atom.
An atom is an ordered list of the previously mentioned elements
"""
__slots__ = ['value', 'source', 'index']
def __init__(self, value, source=None, index=None):
"""
:param value: the value of the token (python type)
:param str source: the source code
:param int index: the index of the token in the source code
"""
self.value = value
self.source = source
self.index = index
def __repr__(self):
return '<T {}>'.format(self.value)
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, Token):
return False
return self.value == other.value
class ConcreteSyntaxNode(object):
"""A node in the concrete syntax tree.
The state of this node is kept as a tuple
"""
__slots__ = ['values']
def __init__(self, values):
"""
The tuple either contains other nodes, or values. Not both!
:type values: tuple
"""
types = set(type(elem) for elem in values)
if len(types) > 1:
raise CSTError(
"Concrete Syntax Node should contain either other nodes, or "
"simple values, not both. This node contains {} value(s): {}"
.format(len(types), values)
)
self.values = values
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, self.__class__):
return False
return self.values == other.values
def __repr__(self):
return '<cN {}>'.format(self.values)
def is_operator(self):
return (
len(self.values) == 1 and
self.values[0] in keywords.OPERATORS
)
def is_root(self):
return isinstance(self, RootConcreteSyntaxnode)
def is_leaf(self):
return all(
not isinstance(elem, ConcreteSyntaxNode) for elem in self.values)
def is_symbol(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], six.string_types)
)
def is_int(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], int)
)
def is_float(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], float)
)
def is_bool(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], bool)
)
def is_string(self):
return (
len(self.values) == 1 and
isinstance(self.values[0], six.string_types) and
self.values[0][0] == '"' and
self.values[0][-1] == '"'
)
def symbol_equals(self, param):
if not self.is_symbol():
raise CSTError('Node is not a symbol')
return self.values[0] == param
def symbol_in_iterable(self, iterable):
for elem in iterable:
if self.symbol_equals(elem):
return True
return False
class RootConcreteSyntaxnode(ConcreteSyntaxNode):
def __repr__(self):
return '<RcN {}>'.format(self.values)
class NestingCommand(Token):
"""Represents a command to either increment or decrement the tree level
"""
def __repr__(self):
return '{}'.format(self.value[0])
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.value == other.value
class IncrementNesting(NestingCommand):
def __init__(self, _=None, source=None, index=None):
super(IncrementNesting, self).__init__(['<INC>'], source, index)
class DecrementNesting(NestingCommand):
def __init__(self, _=None, source=None, index=None):
super(DecrementNesting, self).__init__(['<DEC>'], source, index)
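# Small illustrative usage sketch (not part of the module): how a lexer might
# combine the classes above. The literal values below are made up.
if __name__ == '__main__':
    tokens = [IncrementNesting(), Token('define'), Token(1), DecrementNesting()]
    sym = ConcreteSyntaxNode(('define',))
    assert sym.is_symbol() and sym.symbol_equals('define')
    root = RootConcreteSyntaxnode((sym, ConcreteSyntaxNode((1,))))
    assert root.is_root() and not root.is_leaf()
    # Mixing nested nodes and plain values in one node is rejected:
    try:
        ConcreteSyntaxNode((sym, 1))
    except CSTError:
        pass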
|
mit
| -7,624,388,774,132,405,000
| 25.761006
| 77
| 0.564512
| false
| 4.037002
| false
| false
| false
|
mcStargazer/nlp_talk_apr2017
|
nlp_demo.py
|
1
|
5920
|
# -*- coding: utf-8 -*-
##############################################################################
# references
##############################################################################
# www.udemy.com/machinelearning/ - I really enjoyed this course. Take it!
# original data/code at www.superdatascience.com/machine-learning/
# en.wikipedia.org/wiki/Natural_language_processing
##############################################################################
# import the libraries
##############################################################################
# look to the future if running on Python 2.7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# importing the standard libraries
import os
import sys
# importing 3rd party libraries
#import nltk # run this import and next line if stopwords
#nltk.download('stopwords') # are not already downloaded to your computer
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import numpy as np
import pandas as pd
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split as split
from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.naive_bayes import GaussianNB as GNB
from sklearn.ensemble import RandomForestClassifier as RFC
# importing local
sys.path.append(os.path.abspath('.'))
##############################################################################
# prepare the data: read and clean
##############################################################################
# read the datasets
dataset = pd.read_csv('Restaurant_Reviews.tsv', delimiter = '\t', quoting = 3)
common_words = set(stopwords.words('english')) # sets are faster
# clean the text
corpus = [] # a list to hold the results
ps = PorterStemmer() # lower sparsity by stemming
for i in range(0, len(dataset['Review'])):
#i=0; i=1; i=2
review = dataset['Review'][i] # get the i'th review
review = re.sub('[^a-zA-Z]', ' ', review) # spacify non-letters
review = review.lower() # make all lowercase
review = review.split() # create iteratable
review = [ps.stem(word) for word in review # stem the words
if not word in common_words] # exclude stop words
corpus.append( ' '.join(review) )
##############################################################################
# fit and assess the model
##############################################################################
# set variables for the run
features = 1000 # number of words to keep in the model
method = "GNB" # methods include GNB, DTC, or RFC
folds = 30 # number of cross-folds to perform
verbose = 0 # if non-zero, prints metrics for each fold
# begin reporting
print("\nUsing {} Classifier: {} features, {} folds".format(method,
features,
folds))
header = "{:>8s},{:>9s},{:>10s},{:>13s},{:>8s}"
rows = "{:8d},{:>9.3f},{:>10.3f},{:>13.3f},{:>8.3f}"
if verbose:
print(header.format("n-fold","accuracy","precision","completeness","f1"))
# use the bag-of-words model to create X and y
cv = CountVectorizer(max_features = features)
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values
# run across multiple folds
m = {'a':[], 'p':[], 'c':[], 'f1':[]} # dict to hold n-fold metrics
for n in range(folds):
# Splitting the dataset into the Training set and Test set
X_train, X_test, y_train, y_test = split(X, y, test_size=0.20)
# Use any appropriate classifier.
# Commonly: Naive Bayes, Decision Trees, and Random Forests.
# Also: CART, C5.0, Maximum Entropy
if method == "GNB":
classifier = GNB()
if method == "DTC":
classifier = DTC(criterion='entropy', random_state=0)
if method == "RFC":
classifier = RFC(n_estimators=10, criterion='entropy', random_state=0)
# fit the machine learning algorithm and predict the test set results
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# making the confusion matrix and derived metrics, and storing them
cm = confusion_matrix(y_test, y_pred)
a = (cm[0,0] + cm[1,1])/np.sum(cm) # accuracy = (TP+TN)/(TP+TN+FP+FN)
p = cm[0,0]/(cm[0,0] + cm[1,0]) # precision = TP/(TP+FP)
c = cm[0,0]/(cm[0,0] + cm[0,1]) # completeness = TP/(TP+FN)
f1 = 2*p*c/(p + c) # blend of precision and completeness
m['a'].append(a)
m['p'].append(p)
m['c'].append(c)
m['f1'].append(f1)
# report metrics for each fold
if verbose:
print(rows.format(n+1, a, p, c, f1))
# report summary of metrics
print("\n accuracy, precision, completeness, f1")
print(" minima", rows[6:].format(min(m['a']), min(m['p']),
min(m['c']), min(m['f1'])))
print(" mean", rows[6:].format(np.mean(m['a']), np.mean(m['p']),
np.mean(m['c']), np.mean(m['f1'])))
print(" maxima", rows[6:].format(max(m['a']), max(m['p']),
max(m['c']), max(m['f1'])))
##############################################################################
# where I am going from here...
##############################################################################
# continue exploring the parameter space balancing fit with appropriateness
# study word2vec and globe data models, other stemming algorithms
# www.udemy.com/natural-language-processing-with-deep-learning-in-python/
# www.udemy.com/data-science-natural-language-processing-in-python/
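##############################################################################
# appendix: scoring a new review (illustrative sketch, not in the original)
##############################################################################
# Reuses `ps`, `common_words`, `cv` and `classifier` from the last fold above;
# the review text below is invented purely for demonstration.
new_review = "The food was tasty but the service was painfully slow"
cleaned = ' '.join(ps.stem(word)
                   for word in re.sub('[^a-zA-Z]', ' ', new_review).lower().split()
                   if word not in common_words)
new_X = cv.transform([cleaned]).toarray()
print("Predicted label for sample review (1 = positive):",
      classifier.predict(new_X)[0])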
|
mit
| -1,762,223,717,350,091,000
| 39.827586
| 78
| 0.538176
| false
| 3.899868
| true
| false
| false
|
ciaracdb/ACMusic
|
acmusic/settings.py
|
1
|
4739
|
"""
Django settings for gettingstarted project, on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: change this before deploying to production!
SECRET_KEY = 'i+acxn5(akgsn!sr4^ghjqgf(^m&*@+g1@u^46gt@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'webpack_loader',
'dbapi.apps.DbapiConfig',
'mainsite.apps.MainsiteConfig'
)
MIDDLEWARE_CLASSES = (
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'acmusic.urls'
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.AllowAny',
],
'PAGE_SIZE': 10
}
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'bundles/', # must end with slash
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'TIMEOUT': None,
'IGNORE': ['.+\.hot-update.js', '.+\.map']
}
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'acmusic.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Montreal'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
DATABASES['default']['TEST'] = {'NAME': DATABASES['default']['NAME']}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
os.path.join(BASE_DIR, 'reactapp')
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
|
apache-2.0
| 228,340,471,517,918,800
| 28.993671
| 91
| 0.685588
| false
| 3.406902
| false
| false
| false
|
ContinuumIO/ashiba
|
enaml/enaml/qt/qt_dock_area.py
|
1
|
8630
|
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import os
from atom.api import Typed
from enaml.widgets.dock_area import ProxyDockArea
from enaml.widgets.dock_events import DockItemEvent
from .QtCore import QObject, QEvent, QSize, QTimer
from .QtGui import QTabWidget
from .docking.dock_manager import DockManager
from .docking.event_types import (
DockItemDocked, DockItemUndocked, DockItemExtended, DockItemRetracted,
DockItemShown, DockItemHidden, DockItemClosed, DockTabSelected
)
from .docking.q_dock_area import QDockArea
from .docking.style_sheets import get_style_sheet
from .qt_constraints_widget import QtConstraintsWidget
from .qt_dock_item import QtDockItem
TAB_POSITIONS = {
'top': QTabWidget.North,
'bottom': QTabWidget.South,
'left': QTabWidget.West,
'right': QTabWidget.East,
}
EVENT_TYPES = {
DockItemDocked: DockItemEvent.Docked,
DockItemUndocked: DockItemEvent.Undocked,
DockItemExtended: DockItemEvent.Extended,
DockItemRetracted: DockItemEvent.Retracted,
DockItemShown: DockItemEvent.Shown,
DockItemHidden: DockItemEvent.Hidden,
DockItemClosed: DockItemEvent.Closed,
DockTabSelected: DockItemEvent.TabSelected,
}
class DockLayoutFilter(QObject):
""" An event filter used by the QtDockArea.
This event filter listens for LayoutRequest events on the dock
area widget, and will send a size_hint_updated notification to
the constraints system when the dock area size hint changes. The
notifications are collapsed on a single shot timer so that the
dock area geometry can fully settle before being snapped by the
constraints layout engine.
"""
def __init__(self, owner):
super(DockLayoutFilter, self).__init__()
self._owner = owner
self._size_hint = QSize()
self._pending = False
self._timer = timer = QTimer()
timer.setSingleShot(True)
timer.timeout.connect(self.onNotify)
def onNotify(self):
self._owner.size_hint_updated()
self._pending = False
def eventFilter(self, obj, event):
if not self._pending and event.type() == QEvent.LayoutRequest:
hint = obj.sizeHint()
if hint != self._size_hint:
self._size_hint = hint
self._timer.start(0)
self._pending = True
return False
class DockEventFilter(QObject):
""" An event filter used by the QtDockArea.
This event filter listens for dock events on the dock area widget,
converts them to front-end events, and posts them to the front-end
declaration object.
"""
def __init__(self, owner):
super(DockEventFilter, self).__init__()
self._owner = owner
def eventFilter(self, obj, event):
e_type = EVENT_TYPES.get(event.type())
if e_type is not None:
d = self._owner.declaration
if d is not None:
d.dock_event(DockItemEvent(type=e_type, name=event.name()))
return False
class QtDockArea(QtConstraintsWidget, ProxyDockArea):
""" A Qt implementation of an Enaml DockArea.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QDockArea)
#: The docking manager which will drive the dock area.
manager = Typed(DockManager)
#: The event filter which listens for layout requests.
dock_layout_filter = Typed(DockLayoutFilter)
#: The event filter which listens for dock events.
dock_event_filter = Typed(DockEventFilter)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying QDockArea widget.
"""
self.widget = QDockArea(self.parent_widget())
self.manager = DockManager(self.widget)
self.dock_event_filter = DockEventFilter(self)
self.dock_layout_filter = DockLayoutFilter(self)
def init_widget(self):
""" Initialize the underlying widget.
"""
super(QtDockArea, self).init_widget()
d = self.declaration
self.set_tab_position(d.tab_position)
self.set_live_drag(d.live_drag)
if d.style:
self.set_style(d.style)
self.set_dock_events_enabled(d.dock_events_enabled)
def init_layout(self):
""" Initialize the layout of the underlying control.
"""
super(QtDockArea, self).init_layout()
manager = self.manager
for item in self.dock_items():
manager.add_item(item)
d = self.declaration
self.apply_layout(d.layout)
self.widget.installEventFilter(self.dock_layout_filter)
def destroy(self):
""" A reimplemented destructor.
This removes the event filter from the dock area and releases
the items from the dock manager.
"""
self.widget.removeEventFilter(self.dock_layout_filter)
self.widget.removeEventFilter(self.dock_event_filter)
del self.dock_layout_filter
del self.dock_event_filter
self.manager.destroy()
super(QtDockArea, self).destroy()
#--------------------------------------------------------------------------
# Utility Methods
#--------------------------------------------------------------------------
def dock_items(self):
""" Get an iterable of QDockItem children for this area.
"""
for d in self.declaration.dock_items():
w = d.proxy.widget
if w is not None:
yield w
#--------------------------------------------------------------------------
# Child Events
#--------------------------------------------------------------------------
def child_added(self, child):
""" Handle the child added event for a QtDockArea.
"""
super(QtDockArea, self).child_added(child)
if isinstance(child, QtDockItem):
w = child.widget
if w is not None:
self.manager.add_item(w)
def child_removed(self, child):
""" Handle the child removed event for a QtDockArea.
"""
super(QtDockArea, self).child_removed(child)
if isinstance(child, QtDockItem):
w = child.widget
if w is not None:
self.manager.remove_item(w)
#--------------------------------------------------------------------------
# ProxyDockArea API
#--------------------------------------------------------------------------
def set_tab_position(self, position):
""" Set the default tab position on the underyling widget.
"""
self.widget.setTabPosition(TAB_POSITIONS[position])
def set_live_drag(self, live_drag):
""" Set the live drag state for the underlying widget.
"""
self.widget.setOpaqueItemResize(live_drag)
def set_style(self, style):
""" Set the style for the underlying widget.
"""
self.widget.setStyleSheet(get_style_sheet(style))
def set_dock_events_enabled(self, enabled):
""" Set whether or not dock events are enabled for the area.
"""
widget = self.widget
widget.setDockEventsEnabled(enabled)
if enabled:
widget.installEventFilter(self.dock_event_filter)
else:
widget.removeEventFilter(self.dock_event_filter)
def save_layout(self):
""" Save the current layout on the underlying widget.
"""
layout = self.manager.save_layout()
if os.environ.get('ENAML_DEPRECATED_DOCK_LAYOUT'):
from enaml.layout.dock_layout import convert_to_old_docklayout
layout = convert_to_old_docklayout(layout)
return layout
def apply_layout(self, layout):
""" Apply a new layout to the underlying widget.
"""
if os.environ.get('ENAML_DEPRECATED_DOCK_LAYOUT'):
from enaml.layout.dock_layout import convert_to_new_docklayout
layout = convert_to_new_docklayout(layout)
self.manager.apply_layout(layout)
def update_layout(self, ops):
""" Update the layout from a list of layout operations.
"""
self.manager.update_layout(ops)
|
bsd-3-clause
| 3,861,476,787,407,625,700
| 32.065134
| 79
| 0.585747
| false
| 4.236622
| false
| false
| false
|
dstroppa/openstack-smartos-nova-grizzly
|
nova/virt/driver.py
|
1
|
37731
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
import sys
from oslo.config import cfg
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import event as virtevent
driver_opts = [
cfg.StrOpt('compute_driver',
help='Driver to use for controlling virtualization. Options '
'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
'fake.FakeDriver, baremetal.BareMetalDriver, '
'vmwareapi.VMwareESXDriver, vmwareapi.VMwareVCDriver, '
'smartosapi.SmartOSDriver'),
cfg.StrOpt('default_ephemeral_format',
default=None,
help='The default format an ephemeral_volume will be '
'formatted with on creation.'),
cfg.StrOpt('preallocate_images',
default='none',
help='VM image preallocation mode: '
'"none" => no storage provisioning is done up front, '
'"space" => storage is fully allocated at instance start'),
cfg.BoolOpt('use_cow_images',
default=True,
help='Whether to use cow images'),
]
CONF = cfg.CONF
CONF.register_opts(driver_opts)
LOG = logging.getLogger(__name__)
def driver_dict_from_config(named_driver_config, *args, **kwargs):
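    # Each entry of named_driver_config is expected to look like
    # "<name>=<importable.path.To.DriverClass>"; the class is imported,
    # instantiated with *args/**kwargs and stored under <name>.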
driver_registry = dict()
for driver_str in named_driver_config:
driver_type, _sep, driver = driver_str.partition('=')
driver_class = importutils.import_class(driver)
driver_registry[driver_type] = driver_class(*args, **kwargs)
return driver_registry
def block_device_info_get_root(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('root_device_name')
def block_device_info_get_swap(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('swap') or {'device_name': None,
'swap_size': 0}
def swap_is_usable(swap):
return swap and swap['device_name'] and swap['swap_size'] > 0
def block_device_info_get_ephemerals(block_device_info):
block_device_info = block_device_info or {}
ephemerals = block_device_info.get('ephemerals') or []
return ephemerals
def block_device_info_get_mapping(block_device_info):
block_device_info = block_device_info or {}
block_device_mapping = block_device_info.get('block_device_mapping') or []
return block_device_mapping
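# Illustrative sketch (not nova code): the helpers above all tolerate a None or
# partial block_device_info dict, e.g.:
#
#   info = {'root_device_name': '/dev/vda',
#           'swap': {'device_name': '/dev/vdb', 'swap_size': 1024},
#           'ephemerals': [], 'block_device_mapping': []}
#   block_device_info_get_root(info)              # -> '/dev/vda'
#   swap_is_usable(block_device_info_get_swap(info))   # -> True
#   block_device_info_get_ephemerals(None)        # -> []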
class ComputeDriver(object):
"""Base class for compute drivers.
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
Some methods here take an instance of nova.compute.service.Instance. This
is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic data structure into terms that are specific to the
virtualization platform.
"""
capabilities = {
"has_imagecache": False,
"supports_recreate": False,
}
def __init__(self, virtapi):
self.virtapi = virtapi
self._compute_event_callback = None
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
including catching up with currently running VMs on the given host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_info(self, instance):
"""Get the current status of an instance, by name (not ID!)
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_num_instances(self):
"""Return the total number of virtual machines.
Return the number of virtual machines that the hypervisor knows
about.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return len(self.list_instances())
def instance_exists(self, instance_id):
"""Checks existence of an instance on the host.
:param instance_id: The ID / name of the instance to lookup
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return instance_id in self.list_instances()
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
layer, as a list.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instance_uuids(self):
"""
Return the UUIDS of all the instances known to the virtualization
layer, as a list.
"""
raise NotImplementedError()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
"""
raise NotImplementedError()
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None):
"""Reboot the specified instance.
After this is called successfully, the instance's state
goes back to power_state.RUNNING. The virtualization
platform should ensure that the reboot action has completed
successfully even in cases in which the underlying domain/vm
is paused or halted/stopped.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
"""
raise NotImplementedError()
def get_console_pool_info(self, console_type):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_console_output(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_vnc_console(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_spice_console(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM"""
raise NotImplementedError()
def get_all_volume_usage(self, context, compute_host_bdms):
"""Return usage info for volumes attached to vms on
a given host"""
raise NotImplementedError()
def get_host_ip_addr(self):
"""
Retrieves the IP address of the dom0
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, connection_info, instance, mountpoint):
"""Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint):
"""Detach the disk attached to the instance."""
raise NotImplementedError()
def attach_interface(self, instance, image_meta, network_info):
"""Attach an interface to the instance."""
raise NotImplementedError()
def detach_interface(self, instance, network_info):
"""Detach an interface from the instance."""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info,
block_device_info=None):
"""
Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
raise NotImplementedError()
def snapshot(self, context, instance, image_id, update_task_state):
"""
Snapshots the specified instance.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param image_id: Reference to a pre-created image that will
hold the snapshot.
"""
raise NotImplementedError()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None):
"""Completes a resize, turning on the migrated instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
"""Finish reverting a resize, powering back on the instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance):
"""Pause the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance):
"""Unpause paused VM instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
"""suspend the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted."""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance."""
raise NotImplementedError()
def unrescue(self, instance, network_info):
"""Unrescue the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def power_off(self, instance):
"""Power off the specified instance."""
raise NotImplementedError()
def power_on(self, instance):
"""Power on the specified instance."""
raise NotImplementedError()
def soft_delete(self, instance):
"""Soft delete the specified instance."""
raise NotImplementedError()
def restore(self, instance):
"""Restore the specified instance."""
raise NotImplementedError()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task
:param nodename:
node which the caller want to get resources from
a driver that manages only one node can safely ignore this
:returns: Dictionary describing resources
"""
raise NotImplementedError()
def pre_live_migration(self, ctxt, instance_ref,
block_device_info, network_info,
migrate_data=None):
"""Prepare an instance for live migration
:param ctxt: security context
:param instance_ref: instance object that will be migrated
:param block_device_info: instance block device information
:param network_info: instance network information
:param migrate_data: implementation specific data dict.
"""
raise NotImplementedError()
def pre_block_migration(self, ctxt, instance_ref, disk_info):
"""Prepare a block device for migration
:param ctxt: security context
:param instance_ref: instance object that will have its disk migrated
:param disk_info: information about disk to be migrated (as returned
from get_instance_disk_info())
"""
raise NotImplementedError()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host.
:params ctxt: security context
:params instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:params dest: destination host
:params post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:params recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
:params block_migration: if true, migrate VM disk.
:params migrate_data: implementation specific params.
"""
raise NotImplementedError()
def post_live_migration_at_destination(self, ctxt, instance_ref,
network_info,
block_migration=False,
block_device_info=None):
"""Post operation of live migration at destination host.
:param ctxt: security context
:param instance_ref: instance object that is migrated
:param network_info: instance network information
:param block_migration: if true, post operation of block_migration.
"""
raise NotImplementedError()
def check_can_live_migrate_destination(self, ctxt, instance_ref,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param ctxt: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param src_compute_info: Info about the sending machine
:param dst_compute_info: Info about the receiving machine
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
"""
raise NotImplementedError()
def check_can_live_migrate_destination_cleanup(self, ctxt,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param ctxt: security context
:param dest_check_data: result of check_can_live_migrate_destination
"""
raise NotImplementedError()
def check_can_live_migrate_source(self, ctxt, instance_ref,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
"""
raise NotImplementedError()
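    # Hedged sketch (illustration only) of the call ordering implied by the
    # live-migration docstrings above.  In reality the compute manager, not
    # the driver, owns this flow; post_method and recover_method are the
    # manager callbacks named in live_migration()'s docstring.
    def _live_migration_flow_example(self, ctxt, instance, dest, network_info,
                                     block_device_info, post_method,
                                     recover_method):
        dest_data = self.check_can_live_migrate_destination(
            ctxt, instance, None, None)
        self.check_can_live_migrate_source(ctxt, instance, dest_data)
        self.pre_live_migration(ctxt, instance, block_device_info,
                                network_info)
        self.live_migration(ctxt, instance, dest, post_method, recover_method)
        self.post_live_migration_at_destination(ctxt, instance, network_info)
        self.check_can_live_migrate_destination_cleanup(ctxt, dest_data)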
def refresh_security_group_rules(self, security_group_id):
"""This method is called after a change to security groups.
All security groups and their associated rules live in the datastore,
and calling this method should apply the updated rules to instances
running the specified security group.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""This method is called when a security group is added to an instance.
This message is sent to the virtualization drivers on hosts that are
running an instance that belongs to a security group that has a rule
that references the security group identified by `security_group_id`.
It is the responsibility of this method to make sure any rules
that authorize traffic flow with members of the security group are
updated and any new members can communicate, and any removed members
cannot.
Scenario:
* we are running on host 'H0' and we have an instance 'i-0'.
* instance 'i-0' is a member of security group 'speaks-b'
* group 'speaks-b' has an ingress rule that authorizes group 'b'
* another host 'H1' runs an instance 'i-1'
* instance 'i-1' is a member of security group 'b'
When 'i-1' launches or terminates we will receive the message
to update members of group 'b', at which time we will make
any changes needed to the rules for instance 'i-0' to allow
or deny traffic coming from 'i-1', depending on if it is being
added or removed from the group.
In this scenario, 'i-1' could just as easily have been running on our
host 'H0' and this method would still have been called. The point was
that this method isn't called on the host where instances of that
group are running (as is the case with
:py:meth:`refresh_security_group_rules`) but is called where references
are made to authorizing those instances.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""This triggers a firewall update based on database changes.
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
:py:meth:`nova.db.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
a provider rule, then packets from the IP are dropped. This includes
intra-project traffic in the case of the allow_project_net_traffic
flag for the libvirt-derived classes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
        Concretely, the methods below must be called.
        - setup_basic_filtering (for nova-basic, etc.)
        - prepare_instance_filter (for nova-instance-instance-xxx, etc.)
        to_xml might also have to be called since it defines PROJNET and
        PROJMASK, but libvirt migrates those values through migrateToURI(),
        so it does not need to be called here.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:params instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, context, instance_id, new_pass=None):
"""
Set the root password on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the value of the new password.
"""
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""
Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def change_instance_metadata(self, context, instance, diff):
"""
Applies a diff to the instance metadata.
This is an optional driver method which is used to publish
changes to the instance's metadata to the hypervisor. If the
hypervisor has no means of publishing the instance metadata to
the instance, then this method should not be implemented.
"""
pass
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rebooting_instances(self, timeout, instances):
"""Poll for rebooting instances
:param timeout: the currently configured timeout for considering
rebooting instances to be stuck
:param instances: instances that have been in rebooting state
longer than the configured timeout
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_host_uptime(self, host):
"""Returns the result of calling "uptime" on the target host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
raise NotImplementedError()
def get_host_stats(self, refresh=False):
"""Return currently known host stats."""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
"""
Return performance counters associated with the given disk_id on the
given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
full pipeline.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def interface_stats(self, instance_name, iface_id):
"""
Return performance counters associated with the given iface_id on the
given instance_id. These are returned as [rx_bytes, rx_packets,
rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
indicates receive, tx indicates transmit, bytes and packets indicate
the total number of bytes or packets transferred, and errs and dropped
is the total number of packets failed / dropped.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def legacy_nwinfo(self):
"""True if the driver requires the legacy network_info format."""
# TODO(tr3buchet): update all subclasses and remove this method and
# related helpers.
raise NotImplementedError(self.legacy_nwinfo)
def macs_for_instance(self, instance):
"""What MAC addresses must this instance have?
Some hypervisors (such as bare metal) cannot do freeform virtualisation
of MAC addresses. This method allows drivers to return a set of MAC
addresses that the instance is to have. allocate_for_instance will take
this into consideration when provisioning networking for the instance.
Mapping of MAC addresses to actual networks (or permitting them to be
freeform) is up to the network implementation layer. For instance,
with openflow switches, fixed MAC addresses can still be virtualised
onto any L2 domain, with arbitrary VLANs etc, but regular switches
require pre-configured MAC->network mappings that will match the
actual configuration.
Most hypervisors can use the default implementation which returns None.
Hypervisors with MAC limits should return a set of MAC addresses, which
will be supplied to the allocate_for_instance call by the compute
manager, and it is up to that call to ensure that all assigned network
details are compatible with the set of MAC addresses.
This is called during spawn_instance by the compute manager.
:return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
None means 'no constraints', a set means 'these and only these
MAC addresses'.
"""
return None
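    # Hedged illustration of the contract documented above: a driver with a
    # fixed pool of NICs would return their MAC addresses as a set instead of
    # None.  The _available_macs attribute is an assumption made only for
    # this sketch; the example address comes from the docstring.
    def _macs_for_instance_example(self, instance):
        return set(getattr(self, '_available_macs', ['12:34:56:78:90:ab']))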
def manage_image_cache(self, context, all_instances):
"""
Manage the driver's local image cache.
Some drivers chose to cache images for instances on disk. This method
is an opportunity to do management of that cache which isn't directly
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
"""
pass
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
#NOTE(jogo) Currently only used for XenAPI-Pool
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
"""Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing the ip of the
machine that will be making the connection, the name of the iscsi
initiator and the hostname of the machine as follows::
{
'ip': ip,
'initiator': initiator,
'host': hostname
}
"""
raise NotImplementedError()
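    # Minimal sketch of the connector dictionary described above; every value
    # is illustrative and stands in for whatever a concrete driver reads from
    # its host (IP address, iSCSI initiator IQN and hostname).
    def _get_volume_connector_example(self, instance):
        return {
            'ip': '192.0.2.10',
            'initiator': 'iqn.1993-08.org.debian:01:example',
            'host': 'compute-host.example.org',
        }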
def get_available_nodes(self):
"""Returns nodenames of all nodes managed by the compute service.
This method is for multi compute-nodes support. If a driver supports
multi compute-nodes, this method returns a list of nodenames managed
by the service. Otherwise, this method should return
[hypervisor_hostname].
"""
stats = self.get_host_stats(refresh=True)
if not isinstance(stats, list):
stats = [stats]
return [s['hypervisor_hostname'] for s in stats]
def get_per_instance_usage(self):
"""Get information about instance resource usage.
:returns: dict of nova uuid => dict of usage info
"""
return {}
def instance_on_disk(self, instance):
"""Checks access of instance files on the host.
:param instance: instance to lookup
        Returns True if the files of an instance with the supplied ID are
        accessible on the host, False otherwise.
.. note::
Used in rebuild for HA implementation and required for validation
of access to instance shared disk files
"""
return False
def register_event_listener(self, callback):
"""Register a callback to receive events.
Register a callback to receive asynchronous event
notifications from hypervisors. The callback will
be invoked with a single parameter, which will be
an instance of the nova.virt.event.Event class."""
self._compute_event_callback = callback
def emit_event(self, event):
"""Dispatches an event to the compute manager.
Invokes the event callback registered by the
compute manager to dispatch the event. This
must only be invoked from a green thread."""
if not self._compute_event_callback:
LOG.debug("Discarding event %s" % str(event))
return
if not isinstance(event, virtevent.Event):
raise ValueError(
_("Event must be an instance of nova.virt.event.Event"))
try:
LOG.debug("Emitting event %s" % str(event))
self._compute_event_callback(event)
except Exception, ex:
LOG.error(_("Exception dispatching event %(event)s: %(ex)s")
% locals())
def load_compute_driver(virtapi, compute_driver=None):
"""Load a compute driver module.
Load the compute driver module specified by the compute_driver
configuration option or, if supplied, the driver name supplied as an
argument.
Compute drivers constructors take a VirtAPI object as their first object
and this must be supplied.
:param virtapi: a VirtAPI instance
:param compute_driver: a compute driver name to override the config opt
:returns: a ComputeDriver instance
"""
if not compute_driver:
compute_driver = CONF.compute_driver
if not compute_driver:
LOG.error(_("Compute driver option required, but not specified"))
sys.exit(1)
LOG.info(_("Loading compute driver '%s'") % compute_driver)
try:
driver = importutils.import_object_ns('nova.virt',
compute_driver,
virtapi)
return utils.check_isinstance(driver, ComputeDriver)
except ImportError as e:
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
def compute_driver_matches(match):
return CONF.compute_driver.endswith(match)
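# Hedged usage sketch (not part of the original module): loading a driver
# explicitly instead of relying on CONF.compute_driver.  The dotted driver
# name below is only an illustration of the naming convention; virtapi must
# be a real VirtAPI implementation supplied by the caller.
def _load_compute_driver_example(virtapi):
    return load_compute_driver(virtapi,
                               compute_driver='libvirt.LibvirtDriver')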
|
apache-2.0
| 6,519,940,940,505,130,000
| 39.310897
| 79
| 0.64496
| false
| 4.757408
| false
| false
| false
|
wegamekinglc/alpha-mind
|
alphamind/benchmarks/portfolio/rankbuild.py
|
1
|
3088
|
# -*- coding: utf-8 -*-
"""
Created on 2017-4-27
@author: cheng.li
"""
import datetime as dt
import numpy as np
import pandas as pd
from alphamind.portfolio.rankbuilder import rank_build
def benchmark_build_rank(n_samples: int, n_loops: int, n_included: int) -> None:
print("-" * 60)
print("Starting portfolio construction by rank benchmarking")
print("Parameters(n_samples: {0}, n_included: {1}, n_loops: {2})".format(n_samples, n_included,
n_loops))
n_portfolio = 10
x = np.random.randn(n_samples, n_portfolio)
start = dt.datetime.now()
for _ in range(n_loops):
calc_weights = rank_build(x, n_included)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
exp_weights = np.zeros((len(x), n_portfolio))
choosed_index = (-x).argsort(axis=0).argsort(axis=0) < n_included
for j in range(n_portfolio):
exp_weights[choosed_index[:, j], j] = 1.
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_weights, exp_weights)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
def benchmark_build_rank_with_group(n_samples: int, n_loops: int, n_included: int,
n_groups: int) -> None:
print("-" * 60)
print("Starting portfolio construction by rank with group-by values benchmarking")
print(
"Parameters(n_samples: {0}, n_included: {1}, n_loops: {2}, n_groups: {3})".format(n_samples,
n_included,
n_loops,
n_groups))
n_portfolio = 10
x = np.random.randn(n_samples, n_portfolio)
groups = np.random.randint(n_groups, size=n_samples)
start = dt.datetime.now()
for _ in range(n_loops):
calc_weights = rank_build(x, n_included, groups=groups)
impl_model_time = dt.datetime.now() - start
print('{0:20s}: {1}'.format('Implemented model', impl_model_time))
start = dt.datetime.now()
for _ in range(n_loops):
grouped_ordering = pd.DataFrame(-x).groupby(groups).rank()
exp_weights = np.zeros((len(x), n_portfolio))
masks = (grouped_ordering <= n_included).values
for j in range(n_portfolio):
exp_weights[masks[:, j], j] = 1.
benchmark_model_time = dt.datetime.now() - start
np.testing.assert_array_almost_equal(calc_weights, exp_weights)
print('{0:20s}: {1}'.format('Benchmark model', benchmark_model_time))
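def example_rank_build_usage():
    # Minimal usage sketch, assuming the rank_build semantics exercised by the
    # benchmarks above: a 0/1 weight matrix selecting the top `n_included`
    # scores per column (here 50 out of 500 assets for 3 factors).
    x = np.random.randn(500, 3)
    weights = rank_build(x, 50)
    assert weights.shape == (500, 3)
    return weights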
if __name__ == '__main__':
benchmark_build_rank(3000, 1000, 300)
benchmark_build_rank_with_group(3000, 1000, 10, 30)
|
mit
| 4,565,990,175,895,864,300
| 34.761905
| 101
| 0.540479
| false
| 3.658768
| false
| false
| false
|
ryanmiao/libvirt-test-API
|
repos/setVcpus/vcpupin_config.py
|
1
|
3547
|
#!/usr/bin/env python
# Test domain vcpu pin with flag VIR_DOMAIN_AFFECT_CONFIG, check
# domain config xml with vcpupin configuration.
import re
from xml.dom import minidom
import libvirt
from libvirt import libvirtError
from src import sharedmod
from utils import utils
required_params = ('guestname', 'vcpu', 'cpulist',)
optional_params = {}
def vcpupin_check(domobj, vcpu, cpumap):
"""check domain config xml with vcpupin element
"""
guestxml = domobj.XMLDesc(2)
logger.debug("domain %s xml :\n%s" %(domobj.name(), guestxml))
doc = minidom.parseString(guestxml)
vcpupin = doc.getElementsByTagName('vcpupin')
if not vcpupin:
logger.error("no vcpupin element in domain xml")
return 1
for i in range(len(vcpupin)):
if vcpupin[i].hasAttribute('vcpu') and \
vcpupin[i].hasAttribute('cpuset'):
vcpu_attr = vcpupin[i].getAttributeNode('vcpu')
cpu_attr = vcpupin[i].getAttributeNode('cpuset')
if int(vcpu_attr.nodeValue) == vcpu:
cpulist = cpu_attr.nodeValue
if cpulist == '':
cpumap_tmp = ()
                    for _ in range(maxcpu):
                        cpumap_tmp += (False,)
else:
cpumap_tmp = utils.param_to_tuple(cpulist, maxcpu)
if cpumap_tmp == cpumap:
logger.info("cpuset is as expected in domain xml")
return 0
else:
logger.error("cpuset is not as expected in domain xml")
return 1
if i == len(vcpupin) - 1:
logger.error("the vcpupin element with given vcpu is not found")
return 1
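# For reference, the element vcpupin_check() looks for sits under <cputune>
# in the domain XML (values illustrative):
#     <cputune>
#       <vcpupin vcpu='1' cpuset='0-3'/>
#     </cputune>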
def vcpupin_config(params):
"""pin domain vcpu to host cpu with config flag
"""
global logger
logger = params['logger']
params.pop('logger')
guestname = params['guestname']
vcpu = int(params['vcpu'])
cpulist = params['cpulist']
logger.info("the name of virtual machine is %s" % guestname)
logger.info("the given vcpu is %s" % vcpu)
logger.info("the given cpulist is %s" % cpulist)
global maxcpu
maxcpu = utils.get_host_cpus()
logger.info("%s physical cpu on host" % maxcpu)
conn = sharedmod.libvirtobj['conn']
try:
domobj = conn.lookupByName(guestname)
cpumap = utils.param_to_tuple(cpulist, maxcpu)
if not cpumap:
logger.error("cpulist: Invalid format")
return 1
logger.debug("cpumap for vcpu pin is:")
logger.debug(cpumap)
logger.info("pin domain vcpu %s to host cpulist %s with flag: %s" %
(vcpu, cpulist, libvirt.VIR_DOMAIN_AFFECT_CONFIG))
domobj.pinVcpuFlags(vcpu, cpumap, libvirt.VIR_DOMAIN_AFFECT_CONFIG)
logger.info("check vcpu pin info")
ret = domobj.vcpuPinInfo(libvirt.VIR_DOMAIN_AFFECT_CONFIG)
logger.debug("vcpu pin info is:")
logger.debug(ret)
if ret[vcpu] == cpumap:
logger.info("vcpu pin info is expected")
else:
logger.error("vcpu pin info is not expected")
return 1
except libvirtError, e:
logger.error("libvirt call failed: " + str(e))
return 1
logger.info("check domain vcpupin configuration in xml")
ret = vcpupin_check(domobj, vcpu, cpumap)
if ret:
logger.error("domain vcpu pin check failed")
return 1
else:
logger.info("domain vcpu pin check succeed")
return 0
|
gpl-2.0
| -5,593,081,498,587,919,000
| 31.541284
| 76
| 0.594587
| false
| 3.714136
| true
| false
| false
|
kubaszostak/gdal-dragndrop
|
osgeo/apps/Python27/Scripts/esri2wkt.py
|
1
|
2143
|
#!C:\OSGEO4~1\bin\python.exe
# ******************************************************************************
# $Id: esri2wkt.py 7464f4b11b93bb2d1098d1b962907228932bf8c1 2018-05-03 19:56:49 +1000 Ben Elliston $
#
# Project: GDAL
# Purpose: Simple command line program for translating ESRI .prj files
# into WKT.
# Author: Frank Warmerdam, warmerda@home.com
#
# ******************************************************************************
# Copyright (c) 2000, Frank Warmerdam
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import sys
from osgeo import osr
if len(sys.argv) < 2:
print('Usage: esri2wkt.py <esri .prj file>')
sys.exit(1)
prj_fd = open(sys.argv[1])
prj_lines = prj_fd.readlines()
prj_fd.close()
for i, prj_line in enumerate(prj_lines):
prj_lines[i] = prj_line.rstrip()
prj_srs = osr.SpatialReference()
err = prj_srs.ImportFromESRI(prj_lines)
if err != 0:
print('Error = %d' % err)
else:
print(prj_srs.ExportToPrettyWkt())
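# Example run (illustrative file name): `python esri2wkt.py stateplane.prj`
# prints the OGC WKT equivalent of the ESRI projection definition to stdout.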
|
mit
| -157,707,794,943,707,520
| 38.433962
| 101
| 0.631358
| false
| 3.467638
| false
| false
| false
|
OpenHumans/open-humans
|
public_data/models.py
|
1
|
3781
|
from collections import OrderedDict
from itertools import groupby
from django.db import models
from django.db.models import F
from common.fields import AutoOneToOneField
from open_humans.models import Member
from private_sharing.models import (
DataRequestProjectMember,
ProjectDataFile,
id_label_to_project,
)
def is_public(member, source):
"""
Return whether a given member has publicly shared the given source.
"""
project = id_label_to_project(source)
return bool(
member.public_data_participant.publicdataaccess_set.filter(
project_membership__project=project, is_public=True
)
)
def public_count(project):
"""
Get number of users publicly sharing a project's data.
"""
count = (
PublicDataAccess.objects.filter(
project_membership__project=project,
# Filter to only count members with datafiles for this project.
is_public=True,
project_membership__project__in=F(
"project_membership__member__user__datafiles__"
"parent_project_data_file__direct_sharing_project"
),
)
.distinct()
.count()
)
return count
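# Hedged usage sketch (identifiers are illustrative): these helpers are meant
# to be called from views or templates, e.g.
#
#     project = id_label_to_project('direct-sharing-128')
#     if is_public(member, 'direct-sharing-128'):
#         n_sharing = public_count(project)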
class Participant(models.Model):
"""
Represents a participant in the Public Data Sharing study.
"""
member = AutoOneToOneField(
Member, related_name="public_data_participant", on_delete=models.CASCADE
)
enrolled = models.BooleanField(default=False)
def _files_for_project(self, project):
return ProjectDataFile.objects.filter(
user=self.member.user, direct_sharing_project=project
).exclude(completed=False)
@property
def public_data_w_vis_membership_by_proj(self):
vis_projs_w_public_data = [
pda.project_membership.project
for pda in self.publicdataaccess_set.filter(
is_public=True, project_membership__visible=True
)
]
files = self.member.user.datafiles.filter(
parent_project_data_file__direct_sharing_project__in=vis_projs_w_public_data
).order_by("parent_project_data_file__direct_sharing_project", "created")
grouped_by_project = groupby(
files, key=lambda x: x.parent_project_data_file.direct_sharing_project
)
files_by_project = OrderedDict()
for proj, files in grouped_by_project:
files_by_project[proj] = []
for file in files:
files_by_project[proj].append(file)
return files_by_project
def __str__(self):
status = "Enrolled" if self.enrolled else "Not enrolled"
return str("{0}:{1}").format(self.member, status)
class PublicDataAccess(models.Model):
"""
Keep track of public sharing for a data source.
The data source is the DataRequestProject identified by the project_membership.
"""
participant = models.ForeignKey(Participant, on_delete=models.CASCADE)
project_membership = models.OneToOneField(
DataRequestProjectMember, on_delete=models.CASCADE
)
is_public = models.BooleanField(default=False)
def __str__(self):
status = "Private"
if self.is_public:
status = "Public"
return str("{0}:{1}:{2}").format(
self.participant.member.user.username,
self.project_membership.project.name,
status,
)
class WithdrawalFeedback(models.Model):
"""
Keep track of any feedback a study participant gives when they withdraw
from the study.
"""
member = models.ForeignKey(Member, on_delete=models.CASCADE)
feedback = models.TextField(blank=True)
withdrawal_date = models.DateTimeField(auto_now_add=True)
|
mit
| 3,227,372,887,171,134,000
| 29.491935
| 88
| 0.642687
| false
| 4.109783
| false
| false
| false
|
bzcheeseman/pytorch-EMM
|
Examples/basic_controller.py
|
1
|
15995
|
#
# Created by Aman LaChapelle on 3/23/17.
#
# pytorch-EMM
# Copyright (c) 2017 Aman LaChapelle
# Full license at pytorch-EMM/LICENSE.txt
#
import torch
import torch.nn as nn
import torch.nn.functional as Funct
from torch.autograd import Variable
import torch.optim as optim
import numpy as np
from Utils import num_flat_features
from EMM import EMM_NTM, EMM_GPU
from Utils import CopyTask
class FeedForwardController(nn.Module):
def __init__(self,
num_inputs,
num_hidden,
batch_size,
num_reads=1,
memory_dims=(128, 20)):
super(FeedForwardController, self).__init__()
self.num_inputs = num_inputs
self.num_hidden = num_hidden
self.batch_size = batch_size
self.memory_dims = memory_dims
self.in_to_hid = nn.Linear(self.num_inputs, self.num_hidden)
self.read_to_hid = nn.Linear(self.memory_dims[1]*num_reads, self.num_hidden)
def forward(self, x, read):
x = x.contiguous()
x = x.view(-1, num_flat_features(x))
read = read.contiguous()
read = read.view(-1, num_flat_features(read))
x = Funct.relu(self.in_to_hid(x)) + Funct.relu(self.read_to_hid(read))
return x
class GRUController(nn.Module):
def __init__(self,
num_inputs,
num_hidden,
batch_size,
num_reads=1,
memory_dims=(128, 20)):
super(GRUController, self).__init__()
self.num_inputs = num_inputs
self.num_hidden = num_hidden
self.batch_size = batch_size
self.memory_dims = memory_dims
self.gru = nn.GRUCell(
input_size=self.num_inputs,
hidden_size=self.num_hidden
)
self.read_to_in = nn.Linear(self.memory_dims[1]*num_reads, self.num_inputs)
def forward(self, x, read, h_t):
x = x.contiguous()
r = Funct.relu(self.read_to_in(read))
r = r.view(*x.size())
x = Funct.relu(x + r)
x = x.view(-1, num_flat_features(x))
h_tp1 = self.gru(x, h_t)
return h_tp1
class NTM(nn.Module):
def __init__(self,
num_inputs,
num_hidden,
num_outputs,
batch_size,
num_reads,
memory_dims=(128, 20)):
super(NTM, self).__init__()
self.num_inputs = num_inputs
self.num_hidden = num_hidden
self.num_outputs = num_outputs
self.batch_size = batch_size
self.num_reads = num_reads
self.memory_dims = memory_dims
self.EMM = EMM_NTM(self.num_hidden, self.batch_size, num_reads=self.num_reads,
num_shifts=3, memory_dims=self.memory_dims)
self.controller = GRUController(self.num_inputs, self.num_hidden, self.batch_size,
num_reads=self.num_reads, memory_dims=self.memory_dims)
self.hid_to_out = nn.Linear(self.num_hidden, self.num_outputs)
def init_hidden(self):
wr, ww, memory = self.EMM.init_weights_mem()
hidden = Variable(torch.zeros(self.batch_size, self.num_hidden))
return hidden, wr, ww, memory
def forward(self, x, h, wr, ww, m):
x = x.permute(1, 0, 2, 3)
def step(x_t, h_t, wr_t, ww_t, m_t):
r_t, wr_t, ww_t, m_t = self.EMM(h_t, wr_t, ww_t, m_t)
h_t = self.controller(x_t, r_t, h_t)
out = Funct.sigmoid(self.hid_to_out(h_t.view(-1, num_flat_features(h_t))))
return out, h_t, wr_t, ww_t, m_t
x_t = torch.unbind(x, 0)
out = []
for i in range(x.size()[0]):
o, h, wr, ww, m = step(x_t[i], h, wr, ww, m)
out.append(o)
outs = torch.stack(out, 1)
return outs, h, wr, ww, m
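def example_ntm_forward_pass():
    # Minimal usage sketch mirroring train_ntm() below: build an NTM, grab its
    # initial weights/memory and push one batch of random sequences through
    # it.  The (batch, seq_len, num_inputs, 1) shape follows the CopyTask
    # convention used by the training code; the sizes here are illustrative.
    ntm = NTM(num_inputs=8, num_hidden=100, num_outputs=8, batch_size=1,
              num_reads=1)
    h, wr, ww, m = ntm.init_hidden()
    x = Variable(torch.randn(1, 5, 8, 1))
    outs, h, wr, ww, m = ntm(x, h, wr, ww, m)
    return outs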
class GPU_NTM(nn.Module):
def __init__(self,
num_inputs,
num_hidden,
num_outputs,
batch_size,
mem_banks,
num_reads,
memory_dims=(32, 32)):
super(GPU_NTM, self).__init__()
self.num_inputs = num_inputs
self.num_hidden = num_hidden
self.num_outputs = num_outputs
self.batch_size = batch_size
self.mem_banks = mem_banks
self.num_reads = num_reads
self.memory_dims = memory_dims
self.EMM = EMM_GPU(self.num_hidden, self.num_reads*self.memory_dims[1], self.batch_size,
memory_banks=self.mem_banks, memory_dims=self.memory_dims)
self.controller = GRUController(self.num_inputs, self.num_hidden, self.batch_size,
num_reads=self.num_reads, memory_dims=self.memory_dims)
self.hid_to_out = nn.Linear(self.num_hidden, self.num_outputs)
def init_hidden(self):
wr, ww, memory = self.EMM.init_weights_mem()
hidden = Variable(torch.zeros(self.batch_size, self.num_hidden), requires_grad=True)
return hidden, wr, ww, memory
def forward(self, x, h, wr, ww, m):
x = x.permute(1, 0, 2, 3)
def step(x_t, h_t, wr_t, ww_t, m_t):
r_tp1, m_tp1, wr_tp1, ww_tp1 = self.EMM(h_t, wr_t, ww_t, m_t) # update reads, memory
print(x_t, h_t)
h_tp1 = self.controller(x_t, r_tp1, h_t) # update hidden state - goes to nan whenever the input is zero
out = Funct.relu(self.hid_to_out(h_tp1)) # send out data
return out, h_tp1, wr_tp1, ww_tp1, m_tp1
x_t = torch.unbind(x, 0)
out = []
for i in range(x.size()[0]):
o, h_t, wr_t, ww_t, m_t = step(x_t[i], h, wr, ww, m)
# assert not torch.equal(h_t.data, h.data)
assert not torch.equal(wr_t.data, wr.data)
assert not torch.equal(ww_t.data, ww.data)
assert not torch.equal(m_t.data, m.data)
h = h_t
wr = wr_t
ww = ww_t
m = m_t
out.append(o)
outs = torch.stack(out, 1)
return outs, h, wr, ww, m
def train_gpu(batch, num_inputs, seq_len, num_hidden):
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
ntm = GPU_NTM(num_inputs, num_hidden, num_inputs, batch, num_reads=1, mem_banks=5)
try:
ntm.load_state_dict(torch.load("models/copy_seqlen_{}.dat".format(seq_len)))
    except (FileNotFoundError, AttributeError):
pass
ntm.train()
h, wr, ww, m = ntm.init_hidden()
criterion = nn.SmoothL1Loss()
max_seq_len = 20
current_lr = 1e-3
print_steps = 1000
optimizer = optim.Adam(ntm.parameters(), lr=current_lr)
for length in range(4, max_seq_len, 2):
current_lr = 1e-3
running_loss = 0.0
prev_running_loss = []
test = CopyTask(length, [num_inputs, 1], num_samples=3e4)
data_loader = DataLoader(test, batch_size=batch, shuffle=True, num_workers=4)
for epoch in range(1):
for i, data in enumerate(data_loader, 0):
inputs, labels = data
inputs = Variable(inputs)
labels = Variable(labels)
ntm.zero_grad()
outputs, h, wr, ww, m = ntm(inputs, h, wr, ww, m)
if np.isnan(m.data[0, 0, 0]):
print(i)
raise NameError
h = Variable(h.data)
wr = Variable(wr.data)
ww = Variable(ww.data)
m = Variable(m.data)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.data[0]
if i % print_steps == print_steps-1:
print('[length: %d, epoch: %d, i: %5d] average loss: %.3f' % (length, epoch + 1, i + 1,
running_loss / print_steps))
plt.imshow(m[0].data.numpy())
plt.savefig("plots/ntm/{}_{}_{}_memory.png".format(length, epoch + 1, i + 1))
plt.close()
plottable_input = torch.squeeze(inputs.data[0]).numpy()
plottable_output = torch.squeeze(outputs.data[0]).numpy()
plottable_true_output = torch.squeeze(labels.data[0]).numpy()
plt.imshow(plottable_input)
plt.savefig("plots/ntm/{}_{}_{}_input.png".format(length, epoch + 1, i + 1))
plt.close()
plt.imshow(plottable_output)
plt.savefig("plots/ntm/{}_{}_{}_net_output.png".format(length, epoch + 1, i + 1))
plt.close()
plt.imshow(plottable_true_output)
plt.savefig("plots/ntm/{}_{}_{}_true_output.png".format(length, epoch + 1, i + 1))
plt.close()
# print("Previous average losses since lr decay: ", prev_running_loss)
prev_running_loss.append(running_loss / print_steps)
if len(prev_running_loss) > 2:
if np.abs(np.diff(prev_running_loss)).min() <= 0.001 \
and running_loss/print_steps < 1./len(prev_running_loss):
torch.save(ntm.state_dict(), "models/gpu_copy_seqlen_{}.dat".format(seq_len))
current_lr = max([current_lr * 1e-1, 1e-6])
print("lr decayed to: ", current_lr)
optimizer = optim.Adam(ntm.parameters(), lr=current_lr)
prev_running_loss.clear()
running_loss = 0.0
torch.save(ntm.state_dict(), "models/gpu_copy_seqlen_{}.dat".format(seq_len))
print("Finished Training")
test = CopyTask(5 * max_seq_len, [num_inputs - 1, 1], num_samples=1e4)
data_loader = DataLoader(test, batch_size=batch, shuffle=True, num_workers=4)
total_loss = 0.0
for i, data in enumerate(data_loader, 0):
inputs, labels = data
        inputs = Variable(inputs, volatile=True)  # inference only, no autograd history
        labels = Variable(labels)
        outputs, h, wr, ww, m = ntm(inputs, h, wr, ww, m)
if i % 1000 / batch == (1000 / batch) - 1:
plottable_input = torch.squeeze(inputs.data[0]).numpy()
plottable_output = torch.squeeze(outputs.data[0]).numpy()
plt.imshow(plottable_input)
plt.savefig("plots/ntm/{}_{}_input_test.png".format(epoch + 1, i + 1))
plt.close()
plt.imshow(plottable_output)
plt.savefig("plots/ntm/{}_{}_net_output_test.png".format(epoch + 1, i + 1))
plt.close()
total_loss += len(data) * criterion(outputs, labels).data
print("Total Loss: {}".format(total_loss / len(data_loader)))
def train_ntm(batch, num_inputs, seq_len, num_hidden):
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
ntm = NTM(num_inputs, num_hidden, num_inputs, batch, num_reads=1)
h, wr, ww, m = ntm.init_hidden()
try:
ntm.load_state_dict(torch.load("models/copy_seqlen_{}.dat".format(seq_len)))
    except (FileNotFoundError, AttributeError):
pass
ntm.train()
state = ntm.state_dict()
criterion = nn.L1Loss()
current_lr = 1e-3
print_steps = 1000
optimizer = optim.Adam(ntm.parameters(), lr=current_lr, weight_decay=0.00001)
max_seq_len = 20
for length in range(4, max_seq_len):
current_lr = 1e-3
running_loss = 0.0
prev_running_loss = []
test = CopyTask(length, [num_inputs, 1], num_samples=2e4)
data_loader = DataLoader(test, batch_size=batch, shuffle=True, num_workers=4)
for epoch in range(5):
for i, data in enumerate(data_loader, 0):
inputs, labels = data
inputs = Variable(inputs)
labels = Variable(labels)
ntm.zero_grad()
outputs, h, wr, ww, m = ntm(inputs, h, wr, ww, m)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
h = Variable(h.data)
wr = Variable(wr.data)
ww = Variable(ww.data)
m = Variable(m.data)
running_loss += loss.data[0]
if i % print_steps == print_steps-1:
print('[length: %d, epoch: %d, i: %5d] average loss: %.3f' % (length, epoch + 1, i + 1,
running_loss / print_steps))
plt.imshow(wr.squeeze(0).data.numpy())
plt.savefig("plots/ntm/{}_{}_{}_read.png".format(length, epoch+1, i + 1))
plt.close()
plt.imshow(m.squeeze().data.numpy().T)
plt.savefig("plots/ntm/{}_{}_{}_memory.png".format(length, epoch + 1, i + 1))
plt.close()
plt.imshow(ww.data.numpy())
plt.savefig("plots/ntm/{}_{}_{}_write.png".format(length, epoch + 1, i + 1))
plt.close()
plottable_input = torch.squeeze(inputs.data[0]).numpy()
plottable_output = torch.squeeze(outputs.data[0]).numpy()
plottable_true_output = torch.squeeze(labels.data[0]).numpy()
plt.imshow(plottable_input)
plt.savefig("plots/ntm/{}_{}_{}_input.png".format(length, epoch+1, i + 1))
plt.close()
plt.imshow(plottable_output)
plt.savefig("plots/ntm/{}_{}_{}_net_output.png".format(length, epoch+1, i + 1))
plt.close()
plt.imshow(plottable_true_output)
plt.savefig("plots/ntm/{}_{}_{}_true_output.png".format(length, epoch+1, i + 1))
plt.close()
prev_running_loss.append(running_loss / print_steps)
if len(prev_running_loss) > 2:
if np.abs(np.diff(prev_running_loss)).min() <= 0.001 \
and running_loss / print_steps < 1. / len(prev_running_loss):
torch.save(ntm.state_dict(), "models/gpu_copy_seqlen_{}.dat".format(seq_len))
current_lr = max([current_lr * 1e-1, 1e-6])
print("lr decayed to: ", current_lr)
optimizer = optim.Adam(ntm.parameters(), lr=current_lr)
prev_running_loss.clear()
running_loss = 0.0
torch.save(ntm.state_dict(), "models/copy_seqlen_{}.dat".format(seq_len))
print("Finished Training")
test = CopyTask(5 * max_seq_len, [num_inputs-1, 1], num_samples=1e4)
data_loader = DataLoader(test, batch_size=batch, shuffle=True, num_workers=4)
total_loss = 0.0
for i, data in enumerate(data_loader, 0):
inputs, labels = data
        inputs = Variable(inputs, volatile=True)  # inference only, no autograd history
        labels = Variable(labels)
        outputs, h, wr, ww, m = ntm(inputs, h, wr, ww, m)
if i % 1000/batch == (1000/batch)-1:
plottable_input = torch.squeeze(inputs.data[0]).numpy()
plottable_output = torch.squeeze(outputs.data[0]).numpy()
plt.imshow(plottable_input)
plt.savefig("plots/ntm/{}_{}_input_test.png".format(epoch + 1, i + 1))
plt.close()
plt.imshow(plottable_output)
plt.savefig("plots/ntm/{}_{}_net_output_test.png".format(epoch + 1, i + 1))
plt.close()
total_loss += len(data) * criterion(outputs, labels).data
print("Total Loss: {}".format(total_loss / len(data_loader)))
if __name__ == '__main__':
# train_ntm(1, 8, 5, 100)
train_gpu(1, 8, 5, 100)
|
gpl-3.0
| -1,579,938,047,493,311,000
| 34.153846
| 117
| 0.519662
| false
| 3.508445
| true
| false
| false
|
decabyte/analog_sensors_board
|
scripts/analog_sensors.py
|
1
|
4417
|
# analog_sensors.py
# author: Valerio De Carolis <valerio.decarolis@gmail.com>
# date: 2013-10-30
# license: MIT
import sys
import os
import time
import signal
import serial
from serial import Serial, SerialException
# default serial configuration
DEFAULT_CONF = {
'port': '/dev/ttyACM3',
'baudrate': 57600,
'bytesize': serial.EIGHTBITS,
'parity': serial.PARITY_NONE,
'stopbits': serial.STOPBITS_ONE,
'timeout': 5
}
class AnalogSensorsClient:
def __init__(self):
# battery
self.batt0 = 0
self.batt1 = 0
self.batt2 = 0
self.batt3 = 0
self.raw_batt0 = 0
self.raw_batt1 = 0
self.raw_batt2 = 0
self.raw_batt3 = 0
# temperature
self.temp0 = 0
self.temp1 = 0
self.temp2 = 0
self.temp3 = 0
self.raw_temp0 = 0
self.raw_temp1 = 0
self.raw_temp2 = 0
self.raw_temp3 = 0
# pressure
self.bmp_temperature = 0
self.bmp_pressure = 0
self.bmp_ut = 0
self.bmp_up = 0
self.bmp_dirty = 0
# humidity
self.humidity = 0
self.raw_humidity = 0
# timestamps
self.timestamp = 0
# protocol parsers
self.GRAMMAR = {
'BMPCAL': self.parse_bmpcal,
'BAT': self.parse_battery,
'TEMP': self.parse_temperature,
'HIH': self.parse_humidity,
'BMP': self.parse_pressure,
'TIME': self.parse_timestamp
}
def print_status(self):
print('BATTERY VOLTAGES: {}V {}V {}V {}V'.format(
self.batt0, self.batt1, self.batt2, self.batt3))
print('VEHICLE TEMPERATURES: {}C {}C {}C {}C'.format(
self.temp0, self.temp1, self.temp2, self.temp3))
print('VEHICLE ENVIRONMENT: {}C {}Pa {}RH%\n'.format(
self.bmp_temperature, self.bmp_pressure, self.humidity))
def parse_message(self, msg):
'''
An example serial message:
$TEMP,122.10,123.10,123.10,127.85,488,492,492,511
'''
# parse serial message
items = msg.split(',')
# look for identifier
        if items[0][0] != '$':
return
# extract message type
msg_type = items[0][1:]
# check message type
try:
parser = self.GRAMMAR[msg_type]
parser(items)
except KeyError as ke:
print('[WARN]: message not recognized! bad format?')
def parse_battery(self, field):
# battery voltages
self.batt0 = float(field[1])
self.batt1 = float(field[2])
self.batt2 = float(field[3])
self.batt3 = float(field[4])
# raw analog readings
self.raw_batt0 = int(field[5])
self.raw_batt1 = int(field[6])
self.raw_batt2 = int(field[7])
self.raw_batt3 = int(field[8])
def parse_bmpcal(self, field):
pass
def parse_temperature(self, field):
# temperature
self.temp0 = float(field[1])
self.temp1 = float(field[2])
self.temp2 = float(field[3])
self.temp3 = float(field[4])
# raw analog readings
self.raw_temp0 = int(field[5])
self.raw_temp1 = int(field[6])
self.raw_temp2 = int(field[7])
self.raw_temp3 = int(field[8])
def parse_humidity(self, field):
self.humidity = float(field[1])
self.raw_humidity = int(field[2])
def parse_pressure(self, field):
self.bmp_temperature = float(field[1])
self.bmp_pressure = float(field[2])
self.bmp_ut = int(field[3])
self.bmp_up = int(field[4])
self.bmp_dirty = int(field[5])
def parse_timestamp(self, field):
self.timestamp = int(field[1])
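def example_offline_parse():
    # Hedged sketch: exercise the parser without a serial port, using the
    # frame format documented in AnalogSensorsClient.parse_message().
    client = AnalogSensorsClient()
    client.parse_message('$TEMP,122.10,123.10,123.10,127.85,488,492,492,511')
    client.print_status()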
def main():
# control flags
running = True
connected = False
sconn = None
# signal handler
def handler(signum, frame):
running = False
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
# analog client
client = AnalogSensorsClient()
# connection main loop
while running:
try:
sconn = Serial(**DEFAULT_CONF)
except ValueError as ve:
print('[FATAL]: bad port configuration!')
sys.exit(-1)
except SerialException as se:
connected = False
print('[ERROR]: device not found, waiting for device ...')
# wait a little before trying to reconnect
time.sleep(5)
continue
else:
connected = True
# data processing loop
while connected:
try:
line = sconn.readline()
except SerialException as se:
connected = False
print('[ERROR]: connection lost!')
break
if len(line) != 0:
                msg = line.strip()  # remove any carriage return
client.parse_message(msg) # digest the message
# display status
client.print_status()
# release the serial connection
if sconn.isOpen():
sconn.close()
# close the connection if hang
if sconn is not None and sconn.isOpen():
sconn.close()
sys.exit(0)
if __name__ == '__main__':
main()
|
mit
| -6,909,965,110,469,711,000
| 19.737089
| 61
| 0.659724
| false
| 2.724861
| false
| false
| false
|
hellowebapp/hellowebapp-ic-code
|
collection/models.py
|
1
|
2080
|
from __future__ import unicode_literals
from PIL import Image
from django.contrib.auth.models import User
from django.db import models
class Timestamp(models.Model):
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
class Meta:
abstract = True
class Thing(Timestamp):
name = models.CharField(max_length=255)
description = models.TextField()
slug = models.SlugField(unique=True)
user = models.OneToOneField(User, blank=True, null=True)
upgraded = models.BooleanField(default=False)
stripe_id = models.CharField(max_length=255, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return "/things/%s/" % self.slug
class Social(Timestamp):
SOCIAL_TYPES = (
('twitter', 'Twitter'),
('facebook', 'Facebook'),
('pinterest', 'Pinterest'),
('instagram', 'Instagram'),
)
network = models.CharField(max_length=255, choices=SOCIAL_TYPES)
username = models.CharField(max_length=255)
    thing = models.ForeignKey(Thing,
        on_delete=models.CASCADE, related_name="social_accounts")
# where we're overriding the admin name
class Meta:
verbose_name_plural = "Social media links"
# our helper, add above the new model
def get_image_path(instance, filename):
return '/'.join(['thing_images', instance.thing.slug, filename])
class Upload(models.Model):
thing = models.ForeignKey(Thing,
on_delete=models.CASCADE, related_name="uploads")
image = models.ImageField(upload_to=get_image_path)
# add this bit in after our model
def save(self, *args, **kwargs):
# this is required when you override save functions
super(Upload, self).save(*args, **kwargs)
# our new code
if self.image:
image = Image.open(self.image)
i_width, i_height = image.size
max_size = (1000,1000)
if i_width > 1000:
image.thumbnail(max_size, Image.ANTIALIAS)
image.save(self.image.path)
|
mit
| -5,621,587,585,220,553,000
| 28.714286
| 68
| 0.650481
| false
| 3.823529
| false
| false
| false
|
sannecottaar/burnman
|
burnman/material.py
|
1
|
20844
|
from __future__ import print_function
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
import numpy as np
def material_property(func):
"""
Decorator @material_property to be used for cached properties of materials.
To be used on function in Material or derived classes that should be exposed
as read-only properties that are cached. The function Material.reset() will
reset the cached values.
Internally, the values are stored in a dictionary member called _cached, which
is emptied by .reset().
"""
class mat_obj():
def __init__(self, func):
self.func = func
self.varname = self.func.__name__
def get(self, obj):
if not hasattr(obj, "_cached"):
raise Exception("The material_property decorator could not find class member _cached. "
"Did you forget to call Material.__init__(self) in __init___?")
cache_array = getattr(obj, "_cached")
if self.varname not in cache_array:
cache_array[self.varname] = self.func(obj)
return cache_array[self.varname]
return property(mat_obj(func).get, doc=func.__doc__)
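# Hedged usage sketch (illustration only): a derived class gets caching for
# free by decorating a computed property, e.g.
#
#     class MyMaterial(Material):
#         @material_property
#         def molar_volume(self):
#             return self._expensive_eos_call()  # evaluated once per state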
class Material(object):
"""
Base class for all materials. The main functionality is unroll() which
returns a list of objects of type :class:`~burnman.mineral.Mineral` and their molar
fractions. This class is available as ``burnman.Material``.
The user needs to call set_method() (once in the beginning) and set_state()
before querying the material with unroll() or density().
"""
def __init__(self):
self._pressure = None
self._temperature = None
if not hasattr(self, "name"):
# if a derived class decides to set .name before calling this
# constructor (I am looking at you, SLB_2011.py!), do not
# overwrite the name here.
self._name = self.__class__.__name__
self._cached = {}
@property
def name(self):
""" Human-readable name of this material.
By default this will return the name of the class, but it can be set
        to an arbitrary string. Overridden in Mineral.
"""
return self._name
@name.setter
def name(self, value):
self._name = value
def set_method(self, method):
"""
Set the averaging method. See :doc:`averaging` for details.
Notes
-----
Needs to be implemented in derived classes.
"""
raise NotImplementedError(
"need to implement set_method() in derived class!")
def to_string(self):
"""
Returns a human-readable name of this material. The default implementation will return the name of the class,
which is a reasonable default.
Returns
-------
name : string
Name of this material.
"""
return "'" + self.name + "'"
def debug_print(self, indent=""):
"""
Print a human-readable representation of this Material.
"""
raise NotImplementedError(
"Derived classes need to implement debug_print(). This is '" + self.__class__.__name__ + "'")
def print_minerals_of_current_state(self):
"""
Print a human-readable representation of this Material at the current P, T as a list of minerals.
This requires set_state() has been called before.
"""
(minerals, fractions) = self.unroll()
if len(minerals) == 1:
print(minerals[0].to_string())
else:
print("Material %s:" % self.to_string())
for (mineral, fraction) in zip(minerals, fractions):
print(" %g of phase %s" % (fraction, mineral.to_string()))
def set_state(self, pressure, temperature):
"""
Set the material to the given pressure and temperature.
Parameters
----------
pressure : float
The desired pressure in [Pa].
temperature : float
The desired temperature in [K].
"""
if not hasattr(self, "_pressure"):
raise Exception("Material.set_state() could not find class member _pressure. "
"Did you forget to call Material.__init__(self) in __init___?")
self.reset()
self._pressure = pressure
self._temperature = temperature
def reset(self):
"""
Resets all cached material properties.
It is typically not required for the user to call this function.
"""
self._cached = {}
def unroll(self):
"""
Unroll this material into a list of :class:`burnman.Mineral` and their molar fractions. All averaging schemes
then operate on this list of minerals. Note that the return value of this function may depend on the current
state (temperature, pressure).
Notes
-----
Needs to be implemented in derived classes.
Returns
-------
fractions : list of float
List of molar fractions, should sum to 1.0.
minerals : list of :class:`burnman.Mineral`
List of minerals.
"""
raise NotImplementedError(
"need to implement unroll() in derived class!")
def evaluate(self, vars_list, pressures, temperatures):
"""
Returns an array of material properties requested through a list of strings at given pressure and temperature
        conditions. At the end it resets the state to the original values.
The user needs to call set_method() before.
Parameters
----------
vars_list : list of strings
Variables to be returned for given conditions
pressures : ndlist or ndarray of float
n-dimensional array of pressures in [Pa].
temperatures : ndlist or ndarray of float
n-dimensional array of temperatures in [K].
Returns
-------
output : array of array of float
            Array returning all variables at given pressure/temperature values. output[j][i] is property vars_list[j]
            evaluated at pressures[i] and temperatures[i].
"""
old_pressure = self.pressure
old_temperature = self.temperature
pressures = np.array(pressures)
temperatures = np.array(temperatures)
assert(pressures.shape == temperatures.shape)
output = np.empty((len(vars_list),) + pressures.shape)
for i, p in np.ndenumerate(pressures):
self.set_state(p, temperatures[i])
for j in range(len(vars_list)):
output[(j,) + i] = getattr(self, vars_list[j])
if old_pressure is None or old_temperature is None:
# do not set_state if old values were None. Just reset to None
# manually
self._pressure = self._temperature = None
self.reset()
else:
self.set_state(old_pressure, old_temperature)
return output
@property
def pressure(self):
"""
Returns current pressure that was set with :func:`~burnman.material.Material.set_state`.
Notes
-----
- Aliased with :func:`~burnman.material.Material.P`.
Returns
-------
pressure : float
Pressure in [Pa].
"""
return self._pressure
@property
def temperature(self):
"""
Returns current temperature that was set with :func:`~burnman.material.Material.set_state`.
Notes
-----
- Aliased with :func:`~burnman.material.Material.T`.
Returns
-------
temperature : float
Temperature in [K].
"""
return self._temperature
@material_property
def internal_energy(self):
"""
Returns the internal energy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.energy`.
Returns
-------
internal_energy : float
The internal energy in [J].
"""
raise NotImplementedError(
"need to implement internal_energy() in derived class!")
@material_property
def molar_gibbs(self):
"""
Returns the Gibbs free energy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.gibbs`.
Returns
-------
molar_gibbs : float
Gibbs free energy in [J].
"""
raise NotImplementedError(
"need to implement molar_gibbs() in derived class!")
@material_property
def molar_helmholtz(self):
"""
Returns the Helmholtz free energy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.helmholtz`.
Returns
-------
molar_helmholtz : float
Helmholtz free energy in [J].
"""
raise NotImplementedError(
"need to implement molar_helmholtz() in derived class!")
@material_property
def molar_mass(self):
"""
Returns molar mass of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
Returns
-------
molar_mass : float
Molar mass in [kg/mol].
"""
raise NotImplementedError(
"need to implement molar_mass() in derived class!")
@material_property
def molar_volume(self):
"""
Returns molar volume of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.V`.
Returns
-------
molar_volume : float
Molar volume in [m^3/mol].
"""
raise NotImplementedError(
"need to implement molar_volume() in derived class!")
@material_property
def density(self):
"""
Returns the density of this material.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.rho`.
Returns
-------
density : float
The density of this material in [kg/m^3].
"""
raise NotImplementedError(
"need to implement density() in derived class!")
@material_property
def molar_entropy(self):
"""
Returns entropy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.S`.
Returns
-------
entropy : float
            Entropy in [J/K/mol].
"""
raise NotImplementedError(
"need to implement molar_entropy() in derived class!")
@material_property
def molar_enthalpy(self):
"""
Returns enthalpy of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.H`.
Returns
-------
enthalpy : float
Enthalpy in [J].
"""
raise NotImplementedError(
"need to implement molar_enthalpy() in derived class!")
@material_property
def isothermal_bulk_modulus(self):
"""
Returns isothermal bulk modulus of the material.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.K_T`.
Returns
-------
isothermal_bulk_modulus : float
Bulk modulus in [Pa].
"""
raise NotImplementedError(
"need to implement isothermal_bulk_moduls() in derived class!")
@material_property
def adiabatic_bulk_modulus(self):
"""
Returns the adiabatic bulk modulus of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.K_S`.
Returns
-------
adiabatic_bulk_modulus : float
Adiabatic bulk modulus in [Pa].
"""
raise NotImplementedError(
"need to implement adiabatic_bulk_modulus() in derived class!")
@material_property
def isothermal_compressibility(self):
"""
Returns isothermal compressibility of the mineral (or inverse isothermal bulk modulus).
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.beta_T`.
Returns
-------
(K_T)^-1 : float
Compressibility in [1/Pa].
"""
raise NotImplementedError(
"need to implement compressibility() in derived class!")
@material_property
def adiabatic_compressibility(self):
"""
Returns adiabatic compressibility of the mineral (or inverse adiabatic bulk modulus).
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.beta_S`.
Returns
-------
adiabatic_compressibility : float
adiabatic compressibility in [1/Pa].
"""
raise NotImplementedError(
"need to implement compressibility() in derived class!")
@material_property
def shear_modulus(self):
"""
Returns shear modulus of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
        - Aliased with :func:`~burnman.material.Material.G`.
Returns
-------
shear_modulus : float
Shear modulus in [Pa].
"""
raise NotImplementedError(
"need to implement shear_modulus() in derived class!")
@material_property
def p_wave_velocity(self):
"""
Returns P wave speed of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.v_p`.
Returns
-------
p_wave_velocity : float
P wave speed in [m/s].
"""
raise NotImplementedError(
"need to implement p_wave_velocity() in derived class!")
@material_property
def bulk_sound_velocity(self):
"""
Returns bulk sound speed of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.v_phi`.
Returns
-------
        bulk_sound_velocity : float
Sound velocity in [m/s].
"""
raise NotImplementedError(
"need to implement bulk_sound_velocity() in derived class!")
@material_property
def shear_wave_velocity(self):
"""
Returns shear wave speed of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.v_s`.
Returns
-------
shear_wave_velocity : float
Wave speed in [m/s].
"""
raise NotImplementedError(
"need to implement shear_wave_velocity() in derived class!")
@material_property
def grueneisen_parameter(self):
"""
Returns the grueneisen parameter of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.gr`.
Returns
-------
gr : float
            Grueneisen parameter [unitless].
"""
raise NotImplementedError(
"need to implement grueneisen_parameter() in derived class!")
@material_property
def thermal_expansivity(self):
"""
Returns thermal expansion coefficient of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.alpha`.
Returns
-------
alpha : float
Thermal expansivity in [1/K].
"""
raise NotImplementedError(
"need to implement thermal_expansivity() in derived class!")
@material_property
def heat_capacity_v(self):
"""
Returns heat capacity at constant volume of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.C_v`.
Returns
-------
heat_capacity_v : float
Heat capacity in [J/K/mol].
"""
raise NotImplementedError(
"need to implement heat_capacity_v() in derived class!")
@material_property
def heat_capacity_p(self):
"""
Returns heat capacity at constant pressure of the mineral.
Notes
-----
- Needs to be implemented in derived classes.
- Aliased with :func:`~burnman.material.Material.C_p`.
Returns
-------
heat_capacity_p : float
Heat capacity in [J/K/mol].
"""
raise NotImplementedError(
"need to implement heat_capacity_p() in derived class!")
#
# Aliased properties
@property
def P(self):
"""Alias for :func:`~burnman.material.Material.pressure`"""
return self.pressure
@property
def T(self):
"""Alias for :func:`~burnman.material.Material.temperature`"""
return self.temperature
@property
def energy(self):
"""Alias for :func:`~burnman.material.Material.internal_energy`"""
return self.internal_energy
@property
def helmholtz(self):
"""Alias for :func:`~burnman.material.Material.molar_helmholtz`"""
return self.molar_helmholtz
@property
def gibbs(self):
"""Alias for :func:`~burnman.material.Material.molar_gibbs`"""
return self.molar_gibbs
@property
def V(self):
"""Alias for :func:`~burnman.material.Material.molar_volume`"""
return self.molar_volume
@property
def rho(self):
"""Alias for :func:`~burnman.material.Material.density`"""
return self.density
@property
def S(self):
"""Alias for :func:`~burnman.material.Material.molar_entropy`"""
return self.molar_entropy
@property
def H(self):
"""Alias for :func:`~burnman.material.Material.molar_enthalpy`"""
return self.molar_enthalpy
@property
def K_T(self):
"""Alias for :func:`~burnman.material.Material.isothermal_bulk_modulus`"""
return self.isothermal_bulk_modulus
@property
def K_S(self):
"""Alias for :func:`~burnman.material.Material.adiabatic_bulk_modulus`"""
return self.adiabatic_bulk_modulus
@property
def beta_T(self):
"""Alias for :func:`~burnman.material.Material.isothermal_compressibility`"""
return self.isothermal_compressibility
@property
def beta_S(self):
"""Alias for :func:`~burnman.material.Material.adiabatic_compressibility`"""
return self.adiabatic_compressibility
@property
def G(self):
"""Alias for :func:`~burnman.material.Material.shear_modulus`"""
return self.shear_modulus
@property
def v_p(self):
"""Alias for :func:`~burnman.material.Material.p_wave_velocity`"""
return self.p_wave_velocity
@property
def v_phi(self):
"""Alias for :func:`~burnman.material.Material.bulk_sound_velocity`"""
return self.bulk_sound_velocity
@property
def v_s(self):
"""Alias for :func:`~burnman.material.Material.shear_wave_velocity`"""
return self.shear_wave_velocity
@property
def gr(self):
"""Alias for :func:`~burnman.material.Material.grueneisen_parameter`"""
return self.grueneisen_parameter
@property
def alpha(self):
"""Alias for :func:`~burnman.material.Material.thermal_expansivity`"""
return self.thermal_expansivity
@property
def C_v(self):
"""Alias for :func:`~burnman.material.Material.heat_capacity_v`"""
return self.heat_capacity_v
@property
def C_p(self):
"""Alias for :func:`~burnman.material.Material.heat_capacity_p`"""
return self.heat_capacity_p
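# --- Hypothetical usage sketch (not part of burnman) -------------------------
# The block below only illustrates the API defined above: a toy subclass plugs a
# single @material_property into Material, and evaluate() then queries it on a
# pressure/temperature grid.  ToyMaterial and its made-up linear density model
# are assumptions for demonstration, not a real equation of state.
if __name__ == "__main__":
    import numpy as np
    class ToyMaterial(Material):
        """A fake material with an invented density model (illustration only)."""
        @material_property
        def density(self):
            # arbitrary constants: rho0 + a*P - b*T
            return 3000.0 + 1.0e-8 * self.pressure - 0.1 * self.temperature
    toy = ToyMaterial()
    pressures = np.linspace(1.0e9, 10.0e9, 5)
    temperatures = np.full_like(pressures, 1500.0)
    # evaluate() calls set_state() internally and returns one row per requested
    # property, here an array of shape (1, 5)
    densities, = toy.evaluate(['density'], pressures, temperatures)
    print(densities)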
|
gpl-2.0
| -7,016,233,993,997,625,000
| 28.607955
| 117
| 0.576473
| false
| 4.388211
| false
| false
| false
|
j-carl/ansible
|
lib/ansible/module_utils/facts/system/distribution.py
|
1
|
31164
|
# -*- coding: utf-8 -*-
# Copyright: (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import re
from ansible.module_utils.common.sys_info import get_distribution, get_distribution_version, \
get_distribution_codename
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
def get_uname(module, flags=('-v')):
if isinstance(flags, str):
flags = flags.split()
command = ['uname']
command.extend(flags)
rc, out, err = module.run_command(command)
if rc == 0:
return out
return None
def _file_exists(path, allow_empty=False):
# not finding the file, exit early
if not os.path.exists(path):
return False
# if just the path needs to exists (ie, it can be empty) we are done
if allow_empty:
return True
    # file exists but is empty and we don't allow_empty
if os.path.getsize(path) == 0:
return False
# file exists with some content
return True
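# Note on the helper above: zero-byte marker files count as present only for
# OSDIST_LIST entries below that set 'allowempty': True (e.g. /etc/arch-release,
# /etc/vmware-release); an empty file without that flag (say /etc/lsb-release)
# is treated as if it were absent.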
class DistributionFiles:
    '''Has-a collection of various distro file parsers (os-release, etc.) and logic for finding the right one.'''
# every distribution name mentioned here, must have one of
# - allowempty == True
# - be listed in SEARCH_STRING
# - have a function get_distribution_DISTNAME implemented
# keep names in sync with Conditionals page of docs
OSDIST_LIST = (
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'Archlinux'},
{'path': '/etc/os-release', 'name': 'SUSE'},
{'path': '/etc/SuSE-release', 'name': 'SUSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/sourcemage-release', 'name': 'SMGL'},
{'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
{'path': '/etc/flatcar/update.conf', 'name': 'Flatcar'},
{'path': '/etc/os-release', 'name': 'NA'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT',
'SMGL': 'Source Mage GNU/Linux',
}
# We can't include this in SEARCH_STRING because a name match on its keys
# causes a fallback to using the first whitespace separated item from the file content
# as the name. For os-release, that is in form 'NAME=Arch'
OS_RELEASE_ALIAS = {
'Archlinux': 'Arch Linux'
}
STRIP_QUOTES = r'\'\"\\'
def __init__(self, module):
self.module = module
def _get_file_content(self, path):
return get_file_content(path)
def _get_dist_file_content(self, path, allow_empty=False):
        # can't find that dist file or it is incorrectly empty
if not _file_exists(path, allow_empty=allow_empty):
return False, None
data = self._get_file_content(path)
return True, data
def _parse_dist_file(self, name, dist_file_content, path, collected_facts):
dist_file_dict = {}
dist_file_content = dist_file_content.strip(DistributionFiles.STRIP_QUOTES)
if name in self.SEARCH_STRING:
            # look for this distribution's search string in the file data
# only the distribution name is set, the version is assumed to be correct from distro.linux_distribution()
if self.SEARCH_STRING[name] in dist_file_content:
# this sets distribution=RedHat if 'Red Hat' shows up in data
dist_file_dict['distribution'] = name
dist_file_dict['distribution_file_search_string'] = self.SEARCH_STRING[name]
else:
# this sets distribution to what's in the data, e.g. CentOS, Scientific, ...
dist_file_dict['distribution'] = dist_file_content.split()[0]
return True, dist_file_dict
if name in self.OS_RELEASE_ALIAS:
if self.OS_RELEASE_ALIAS[name] in dist_file_content:
dist_file_dict['distribution'] = name
return True, dist_file_dict
return False, dist_file_dict
# call a dedicated function for parsing the file content
# TODO: replace with a map or a class
try:
            # FIXME: most of these don't actually look at the dist file contents, but random other stuff
distfunc_name = 'parse_distribution_file_' + name
distfunc = getattr(self, distfunc_name)
parsed, dist_file_dict = distfunc(name, dist_file_content, path, collected_facts)
return parsed, dist_file_dict
except AttributeError as exc:
self.module.debug('exc: %s' % exc)
# this should never happen, but if it does fail quietly and not with a traceback
return False, dist_file_dict
return True, dist_file_dict
# to debug multiple matching release files, one can use:
# self.facts['distribution_debug'].append({path + ' ' + name:
# (parsed,
# self.facts['distribution'],
# self.facts['distribution_version'],
# self.facts['distribution_release'],
# )})
def _guess_distribution(self):
# try to find out which linux distribution this is
dist = (get_distribution(), get_distribution_version(), get_distribution_codename())
distribution_guess = {
'distribution': dist[0] or 'NA',
'distribution_version': dist[1] or 'NA',
# distribution_release can be the empty string
'distribution_release': 'NA' if dist[2] is None else dist[2]
}
distribution_guess['distribution_major_version'] = distribution_guess['distribution_version'].split('.')[0] or 'NA'
return distribution_guess
def process_dist_files(self):
# Try to handle the exceptions now ...
# self.facts['distribution_debug'] = []
dist_file_facts = {}
dist_guess = self._guess_distribution()
dist_file_facts.update(dist_guess)
for ddict in self.OSDIST_LIST:
name = ddict['name']
path = ddict['path']
allow_empty = ddict.get('allowempty', False)
has_dist_file, dist_file_content = self._get_dist_file_content(path, allow_empty=allow_empty)
            # the dist file may legitimately be empty when we allow_empty. For example, ArchLinux
            # has an empty /etc/arch-release and a /etc/os-release with a different name
if has_dist_file and allow_empty:
dist_file_facts['distribution'] = name
dist_file_facts['distribution_file_path'] = path
dist_file_facts['distribution_file_variety'] = name
break
if not has_dist_file:
# keep looking
continue
parsed_dist_file, parsed_dist_file_facts = self._parse_dist_file(name, dist_file_content, path, dist_file_facts)
# finally found the right os dist file and were able to parse it
if parsed_dist_file:
dist_file_facts['distribution'] = name
dist_file_facts['distribution_file_path'] = path
# distribution and file_variety are the same here, but distribution
# will be changed/mapped to a more specific name.
# ie, dist=Fedora, file_variety=RedHat
dist_file_facts['distribution_file_variety'] = name
dist_file_facts['distribution_file_parsed'] = parsed_dist_file
dist_file_facts.update(parsed_dist_file_facts)
break
return dist_file_facts
# TODO: FIXME: split distro file parsing into its own module or class
def parse_distribution_file_Slackware(self, name, data, path, collected_facts):
slackware_facts = {}
if 'Slackware' not in data:
return False, slackware_facts # TODO: remove
slackware_facts['distribution'] = name
version = re.findall(r'\w+[.]\w+\+?', data)
if version:
slackware_facts['distribution_version'] = version[0]
return True, slackware_facts
def parse_distribution_file_Amazon(self, name, data, path, collected_facts):
amazon_facts = {}
if 'Amazon' not in data:
return False, amazon_facts
amazon_facts['distribution'] = 'Amazon'
version = [n for n in data.split() if n.isdigit()]
version = version[0] if version else 'NA'
amazon_facts['distribution_version'] = version
return True, amazon_facts
def parse_distribution_file_OpenWrt(self, name, data, path, collected_facts):
openwrt_facts = {}
if 'OpenWrt' not in data:
return False, openwrt_facts # TODO: remove
openwrt_facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
openwrt_facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
openwrt_facts['distribution_release'] = release.groups()[0]
return True, openwrt_facts
def parse_distribution_file_Alpine(self, name, data, path, collected_facts):
alpine_facts = {}
alpine_facts['distribution'] = 'Alpine'
alpine_facts['distribution_version'] = data
return True, alpine_facts
def parse_distribution_file_SUSE(self, name, data, path, collected_facts):
suse_facts = {}
if 'suse' not in data.lower():
return False, suse_facts # TODO: remove if tested without this
if path == '/etc/os-release':
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution:
suse_facts['distribution'] = distribution.group(1).strip('"')
                # example patterns are 13.04, 13.0, 13
distribution_version = re.search(r'^VERSION_ID="?([0-9]+\.?[0-9]*)"?', line)
if distribution_version:
suse_facts['distribution_version'] = distribution_version.group(1)
suse_facts['distribution_major_version'] = distribution_version.group(1).split('.')[0]
if 'open' in data.lower():
release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release:
suse_facts['distribution_release'] = release.groups()[0]
elif 'enterprise' in data.lower() and 'VERSION_ID' in line:
                    # SLES doesn't have funny release names
release = re.search(r'^VERSION_ID="?[0-9]+\.?([0-9]*)"?', line)
if release.group(1):
release = release.group(1)
else:
release = "0" # no minor number, so it is the first release
suse_facts['distribution_release'] = release
# Starting with SLES4SAP12 SP3 NAME reports 'SLES' instead of 'SLES_SAP'
# According to SuSe Support (SR101182877871) we should use the CPE_NAME to detect SLES4SAP
if re.search("^CPE_NAME=.*sles_sap.*$", line):
suse_facts['distribution'] = 'SLES_SAP'
elif path == '/etc/SuSE-release':
if 'open' in data.lower():
data = data.splitlines()
distdata = get_file_content(path).splitlines()[0]
suse_facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
suse_facts['distribution_release'] = release.groups()[0].strip()
elif 'enterprise' in data.lower():
lines = data.splitlines()
distribution = lines[0].split()[0]
if "Server" in data:
suse_facts['distribution'] = "SLES"
elif "Desktop" in data:
suse_facts['distribution'] = "SLED"
for line in lines:
                    release = re.search('PATCHLEVEL = ([0-9]+)', line) # SLES doesn't have funny release names
if release:
suse_facts['distribution_release'] = release.group(1)
suse_facts['distribution_version'] = collected_facts['distribution_version'] + '.' + release.group(1)
return True, suse_facts
def parse_distribution_file_Debian(self, name, data, path, collected_facts):
debian_facts = {}
if 'Debian' in data or 'Raspbian' in data:
debian_facts['distribution'] = 'Debian'
release = re.search(r"PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
# Last resort: try to find release from tzdata as either lsb is missing or this is very old debian
if collected_facts['distribution_release'] == 'NA' and 'Debian' in data:
dpkg_cmd = self.module.get_bin_path('dpkg')
if dpkg_cmd:
cmd = "%s --status tzdata|grep Provides|cut -f2 -d'-'" % dpkg_cmd
rc, out, err = self.module.run_command(cmd)
if rc == 0:
debian_facts['distribution_release'] = out.strip()
elif 'Ubuntu' in data:
debian_facts['distribution'] = 'Ubuntu'
# nothing else to do, Ubuntu gets correct info from python functions
elif 'SteamOS' in data:
debian_facts['distribution'] = 'SteamOS'
# nothing else to do, SteamOS gets correct info from python functions
elif path in ('/etc/lsb-release', '/etc/os-release') and ('Kali' in data or 'Parrot' in data):
if 'Kali' in data:
# Kali does not provide /etc/lsb-release anymore
debian_facts['distribution'] = 'Kali'
elif 'Parrot' in data:
debian_facts['distribution'] = 'Parrot'
release = re.search('DISTRIB_RELEASE=(.*)', data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
elif 'Devuan' in data:
debian_facts['distribution'] = 'Devuan'
release = re.search(r"PRETTY_NAME=\"?[^(\"]+ \(?([^) \"]+)\)?", data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1)
elif 'Cumulus' in data:
debian_facts['distribution'] = 'Cumulus Linux'
version = re.search(r"VERSION_ID=(.*)", data)
if version:
major, _minor, _dummy_ver = version.group(1).split(".")
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = major
release = re.search(r'VERSION="(.*)"', data)
if release:
debian_facts['distribution_release'] = release.groups()[0]
elif "Mint" in data:
debian_facts['distribution'] = 'Linux Mint'
version = re.search(r"VERSION_ID=\"(.*)\"", data)
if version:
debian_facts['distribution_version'] = version.group(1)
debian_facts['distribution_major_version'] = version.group(1).split('.')[0]
else:
return False, debian_facts
return True, debian_facts
def parse_distribution_file_Mandriva(self, name, data, path, collected_facts):
mandriva_facts = {}
if 'Mandriva' in data:
mandriva_facts['distribution'] = 'Mandriva'
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
mandriva_facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
mandriva_facts['distribution_release'] = release.groups()[0]
mandriva_facts['distribution'] = name
else:
return False, mandriva_facts
return True, mandriva_facts
def parse_distribution_file_NA(self, name, data, path, collected_facts):
na_facts = {}
for line in data.splitlines():
distribution = re.search("^NAME=(.*)", line)
if distribution and name == 'NA':
na_facts['distribution'] = distribution.group(1).strip('"')
version = re.search("^VERSION=(.*)", line)
if version and collected_facts['distribution_version'] == 'NA':
na_facts['distribution_version'] = version.group(1).strip('"')
return True, na_facts
def parse_distribution_file_Coreos(self, name, data, path, collected_facts):
coreos_facts = {}
# FIXME: pass in ro copy of facts for this kind of thing
distro = get_distribution()
if distro.lower() == 'coreos':
if not data:
# include fix from #15230, #15228
# TODO: verify this is ok for above bugs
return False, coreos_facts
release = re.search("^GROUP=(.*)", data)
if release:
coreos_facts['distribution_release'] = release.group(1).strip('"')
else:
return False, coreos_facts # TODO: remove if tested without this
return True, coreos_facts
def parse_distribution_file_Flatcar(self, name, data, path, collected_facts):
flatcar_facts = {}
distro = get_distribution()
if distro.lower() == 'flatcar':
if not data:
return False, flatcar_facts
release = re.search("^GROUP=(.*)", data)
if release:
flatcar_facts['distribution_release'] = release.group(1).strip('"')
else:
return False, flatcar_facts
return True, flatcar_facts
def parse_distribution_file_ClearLinux(self, name, data, path, collected_facts):
clear_facts = {}
if "clearlinux" not in name.lower():
return False, clear_facts
pname = re.search('NAME="(.*)"', data)
if pname:
if 'Clear Linux' not in pname.groups()[0]:
return False, clear_facts
clear_facts['distribution'] = pname.groups()[0]
version = re.search('VERSION_ID=(.*)', data)
if version:
clear_facts['distribution_major_version'] = version.groups()[0]
clear_facts['distribution_version'] = version.groups()[0]
release = re.search('ID=(.*)', data)
if release:
clear_facts['distribution_release'] = release.groups()[0]
return True, clear_facts
class Distribution(object):
"""
This subclass of Facts fills the distribution, distribution_version and distribution_release variables
To do so it checks the existence and content of typical files in /etc containing distribution information
This is unit tested. Please extend the tests to cover all distributions if you have them available.
"""
# every distribution name mentioned here, must have one of
# - allowempty == True
# - be listed in SEARCH_STRING
# - have a function get_distribution_DISTNAME implemented
OSDIST_LIST = (
{'path': '/etc/oracle-release', 'name': 'OracleLinux'},
{'path': '/etc/slackware-version', 'name': 'Slackware'},
{'path': '/etc/redhat-release', 'name': 'RedHat'},
{'path': '/etc/vmware-release', 'name': 'VMwareESX', 'allowempty': True},
{'path': '/etc/openwrt_release', 'name': 'OpenWrt'},
{'path': '/etc/system-release', 'name': 'Amazon'},
{'path': '/etc/alpine-release', 'name': 'Alpine'},
{'path': '/etc/arch-release', 'name': 'Archlinux', 'allowempty': True},
{'path': '/etc/os-release', 'name': 'SUSE'},
{'path': '/etc/SuSE-release', 'name': 'SUSE'},
{'path': '/etc/gentoo-release', 'name': 'Gentoo'},
{'path': '/etc/os-release', 'name': 'Debian'},
{'path': '/etc/lsb-release', 'name': 'Mandriva'},
{'path': '/etc/altlinux-release', 'name': 'Altlinux'},
{'path': '/etc/sourcemage-release', 'name': 'SMGL'},
{'path': '/usr/lib/os-release', 'name': 'ClearLinux'},
{'path': '/etc/coreos/update.conf', 'name': 'Coreos'},
{'path': '/etc/flatcar/update.conf', 'name': 'Flatcar'},
{'path': '/etc/os-release', 'name': 'NA'},
)
SEARCH_STRING = {
'OracleLinux': 'Oracle Linux',
'RedHat': 'Red Hat',
'Altlinux': 'ALT Linux',
'ClearLinux': 'Clear Linux Software for Intel Architecture',
'SMGL': 'Source Mage GNU/Linux',
}
# keep keys in sync with Conditionals page of docs
OS_FAMILY_MAP = {'RedHat': ['RedHat', 'Fedora', 'CentOS', 'Scientific', 'SLC',
'Ascendos', 'CloudLinux', 'PSBM', 'OracleLinux', 'OVS',
'OEL', 'Amazon', 'Virtuozzo', 'XenServer', 'Alibaba',
'EulerOS', 'openEuler'],
'Debian': ['Debian', 'Ubuntu', 'Raspbian', 'Neon', 'KDE neon',
'Linux Mint', 'SteamOS', 'Devuan', 'Kali', 'Cumulus Linux',
'Pop!_OS', 'Parrot', 'Pardus GNU/Linux'],
'Suse': ['SuSE', 'SLES', 'SLED', 'openSUSE', 'openSUSE Tumbleweed',
'SLES_SAP', 'SUSE_LINUX', 'openSUSE Leap'],
'Archlinux': ['Archlinux', 'Antergos', 'Manjaro'],
'Mandrake': ['Mandrake', 'Mandriva'],
'Solaris': ['Solaris', 'Nexenta', 'OmniOS', 'OpenIndiana', 'SmartOS'],
'Slackware': ['Slackware'],
'Altlinux': ['Altlinux'],
                     'SMGL': ['SMGL'],
'Gentoo': ['Gentoo', 'Funtoo'],
'Alpine': ['Alpine'],
'AIX': ['AIX'],
'HP-UX': ['HPUX'],
'Darwin': ['MacOSX'],
'FreeBSD': ['FreeBSD', 'TrueOS'],
'ClearLinux': ['Clear Linux OS', 'Clear Linux Mix'],
'DragonFly': ['DragonflyBSD', 'DragonFlyBSD', 'Gentoo/DragonflyBSD', 'Gentoo/DragonFlyBSD']}
OS_FAMILY = {}
for family, names in OS_FAMILY_MAP.items():
for name in names:
OS_FAMILY[name] = family
def __init__(self, module):
self.module = module
def get_distribution_facts(self):
distribution_facts = {}
# The platform module provides information about the running
# system/distribution. Use this as a baseline and fix buggy systems
# afterwards
system = platform.system()
distribution_facts['distribution'] = system
distribution_facts['distribution_release'] = platform.release()
distribution_facts['distribution_version'] = platform.version()
systems_implemented = ('AIX', 'HP-UX', 'Darwin', 'FreeBSD', 'OpenBSD', 'SunOS', 'DragonFly', 'NetBSD')
if system in systems_implemented:
cleanedname = system.replace('-', '')
distfunc = getattr(self, 'get_distribution_' + cleanedname)
dist_func_facts = distfunc()
distribution_facts.update(dist_func_facts)
elif system == 'Linux':
distribution_files = DistributionFiles(module=self.module)
# linux_distribution_facts = LinuxDistribution(module).get_distribution_facts()
dist_file_facts = distribution_files.process_dist_files()
distribution_facts.update(dist_file_facts)
distro = distribution_facts['distribution']
        # look for an os family alias for the 'distribution'; if there isn't one, use 'distribution'
distribution_facts['os_family'] = self.OS_FAMILY.get(distro, None) or distro
return distribution_facts
def get_distribution_AIX(self):
aix_facts = {}
rc, out, err = self.module.run_command("/usr/bin/oslevel")
data = out.split('.')
aix_facts['distribution_major_version'] = data[0]
if len(data) > 1:
aix_facts['distribution_version'] = '%s.%s' % (data[0], data[1])
aix_facts['distribution_release'] = data[1]
else:
aix_facts['distribution_version'] = data[0]
return aix_facts
def get_distribution_HPUX(self):
hpux_facts = {}
rc, out, err = self.module.run_command(r"/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search(r'HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
hpux_facts['distribution_version'] = data.groups()[0]
hpux_facts['distribution_release'] = data.groups()[1]
return hpux_facts
def get_distribution_Darwin(self):
darwin_facts = {}
darwin_facts['distribution'] = 'MacOSX'
rc, out, err = self.module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
if data:
darwin_facts['distribution_major_version'] = data.split('.')[0]
darwin_facts['distribution_version'] = data
return darwin_facts
def get_distribution_FreeBSD(self):
freebsd_facts = {}
freebsd_facts['distribution_release'] = platform.release()
data = re.search(r'(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', freebsd_facts['distribution_release'])
if 'trueos' in platform.version():
freebsd_facts['distribution'] = 'TrueOS'
if data:
freebsd_facts['distribution_major_version'] = data.group(1)
freebsd_facts['distribution_version'] = '%s.%s' % (data.group(1), data.group(2))
return freebsd_facts
def get_distribution_OpenBSD(self):
openbsd_facts = {}
openbsd_facts['distribution_version'] = platform.release()
rc, out, err = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.match(r'OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
openbsd_facts['distribution_release'] = match.groups()[0]
else:
openbsd_facts['distribution_release'] = 'release'
return openbsd_facts
def get_distribution_DragonFly(self):
dragonfly_facts = {
'distribution_release': platform.release()
}
rc, out, dummy = self.module.run_command("/sbin/sysctl -n kern.version")
match = re.search(r'v(\d+)\.(\d+)\.(\d+)-(RELEASE|STABLE|CURRENT).*', out)
if match:
dragonfly_facts['distribution_major_version'] = match.group(1)
dragonfly_facts['distribution_version'] = '%s.%s.%s' % match.groups()[:3]
return dragonfly_facts
def get_distribution_NetBSD(self):
netbsd_facts = {}
# FIXME: poking at self.facts, should eventually make these each a collector
platform_release = platform.release()
netbsd_facts['distribution_major_version'] = platform_release.split('.')[0]
return netbsd_facts
def get_distribution_SMGL(self):
smgl_facts = {}
smgl_facts['distribution'] = 'Source Mage GNU/Linux'
return smgl_facts
def get_distribution_SunOS(self):
sunos_facts = {}
data = get_file_content('/etc/release').splitlines()[0]
if 'Solaris' in data:
# for solaris 10 uname_r will contain 5.10, for solaris 11 it will have 5.11
uname_r = get_uname(self.module, flags=['-r'])
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ', '')
ora_prefix = 'Oracle '
sunos_facts['distribution'] = data.split()[0]
sunos_facts['distribution_version'] = data.split()[1]
sunos_facts['distribution_release'] = ora_prefix + data
sunos_facts['distribution_major_version'] = uname_r.split('.')[1].rstrip()
return sunos_facts
uname_v = get_uname(self.module, flags=['-v'])
distribution_version = None
if 'SmartOS' in data:
sunos_facts['distribution'] = 'SmartOS'
if _file_exists('/etc/product'):
product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').splitlines() if ': ' in l])
if 'Image' in product_data:
distribution_version = product_data.get('Image').split()[-1]
elif 'OpenIndiana' in data:
sunos_facts['distribution'] = 'OpenIndiana'
elif 'OmniOS' in data:
sunos_facts['distribution'] = 'OmniOS'
distribution_version = data.split()[-1]
elif uname_v is not None and 'NexentaOS_' in uname_v:
sunos_facts['distribution'] = 'Nexenta'
distribution_version = data.split()[-1].lstrip('v')
if sunos_facts.get('distribution', '') in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
sunos_facts['distribution_release'] = data.strip()
if distribution_version is not None:
sunos_facts['distribution_version'] = distribution_version
elif uname_v is not None:
sunos_facts['distribution_version'] = uname_v.splitlines()[0].strip()
return sunos_facts
return sunos_facts
class DistributionFactCollector(BaseFactCollector):
name = 'distribution'
_fact_ids = set(['distribution_version',
'distribution_release',
'distribution_major_version',
'os_family'])
def collect(self, module=None, collected_facts=None):
collected_facts = collected_facts or {}
facts_dict = {}
if not module:
return facts_dict
distribution = Distribution(module=module)
distro_facts = distribution.get_distribution_facts()
return distro_facts
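# --- Hypothetical usage sketch (not part of ansible) --------------------------
# DistributionFactCollector.collect() expects an AnsibleModule-like object that
# provides run_command(), get_bin_path() and debug().  The minimal stub below is
# an assumption made purely for illustration; inside Ansible the real
# AnsibleModule instance is supplied by the fact-gathering machinery.
if __name__ == '__main__':
    import subprocess
    class _StubModule(object):
        def run_command(self, cmd, use_unsafe_shell=False):
            # accept both list and shell-string commands, roughly like AnsibleModule
            shell = use_unsafe_shell or isinstance(cmd, str)
            proc = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            out, err = proc.communicate()
            return proc.returncode, out.decode('utf-8', 'replace'), err.decode('utf-8', 'replace')
        def get_bin_path(self, name):
            # crude $PATH lookup, good enough for a demo
            for directory in os.environ.get('PATH', '').split(os.pathsep):
                candidate = os.path.join(directory, name)
                if os.path.isfile(candidate):
                    return candidate
            return None
        def debug(self, msg):
            print('DEBUG: %s' % msg)
    facts = DistributionFactCollector().collect(module=_StubModule())
    for key in sorted(facts):
        print('%s = %s' % (key, facts[key]))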
|
gpl-3.0
| -1,558,401,853,560,547,600
| 43.969697
| 129
| 0.566872
| false
| 3.904285
| false
| false
| false
|
lsandig/apollon
|
ag.py
|
1
|
6681
|
#! /usr/bin/python3
# Command line program to create svg apollonian circles
# Copyright (c) 2014 Ludger Sandig
# This file is part of apollon.
# Apollon is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Apollon is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Apollon. If not, see <http://www.gnu.org/licenses/>.
import argparse
import sys
import math
from apollon import ApollonianGasket
from coloring import ColorMap, ColorScheme
def parseArguments(argv, colors):
description = "Generate Apollonian Gaskets and save as svg"
name = argv[0]
colors.append('none')
colors.sort()
parser = argparse.ArgumentParser(description=description, prog=name)
parser.add_argument("-d", "--depth", metavar="D", type=int, default=3, help="Recursion depth, generates 2*3^{D+1} circles. Usually safe for D<=10. For higher D use --force if you know what you are doing.")
parser.add_argument("-o", "--output", metavar="", type=str, default="", help="Output file name. If left blank, default is created from circle curvatures.")
parser.add_argument("-r", "--radii", action="store_true", default=False, help="Interpret c1, c2, c3 as radii and not as curvatures")
parser.add_argument("--color", choices=colors, metavar='SCHEME', default='none', help="Color Scheme. Choose from "+", ".join(colors))
parser.add_argument("--treshold", metavar='T', default=0.005, type=float, help="Don't save circles that are too small. Useful for higher depths to reduce filesize.")
parser.add_argument("--force", action="store_true", default=False, help="Use if you want a higher recursion depth than 10.")
parser.add_argument("c1", type=float, help="Curvature of first circle")
parser.add_argument("c2", type=float, help="Curvature of second circle")
parser.add_argument("c3", type=float, help="Curvature of third circle")
return parser.parse_args()
def colorMsg(color):
print("Available color schemes (name: resmin -- resmax)")
for i in color.info():
print("%s: %d -- %d" % (i["name"], i["low"], i["high"]))
def ag_to_svg(circles, colors, tresh=0.005):
"""
Convert a list of circles to svg, optionally color them.
@param circles: A list of L{Circle}s
@param colors: A L{ColorMap} object
@param tresh: Only circles with a radius greater than the product of tresh and maximal radius are saved
"""
svg = []
# Find the biggest circle, which hopefully is the enclosing one
# and has a negative radius because of this. Note that this does
# not have to be the case if we picked an unlucky set of radii at
# the start. If that was the case, we're screwed now.
big = min(circles, key=lambda c: c.r.real)
# Move biggest circle to front so it gets drawn first
circles.remove(big)
circles.insert(0, big)
if big.r.real < 0:
# Bounding box from biggest circle, lower left corner and two
# times the radius as width
corner = big.m - ( abs(big.r) + abs(big.r) * 1j )
vbwidth = abs(big.r)*2
width = 500 # Hardcoded!
# Line width independent of circle size
lw = (vbwidth/width)
svg.append('<svg xmlns="http://www.w3.org/2000/svg" width="%f" height="%f" viewBox="%f %f %f %f">\n' % (width, width, corner.real, corner.imag, vbwidth, vbwidth))
# Keep stroke width relative
svg.append('<g stroke-width="%f">\n' % lw)
# Iterate through circle list, circles with radius<radmin
# will not be saved because they are too small for printing.
radmin = tresh * abs(big.r)
for c in circles:
if abs(c.r) > radmin:
fill = colors.color_for(abs(c.r))
svg.append(( '<circle cx="%f" cy="%f" r="%f" fill="%s" stroke="black"/>\n' % (c.m.real, c.m.imag, abs(c.r), fill)))
svg.append('</g>\n')
svg.append('</svg>\n')
return ''.join(svg)
def impossible_combination(c1, c2, c3):
# If any curvatures x, y, z satisfy the equation
# x = 2*sqrt(y*z) + y + z
    # then no fourth enclosing circle can be generated, because it
    # would be a line.
    # We need to check for each of c1, c2, c3 whether it could be "x".
impossible = False
sets = [(c1,c2,c3), (c2,c3,c1), (c3,c1,c2)]
for (x, y, z) in sets:
if x == 2*math.sqrt(y*z) + y + z:
impossible = True
return impossible
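# A small self-contained sketch (not part of apollon's command line interface):
# it wires the pieces above together without argparse, using the classic
# integral gasket with start curvatures (2, 2, 3).  The function name and the
# default file name are invented for illustration; nothing calls it here.
def _demo_gasket(filename="demo_gasket.svg", depth=4):
    if impossible_combination(2.0, 2.0, 3.0):
        raise ValueError("unexpected: (2, 2, 3) should yield a valid gasket")
    ag = ApollonianGasket(2.0, 2.0, 3.0)
    ag.generate(depth)
    # ColorMap('none') applies no color scheme, mirroring --color none in main()
    svg = ag_to_svg(ag.genCircles, ColorMap('none'))
    with open(filename, "w") as f:
        f.write(svg)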
def main():
color = ColorScheme("colorbrewer.json")
available = [d['name'] for d in color.info()]
args = parseArguments(sys.argv, available)
# Sanity checks
for c in [args.c1, args.c2, args.c3]:
if c == 0:
print("Error: curvature or radius can't be 0")
exit(1)
if impossible_combination(args.c1, args.c2, args.c3):
print("Error: no apollonian gasket possible for these curvatures")
exit(1)
# Given curvatures were in fact radii, so take the reciprocal
if args.radii:
args.c1 = 1/args.c1
args.c2 = 1/args.c2
args.c3 = 1/args.c3
ag = ApollonianGasket(args.c1, args.c2, args.c3)
# At a recursion depth > 10 things start to get serious.
if args.depth > 10:
if not args.force:
print("Note: Number of cicles increases exponentially with 2*3^{D+1} at depth D.\nIf you want to use D>10, specify the --force option.")
args.depth = 10
ag.generate(args.depth)
# Get smallest and biggest radius
smallest = abs(min(ag.genCircles, key=lambda c: abs(c.r.real)).r.real)
biggest = abs(max(ag.genCircles, key=lambda c: abs(c.r.real)).r.real)
# Construct color map
if args.color == 'none':
mp = ColorMap('none')
else:
# TODO: resolution of 8 is hardcoded, some color schemes have
# resolutions up to 11. Make this configurable.
mp = color.makeMap(smallest, biggest, args.color, 8)
svg = ag_to_svg(ag.genCircles, mp, tresh=args.treshold)
# User supplied filename? If not, we need to construct something.
if len(args.output) == 0:
args.output = 'ag_%.4f_%.4f_%.4f.svg' % (args.c1, args.c2, args.c3)
with open(args.output, 'w') as f:
f.write(svg)
f.close()
if __name__ == "__main__":
main()
|
gpl-3.0
| 5,160,250,914,175,473,000
| 36.960227
| 209
| 0.640024
| false
| 3.369138
| false
| false
| false
|
AustereCuriosity/astropy
|
astropy/config/paths.py
|
1
|
10744
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
""" This module contains functions to determine where configuration and
data/cache files used by Astropy should be placed.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..extern import six
from ..utils.decorators import wraps
import os
import shutil
import sys
__all__ = ['get_config_dir', 'get_cache_dir', 'set_temp_config',
'set_temp_cache']
def _find_home():
""" Locates and return the home directory (or best approximation) on this
system.
Raises
------
OSError
        If the home directory cannot be located - this usually means you are running
Astropy on some obscure platform that doesn't have standard home
directories.
"""
    # this is used below to fix up encoding issues that sometimes crop up
# in py2.x but not in py3.x
if six.PY2:
decodepath = lambda pth: pth.decode(sys.getfilesystemencoding())
else:
decodepath = lambda pth: pth
# First find the home directory - this is inspired by the scheme ipython
# uses to identify "home"
if os.name == 'posix':
# Linux, Unix, AIX, OS X
if 'HOME' in os.environ:
homedir = decodepath(os.environ['HOME'])
else:
raise OSError('Could not find unix home directory to search for '
'astropy config dir')
elif os.name == 'nt': # This is for all modern Windows (NT or after)
if 'MSYSTEM' in os.environ and os.environ.get('HOME'):
# Likely using an msys shell; use whatever it is using for its
# $HOME directory
homedir = decodepath(os.environ['HOME'])
# Next try for a network home
elif 'HOMESHARE' in os.environ:
homedir = decodepath(os.environ['HOMESHARE'])
# See if there's a local home
elif 'HOMEDRIVE' in os.environ and 'HOMEPATH' in os.environ:
homedir = os.path.join(os.environ['HOMEDRIVE'],
os.environ['HOMEPATH'])
homedir = decodepath(homedir)
# Maybe a user profile?
elif 'USERPROFILE' in os.environ:
homedir = decodepath(os.path.join(os.environ['USERPROFILE']))
else:
try:
from ..extern.six.moves import winreg as wreg
shell_folders = r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
key = wreg.OpenKey(wreg.HKEY_CURRENT_USER, shell_folders)
homedir = wreg.QueryValueEx(key, 'Personal')[0]
homedir = decodepath(homedir)
key.Close()
except Exception:
# As a final possible resort, see if HOME is present
if 'HOME' in os.environ:
homedir = decodepath(os.environ['HOME'])
else:
raise OSError('Could not find windows home directory to '
'search for astropy config dir')
else:
# for other platforms, try HOME, although it probably isn't there
if 'HOME' in os.environ:
homedir = decodepath(os.environ['HOME'])
else:
raise OSError('Could not find a home directory to search for '
                          'astropy config dir - are you on an unsupported '
'platform?')
return homedir
def get_config_dir(create=True):
"""
Determines the Astropy configuration directory name and creates the
directory if it doesn't exist.
This directory is typically ``$HOME/.astropy/config``, but if the
XDG_CONFIG_HOME environment variable is set and the
``$XDG_CONFIG_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
configdir : str
The absolute path to the configuration directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_config, that overrides all
if set_temp_config._temp_path is not None:
xch = set_temp_config._temp_path
config_path = os.path.join(xch, 'astropy')
if not os.path.exists(config_path):
os.mkdir(config_path)
return os.path.abspath(config_path)
# first look for XDG_CONFIG_HOME
xch = os.environ.get('XDG_CONFIG_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('config', linkto))
def get_cache_dir():
"""
Determines the Astropy cache directory name and creates the directory if it
doesn't exist.
This directory is typically ``$HOME/.astropy/cache``, but if the
XDG_CACHE_HOME environment variable is set and the
``$XDG_CACHE_HOME/astropy`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
cachedir : str
The absolute path to the cache directory.
"""
# symlink will be set to this if the directory is created
linkto = None
# If using set_temp_cache, that overrides all
if set_temp_cache._temp_path is not None:
xch = set_temp_cache._temp_path
cache_path = os.path.join(xch, 'astropy')
if not os.path.exists(cache_path):
os.mkdir(cache_path)
return os.path.abspath(cache_path)
# first look for XDG_CACHE_HOME
xch = os.environ.get('XDG_CACHE_HOME')
if xch is not None and os.path.exists(xch):
xchpth = os.path.join(xch, 'astropy')
if not os.path.islink(xchpth):
if os.path.exists(xchpth):
return os.path.abspath(xchpth)
else:
linkto = xchpth
return os.path.abspath(_find_or_create_astropy_dir('cache', linkto))
class _SetTempPath(object):
_temp_path = None
_default_path_getter = None
def __init__(self, path=None, delete=False):
if path is not None:
path = os.path.abspath(path)
self._path = path
self._delete = delete
self._prev_path = self.__class__._temp_path
def __enter__(self):
self.__class__._temp_path = self._path
return self._default_path_getter()
def __exit__(self, *args):
self.__class__._temp_path = self._prev_path
if self._delete and self._path is not None:
shutil.rmtree(self._path)
def __call__(self, func):
"""Implements use as a decorator."""
@wraps(func)
def wrapper(*args, **kwargs):
with self:
func(*args, **kwargs)
return wrapper
class set_temp_config(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy config, primarily
for use with testing.
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the config path
just within that function.
Parameters
----------
path : str, optional
The directory (which must exist) in which to find the Astropy config
files, or create them if they do not already exist. If None, this
restores the config path to the user's default config path as returned
by `get_config_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_config_dir)
def __enter__(self):
# Special case for the config case, where we need to reset all the
# cached config objects
from .configuration import _cfgobjs
path = super(set_temp_config, self).__enter__()
_cfgobjs.clear()
return path
def __exit__(self, *args):
from .configuration import _cfgobjs
super(set_temp_config, self).__exit__(*args)
_cfgobjs.clear()
class set_temp_cache(_SetTempPath):
"""
Context manager to set a temporary path for the Astropy download cache,
primarily for use with testing (though there may be other applications
for setting a different cache directory, for example to switch to a cache
dedicated to large files).
If the path set by this context manager does not already exist it will be
created, if possible.
This may also be used as a decorator on a function to set the cache path
just within that function.
Parameters
----------
    path : str, optional
The directory (which must exist) in which to find the Astropy cache
files, or create them if they do not already exist. If None, this
restores the cache path to the user's default cache path as returned
by `get_cache_dir` as though this context manager were not in effect
(this is useful for testing). In this case the ``delete`` argument is
always ignored.
delete : bool, optional
If True, cleans up the temporary directory after exiting the temp
context (default: False).
"""
_default_path_getter = staticmethod(get_cache_dir)
def _find_or_create_astropy_dir(dirnm, linkto):
innerdir = os.path.join(_find_home(), '.astropy')
maindir = os.path.join(_find_home(), '.astropy', dirnm)
if not os.path.exists(maindir):
# first create .astropy dir if needed
if not os.path.exists(innerdir):
try:
os.mkdir(innerdir)
except OSError:
if not os.path.isdir(innerdir):
raise
elif not os.path.isdir(innerdir):
msg = 'Intended Astropy directory {0} is actually a file.'
raise IOError(msg.format(innerdir))
try:
os.mkdir(maindir)
except OSError:
if not os.path.isdir(maindir):
raise
if (not sys.platform.startswith('win') and
linkto is not None and
not os.path.exists(linkto)):
os.symlink(maindir, linkto)
elif not os.path.isdir(maindir):
msg = 'Intended Astropy {0} directory {1} is actually a file.'
raise IOError(msg.format(dirnm, maindir))
return os.path.abspath(maindir)
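# --- Hypothetical usage sketch (not part of astropy) --------------------------
# A short demonstration of the two temp-path helpers above, used both as a
# context manager and as a decorator.  The function is illustrative only and is
# never called from this module; the scratch directories come from tempfile.
def _demo_temp_paths():
    import tempfile
    scratch_config = tempfile.mkdtemp(prefix='astropy-demo-config-')
    # context-manager form: inside the block get_config_dir() resolves to a
    # directory under scratch_config; delete=True removes it again on exit
    with set_temp_config(scratch_config, delete=True) as config_dir:
        print('temporary config dir:', config_dir)
    # decorator form: the wrapped function runs with the cache dir redirected
    scratch_cache = tempfile.mkdtemp(prefix='astropy-demo-cache-')
    @set_temp_cache(scratch_cache, delete=True)
    def _uses_cache():
        print('temporary cache dir:', get_cache_dir())
    _uses_cache()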
|
bsd-3-clause
| 6,146,739,670,736,519,000
| 33.107937
| 99
| 0.612249
| false
| 4.091394
| true
| false
| false
|
sqall01/alertR
|
alertClientExecuter/alertRclient.py
|
1
|
12421
|
#!/usr/bin/env python3
# written by sqall
# twitter: https://twitter.com/sqall01
# blog: https://h4des.org
# github: https://github.com/sqall01
#
# Licensed under the GNU Affero General Public License, version 3.
import sys
import os
import stat
from lib import ServerCommunication, ConnectionWatchdog, Receiver
from lib import SMTPAlert
from lib import ExecuterAlert, AlertEventHandler
from lib import GlobalData
import logging
import time
import random
import xml.etree.ElementTree
# Function creates a path location for the given user input.
def make_path(input_location: str) -> str:
# Do nothing if the given location is an absolute path.
if input_location[0] == "/":
return input_location
# Replace ~ with the home directory.
elif input_location[0] == "~":
pos = -1
for i in range(1, len(input_location)):
if input_location[i] == "/":
continue
pos = i
break
if pos == -1:
return os.environ["HOME"]
return os.path.join(os.environ["HOME"], input_location[pos:])
# Assume we have a given relative path.
return os.path.join(os.path.dirname(os.path.abspath(__file__)), input_location)
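# Illustrative examples (assuming HOME=/home/alice and this script living in
# /opt/alertRclient; both names are placeholders):
#   make_path("/etc/ssl/ca.pem")     -> "/etc/ssl/ca.pem"
#   make_path("~/certs/client.pem")  -> "/home/alice/certs/client.pem"
#   make_path("config/config.xml")   -> "/opt/alertRclient/config/config.xml"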
if __name__ == '__main__':
    # generate the object holding the globally needed data
globalData = GlobalData()
fileName = os.path.basename(__file__)
# parse config file, get logfile configurations
# and initialize logging
try:
configRoot = xml.etree.ElementTree.parse(globalData.configFile).getroot()
logfile = make_path(str(configRoot.find("general").find("log").attrib["file"]))
# parse chosen log level
tempLoglevel = str(configRoot.find("general").find("log").attrib["level"])
tempLoglevel = tempLoglevel.upper()
if tempLoglevel == "DEBUG":
loglevel = logging.DEBUG
elif tempLoglevel == "INFO":
loglevel = logging.INFO
elif tempLoglevel == "WARNING":
loglevel = logging.WARNING
elif tempLoglevel == "ERROR":
loglevel = logging.ERROR
elif tempLoglevel == "CRITICAL":
loglevel = logging.CRITICAL
else:
raise ValueError("No valid log level in config file.")
# initialize logging
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
filename=logfile,
level=loglevel)
except Exception as e:
print("Config could not be parsed.")
print(e)
sys.exit(1)
# parse the rest of the config with initialized logging
try:
# Check file permission of config file (do not allow it to be accessible by others).
config_stat = os.stat(globalData.configFile)
if (config_stat.st_mode & stat.S_IROTH
or config_stat.st_mode & stat.S_IWOTH
or config_stat.st_mode & stat.S_IXOTH):
raise ValueError("Config file is accessible by others. Please remove file permissions for others.")
# check if config and client version are compatible
version = float(configRoot.attrib["version"])
if version != globalData.version:
raise ValueError("Config version '%.3f' not "
% version
+ "compatible with client version '%.3f'."
% globalData.version)
# parse server configurations
server = str(configRoot.find("general").find("server").attrib["host"])
serverPort = int(configRoot.find("general").find("server").attrib["port"])
# get server certificate file and check if it does exist
serverCAFile = os.path.abspath(make_path(str(configRoot.find("general").find("server").attrib["caFile"])))
if os.path.exists(serverCAFile) is False:
raise ValueError("Server CA does not exist.")
# get client certificate and keyfile (if required)
certificateRequired = (str(configRoot.find("general").find(
"client").attrib["certificateRequired"]).upper() == "TRUE")
if certificateRequired is True:
clientCertFile = os.path.abspath(
make_path(str(configRoot.find("general").find("client").attrib["certFile"])))
clientKeyFile = os.path.abspath(
make_path(str(configRoot.find("general").find("client").attrib["keyFile"])))
if (os.path.exists(clientCertFile) is False
or os.path.exists(clientKeyFile) is False):
raise ValueError("Client certificate or key does not exist.")
key_stat = os.stat(clientKeyFile)
if (key_stat.st_mode & stat.S_IROTH
or key_stat.st_mode & stat.S_IWOTH
or key_stat.st_mode & stat.S_IXOTH):
raise ValueError("Client key is accessible by others. Please remove file permissions for others.")
else:
clientCertFile = None
clientKeyFile = None
# get user credentials
username = str(configRoot.find("general").find("credentials").attrib["username"])
password = str(configRoot.find("general").find("credentials").attrib["password"])
# Get connection settings.
temp = (str(configRoot.find("general").find("connection").attrib["persistent"]).upper() == "TRUE")
if temp:
globalData.persistent = 1
else:
globalData.persistent = 0
# parse smtp options if activated
smtpActivated = (str(configRoot.find("smtp").find("general").attrib["activated"]).upper() == "TRUE")
smtpServer = ""
smtpPort = -1
smtpFromAddr = ""
smtpToAddr = ""
if smtpActivated is True:
smtpServer = str(configRoot.find("smtp").find("server").attrib["host"])
smtpPort = int(configRoot.find("smtp").find("server").attrib["port"])
smtpFromAddr = str(configRoot.find("smtp").find("general").attrib["fromAddr"])
smtpToAddr = str(configRoot.find("smtp").find("general").attrib["toAddr"])
# parse all alerts
for item in configRoot.find("alerts").iterfind("alert"):
alert = ExecuterAlert()
# Get executer specific values.
temp_execute = make_path(str(item.find("executer").attrib["execute"]))
alert.cmd_triggered_list.append(temp_execute)
alert.cmd_normal_list.append(temp_execute)
alert.cmd_profile_change_list.append(temp_execute)
# Parse all arguments that are used for the command when
# a sensor alert with state "triggered" is received.
cmd_triggered_activated = str(item.find("executer").find("triggered").attrib["activated"]).upper() == "TRUE"
alert.cmd_triggered_activated = cmd_triggered_activated
if cmd_triggered_activated:
for argument in item.find("executer").find("triggered").iterfind("argument"):
alert.cmd_triggered_list.append(str(argument.text))
# Parse all arguments that are used for the command when
# a sensor alert with state "normal" is received.
cmd_normal_activated = str(item.find("executer").find("normal").attrib["activated"]).upper() == "TRUE"
alert.cmd_normal_activated = cmd_normal_activated
if cmd_normal_activated:
for argument in item.find("executer").find("normal").iterfind("argument"):
alert.cmd_normal_list.append(str(argument.text))
# Parse all arguments that are used for the command when
# a profile change message is received.
cmd_profile_change_activated = str(
item.find("executer").find("profilechange").attrib["activated"]).upper() == "TRUE"
alert.cmd_profile_change_activated = cmd_profile_change_activated
if cmd_profile_change_activated:
for profile in item.find("executer").find("profilechange").iterfind("profile"):
alert.cmd_profile_change_target_profiles.add(int(profile.text))
for argument in item.find("executer").find("profilechange").iterfind("argument"):
alert.cmd_profile_change_list.append(str(argument.text))
# these options are needed by the server to
# differentiate between the registered alerts
alert.id = int(item.find("general").attrib["id"])
alert.description = str(item.find("general").attrib["description"])
alert.alertLevels = list()
for alertLevelXml in item.iterfind("alertLevel"):
alert.alertLevels.append(int(alertLevelXml.text))
# check if description is empty
if len(alert.description) == 0:
raise ValueError("Description of alert %d is empty."
% alert.id)
# check if the id of the alert is unique
for registeredAlert in globalData.alerts:
if registeredAlert.id == alert.id:
raise ValueError("Id of alert %d is already taken."
% alert.id)
if cmd_profile_change_activated and not alert.cmd_profile_change_target_profiles:
raise ValueError("No profiles set for profilechange of alert %d."
% alert.id)
globalData.alerts.append(alert)
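        # For reference, a minimal sketch of the <alert> block this loop expects,
        # reconstructed from the parsing above (the id, description, path and
        # argument values are made-up examples, not a canonical configuration):
        #
        #   <alert>
        #       <general id="0" description="pause music player" />
        #       <alertLevel>0</alertLevel>
        #       <executer execute="/path/to/script.sh">
        #           <triggered activated="True">
        #               <argument>triggered</argument>
        #           </triggered>
        #           <normal activated="True">
        #               <argument>normal</argument>
        #           </normal>
        #           <profilechange activated="False" />
        #       </executer>
        #   </alert>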
except Exception as e:
logging.exception("[%s]: Could not parse config." % fileName)
sys.exit(1)
random.seed()
# check if smtp is activated => generate object to send eMail alerts
if smtpActivated is True:
globalData.smtpAlert = SMTPAlert(smtpServer, smtpPort, smtpFromAddr, smtpToAddr)
else:
globalData.smtpAlert = None
# generate object for the communication to the server and connect to it
globalData.serverComm = ServerCommunication(server,
serverPort,
serverCAFile,
username,
password,
clientCertFile,
clientKeyFile,
AlertEventHandler(globalData),
globalData)
connectionRetries = 1
logging.info("[%s]: Connecting to server." % fileName)
while True:
# check if 5 unsuccessful attempts are made to connect
# to the server and if smtp alert is activated
# => send eMail alert
if (globalData.smtpAlert is not None
and (connectionRetries % 5) == 0):
globalData.smtpAlert.sendCommunicationAlert(connectionRetries)
if globalData.serverComm.initialize() is True:
# if smtp alert is activated
# => send email that communication problems are solved
if globalData.smtpAlert is not None:
globalData.smtpAlert.sendCommunicationAlertClear()
connectionRetries = 1
break
connectionRetries += 1
logging.critical("[%s]: Connecting to server failed. Try again in 5 seconds." % fileName)
time.sleep(5)
# when connected => generate watchdog object to monitor the
# server connection
logging.info("[%s]: Starting watchdog thread." % fileName)
watchdog = ConnectionWatchdog(globalData.serverComm,
globalData.pingInterval,
globalData.smtpAlert)
# set thread to daemon
# => threads terminates when main thread terminates
watchdog.daemon = True
watchdog.start()
# initialize all alerts
logging.info("[%s] Initializing alerts." % fileName)
for alert in globalData.alerts:
alert.initialize()
logging.info("[%s]: Client started." % fileName)
# generate receiver to handle incoming data (for example status updates)
# (note: we will not return from the receiver unless the client is terminated)
receiver = Receiver(globalData.serverComm)
receiver.run()
|
agpl-3.0
| 694,129,595,070,432,500
| 41.979239
| 120
| 0.591901
| false
| 4.605488
| true
| false
| false
|
kwikteam/phy
|
tools/api.py
|
1
|
7682
|
# -*- coding: utf-8 -*-
"""Minimal API documentation generation."""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
from importlib import import_module
import inspect
import os.path as op
import re
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
def _name(obj):
if hasattr(obj, '__name__'):
return obj.__name__
elif inspect.isdatadescriptor(obj):
return obj.fget.__name__
def _full_name(subpackage, obj):
return '{}.{}'.format(subpackage.__name__, _name(obj))
def _anchor(name):
anchor = name.lower().replace(' ', '-')
anchor = re.sub(r'[^\w\- ]', '', anchor)
return anchor
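# A quick illustration of the normalisation above (the names are just example
# identifiers, not necessarily real anchors in the generated document):
#
#   _anchor('phy.utils.Bunch')           # -> 'phyutilsbunch'
#   _anchor('API documentation of phy')  # -> 'api-documentation-of-phy'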
_docstring_header_pattern = re.compile(r'^([^\n]+)\n[\-\=]{3,}$', flags=re.MULTILINE)
_docstring_parameters_pattern = re.compile(r'^([^ \n]+) \: ([^\n]+)$', flags=re.MULTILINE)
def _replace_docstring_header(paragraph):
"""Process NumPy-like function docstrings."""
# Replace Markdown headers in docstrings with light headers in bold.
paragraph = re.sub(_docstring_header_pattern, r'**\1**', paragraph)
paragraph = re.sub(_docstring_parameters_pattern, r'\n* `\1 : \2` ', paragraph)
return paragraph
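# A sketch of what the two substitutions above do to a NumPy-style fragment
# (illustrative text, not taken from a real docstring):
#
#   "Parameters\n----------\nx : array-like"
#
# is rewritten to roughly
#
#   "**Parameters**\n\n* `x : array-like` "
#
# i.e. the underlined header becomes bold and each "name : type" line becomes a
# bullet with the signature in backticks.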
def _doc(obj):
doc = inspect.getdoc(obj) or ''
doc = doc.strip()
    if r'\n\n' in doc:
        i = doc.index(r'\n\n')
        # Strings are immutable, so rebuild the value instead of slice-assigning.
        doc = re.sub(r'\n(?!\n)', '', doc[:i]) + doc[i:]  # remove standalone newlines
if doc and '---' in doc:
return _replace_docstring_header(doc)
else:
return doc
#------------------------------------------------------------------------------
# Introspection methods
#------------------------------------------------------------------------------
def _is_public(obj):
name = _name(obj) if not isinstance(obj, str) else obj
if name:
return not name.startswith('_')
else:
return True
def _is_defined_in_package(obj, package):
if isinstance(obj, property):
obj = obj.fget
mod = inspect.getmodule(obj)
if mod and hasattr(mod, '__name__'):
name = mod.__name__
return name.split('.')[0].startswith(package)
return True
def _iter_doc_members(obj, package=None):
for name, member in inspect.getmembers(obj):
if _is_public(name):
if package is None or _is_defined_in_package(member, package):
yield member
def _iter_subpackages(package, subpackages):
"""Iterate through a list of subpackages."""
for subpackage in subpackages:
yield import_module('{}.{}'.format(package, subpackage))
def _iter_vars(mod):
    """Iterate through a list of variables defined in a module's public namespace."""
vars = sorted(var for var in dir(mod) if _is_public(var))
for var in vars:
yield getattr(mod, var)
def _iter_functions(subpackage):
return filter(inspect.isfunction, _iter_vars(subpackage))
def _iter_classes(subpackage):
return filter(inspect.isclass, _iter_vars(subpackage))
def _iter_methods(klass, package=None):
for member in _iter_doc_members(klass, package):
if inspect.isfunction(member) or inspect.ismethod(member):
if inspect.isdatadescriptor(member):
continue
yield member
def _iter_properties(klass, package=None):
for member in _iter_doc_members(klass, package):
if isinstance(member, property):
yield member.fget
#------------------------------------------------------------------------------
# API doc generation
#------------------------------------------------------------------------------
def _function_header(subpackage, func):
"""Generate the docstring of a function."""
args = str(inspect.signature(func))
return "{name}{args}".format(name=_full_name(subpackage, func), args=args)
_FUNCTION_PATTERN = '%s\n\n\n**`%s`**\n\n%s\n\n---'
def _doc_function(subpackage, func):
title = _full_name(subpackage, func)
return _FUNCTION_PATTERN % (title, _function_header(subpackage, func), _doc(func))
def _doc_method(klass, func):
"""Generate the docstring of a method."""
args = str(inspect.signature(func))
title = "{klass}.{name}".format(klass=klass.__name__, name=_name(func))
header = "{klass}.{name}{args}".format(klass=klass.__name__, name=_name(func), args=args)
docstring = _doc(func)
return _FUNCTION_PATTERN % (title, header, docstring)
def _doc_property(klass, prop):
"""Generate the docstring of a property."""
header = "{klass}.{name}".format(klass=klass.__name__, name=_name(prop))
docstring = _doc(prop)
return _FUNCTION_PATTERN % (header, header, docstring)
def _link(name, anchor=None):
return "[{name}](#{anchor})".format(name=name, anchor=anchor or _anchor(name))
def _generate_preamble(package, subpackages):
yield "# API documentation of {}".format(package)
yield _doc(import_module(package))
yield "## Table of contents"
# Table of contents: list of modules.
for subpackage in _iter_subpackages(package, subpackages):
subpackage_name = subpackage.__name__
yield "### " + _link(subpackage_name)
# List of top-level functions in the subpackage.
for func in _iter_functions(subpackage):
yield '* ' + _link(
_full_name(subpackage, func), _anchor(_full_name(subpackage, func)))
# All public classes.
for klass in _iter_classes(subpackage):
# Class documentation.
yield "* " + _link(_full_name(subpackage, klass))
yield ""
yield ""
def _generate_paragraphs(package, subpackages):
"""Generate the paragraphs of the API documentation."""
# API doc of each module.
for subpackage in _iter_subpackages(package, subpackages):
subpackage_name = subpackage.__name__
yield "## {}".format(subpackage_name)
# Subpackage documentation.
yield _doc(import_module(subpackage_name))
yield "---"
# List of top-level functions in the subpackage.
for func in _iter_functions(subpackage):
yield '#### ' + _doc_function(subpackage, func)
# All public classes.
for klass in _iter_classes(subpackage):
# Class documentation.
yield "### {}".format(_full_name(subpackage, klass))
yield _doc(klass)
yield "---"
for method in _iter_methods(klass, package):
yield '#### ' + _doc_method(klass, method)
for prop in _iter_properties(klass, package):
yield '#### ' + _doc_property(klass, prop)
def _print_paragraph(paragraph):
out = ''
out += paragraph + '\n'
if not paragraph.startswith('* '):
out += '\n'
return out
def generate_api_doc(package, subpackages, path=None):
out = ''
for paragraph in _generate_preamble(package, subpackages):
out += _print_paragraph(paragraph)
for paragraph in _generate_paragraphs(package, subpackages):
out += _print_paragraph(paragraph)
if path is None:
return out
else:
with open(path, 'w') as f:
f.write('\n'.join([_.rstrip() for _ in out.splitlines()]))
if __name__ == '__main__':
package = 'phy'
subpackages = ['utils', 'gui', 'plot', 'cluster', 'apps', 'apps.template', 'apps.kwik']
curdir = op.dirname(op.realpath(__file__))
path = op.join(curdir, '../docs/api.md')
generate_api_doc(package, subpackages, path=path)
|
bsd-3-clause
| -4,705,370,727,807,438,000
| 29.355731
| 93
| 0.564193
| false
| 4.004171
| false
| false
| false
|
thinkle/gourmet
|
gourmet/check_encodings.py
|
1
|
10430
|
from typing import Dict
from gi.repository import Gtk
from .gdebug import debug
from .gtk_extras import dialog_extras as de
from gettext import gettext as _
from .prefs import Prefs
class CheckEncoding:
"""A class to read a file and guess the correct text encoding."""
encodings = ['iso8859', 'ascii', 'latin_1', 'cp850', 'cp1252', 'utf-8']
all_encodings = ['ascii', 'cp037', 'cp424', 'cp437', 'cp500', 'cp737',
'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857',
'cp860', 'cp861', 'cp862', 'cp863', 'cp864', 'cp865',
'cp869', 'cp874', 'cp875', 'cp1006', 'cp1026', 'cp1140',
'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254',
'cp1255', 'cp1256', 'cp1258', 'latin_1', 'iso8859_2',
'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
'iso8859_7', 'iso8859_8', 'iso8859_9', 'iso8859_10',
'iso8859_13', 'iso8859_14', 'iso8859_15', 'koi8_r',
'koi8_u', 'mac_cyrillic', 'mac_greek', 'mac_iceland',
'mac_latin2', 'mac_roman', 'mac_turkish', 'utf_16',
'utf_16_be', 'utf_16_le', 'utf_7', 'utf_8']
def __init__(self, file, encodings=None):
        if Prefs.instance().get('utf-16', False):
            # Build a new list so the shared class-level default is not mutated on every instantiation.
            self.encodings = self.encodings + ['utf_16', 'utf_16_le', 'utf_16_be']
if encodings is not None:
self.encodings = encodings
if isinstance(file, str):
file = open(file, 'rb')
self.txt = file.read()
file.close()
def test_encodings(self):
"""Move through self.encodings one at a time and return the first
encoding that decodes our text cleanly. We return a tuple (encoding,decoded_text)"""
for e in self.encodings:
try:
t=self.txt.decode(e)
return (e,t)
except UnicodeDecodeError:
pass
def get_encodings (self):
encs = self.test_all_encodings(self.encodings)
if encs:
return encs
else:
return self.test_all_encodings(self.all_encodings)
def test_all_encodings (self,encodings=None):
"""Test all encodings and return a dictionary of possible encodings."""
if not encodings:
encodings=self.all_encodings
self.possible_encodings = {}
for e in encodings:
try:
d=self.txt.decode(e)
if d and (d not in self.possible_encodings.values()):
# if we don't already have this possibility, add
self.possible_encodings[e] = d
except UnicodeDecodeError:
pass
return self.possible_encodings
class GetFile(CheckEncoding):
"""Handed a filename, return a list of lines."""
def __init__(self, file: str, encodings=None):
super().__init__(file, encodings)
encs: Dict[str, str] = self.get_encodings()
if encs:
if len(list(encs.keys())) > 1:
encoding = getEncoding(encodings=encs)
else:
encoding = list(encs.keys())[0]
self.enc = encoding
self.lines = encs[self.enc].splitlines()
debug('reading file %s as encoding %s'%(file, self.enc))
else:
raise Exception("Cannot decode file %s" % file)
def get_file(file: str, encodings=None):
gf = GetFile(file, encodings)
return gf.lines
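# A minimal usage sketch (the path below is hypothetical):
#
#   lines = get_file('/tmp/imported_recipe.mmf')
#
# This tries the common encodings first and, if several of them decode the file
# cleanly but differently, pops up EncodingDialog so the user can pick one.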
class EncodingDialog(de.OptionDialog):
"""Create a dialog to allow user to select correct encoding for an input file."""
context_lines = 2
def __init__(self, default=None, label=_("Select encoding"),
sublabel=_("Cannot determine proper encoding. Please select the correct encoding from the following list."),
expander_label=_("See _file with encoding"),
encodings=None):
self.diff_lines = {}
self.cursor_already_set = False
self.expander_label = expander_label
self.encodings = encodings if encodings is not None else {}
self.current_error = 0
self.diff_texts()
self.options = self.create_options()
expander = self.create_expander()
self.setup_buffers()
super().__init__(default=default, label=label, sublabel=sublabel,
options=self.options, expander=expander)
self.set_default_size(700, 500)
self.combobox.connect('changed', self.change_encoding)
self.change_encoding()
self.created = False
self.expander.set_expanded(True)
def setup_motion_buttons (self):
self.hbb = Gtk.HButtonBox()
self.fb = Gtk.Button('Next Difference')
self.pb = Gtk.Button('Previous Difference')
self.pb.connect('clicked',lambda *args: self.move_to_difference(forward=False))
self.fb.connect('clicked',lambda *args: self.move_to_difference(forward=True))
self.hbb.add(self.pb)
self.hbb.add(self.fb)
self.evb.add(self.hbb)
self.hbb.show_all()
def get_option(self, widget):
super().get_option(widget)
self.change_encoding()
def create_options (self):
options = list(self.encodings.keys())
masterlist = CheckEncoding.encodings + CheckEncoding.all_encodings
options.sort(key=lambda x: masterlist.index(x))
return options
def create_expander(self):
self.evb = Gtk.VBox(vexpand=True)
self.tv = Gtk.TextView()
self.tv.set_editable(False)
self.buffer = self.tv.get_buffer()
self.evb.pack_start(self.tv, expand=True, fill=True, padding=0)
self.evb.show_all()
return self.expander_label, self.evb
def setup_buffers (self):
self.encoding_buffers={}
for k,t in list(self.encodings.items()):
self.encoding_buffers[k]=Gtk.TextBuffer()
self.highlight_tags = [self.encoding_buffers[k].create_tag(background='yellow')]
self.line_highlight_tags = [self.encoding_buffers[k].create_tag(background='green')]
self.set_buffer_text(self.encoding_buffers[k],t)
def change_encoding (self, _widget=None):
if self.cursor_already_set:
im=self.buffer.get_insert()
ti=self.buffer.get_iter_at_mark(im)
offset=ti.get_offset()
self.tv.set_buffer(self.encoding_buffers[self.ret])
self.buffer = self.encoding_buffers[self.ret]
debug('changed text to encoding %s'%self.ret,0)
def move_to_difference (self, forward=True):
dkeys = list(self.diff_lines.keys())
dkeys.sort()
if forward:
self.current_error += 1
else:
self.current_error = self.current_error - 1
        # Wrap around when stepping past either end of the list of differences.
        if self.current_error >= len(dkeys): self.current_error = 0
if self.current_error < 0: self.current_error = len(dkeys)-1
mark=self.buffer.create_mark(
None,
self.buffer.get_iter_at_line_index(dkeys[self.current_error],0),
False,
)
self.tv.scroll_to_mark(mark,0)
def set_buffer_text (self, buffer, text):
"""Set buffer text to show encoding differences."""
lines = text.splitlines()
totl = len(lines)
shown = []
for line,diffs in list(self.diff_lines.items()):
if line in shown: continue
start_at = line - self.context_lines
if start_at < 0: start_at = 0
end_at = line + self.context_lines
if end_at >= totl: end_at = totl-1
if start_at != 0:
buffer.insert_with_tags(buffer.get_end_iter(),
'\n...\n',
)
for n in range(start_at,end_at):
if n in shown:
continue
shown.append(n)
l = lines[n]
if n==line:
start = 0
for sdiff,ediff in diffs:
buffer.insert_with_tags(buffer.get_end_iter(),
l[start:sdiff],
*self.line_highlight_tags)
buffer.insert_with_tags(buffer.get_end_iter(),
l[sdiff:ediff],
*self.highlight_tags)
start = ediff
buffer.insert_with_tags(buffer.get_end_iter(),
l[start:],
*self.line_highlight_tags)
else:
buffer.insert_with_tags(buffer.get_end_iter(),l)
def diff_texts(self):
"""Compare different encoding for characters where they differ."""
encoded_buffers = list(self.encodings.values())
# Sort by number of newlines (most first)
encoded_buffers.sort(key=lambda x: len(x.splitlines()), reverse=True)
enc1 = encoded_buffers[0]
enc_rest = [e.splitlines() for e in encoded_buffers[1:]]
for linenum, line in enumerate(enc1.splitlines()):
other_lines = [len(e) > linenum and e[linenum] for e in enc_rest]
# Remove any Falses returned by above
other_lines = [x for x in other_lines if not isinstance(x, bool)]
# If not all lines are the same, create a diff marking where they
# differ.
if not all(line == ol for ol in other_lines):
ranges = []
for chnum, ch in enumerate(line):
# Check that the lines are the same. If not, mark where
if not all([len(line) > chnum and ch == line[chnum]
for line in other_lines]):
if ranges and ranges[-1][1] == chnum:
ranges[-1][1] = chnum+1
else:
ranges.append([chnum, chnum+1])
self.diff_lines[linenum] = ranges
def getEncoding(*args, **kwargs):
dialog = EncodingDialog(*args, **kwargs)
result = dialog.run()
if (not result) and dialog.encodings:
return dialog.options[0]
elif not result:
return 'ascii'
else:
return result
|
gpl-2.0
| -2,510,780,388,387,652,600
| 40.553785
| 125
| 0.541611
| false
| 3.976363
| false
| false
| false
|
jakenjarvis/pyOss
|
MasterlistChecker.py
|
1
|
12321
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is encoded as UTF-8 without BOM.
################################################################################
__appname__ = "MasterlistChecker.py"
__author__ = "Jaken<Jaken.Jarvis@gmail.com>"
__copyright__ = "Copyright 2010, Jaken"
__license__ = """
GNU General Public License v3
This file is part of pyOss.
Copyright (C) 2010 Jaken.(jaken.jarvis@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__version__ = "1.0.0"
__credits__ = [
'"Jaken" <Jaken.Jarvis@gmail.com>',
]
################################################################################
# Import
################################################################################
import sys
import os
import codecs
import re
reload(sys)
sys.setdefaultencoding('utf-8')
import uuid
from optparse import OptionParser
from pyOssLib.v1_0.MasterlistLib import *
from pyOssLib.v1_0.UserlistLib import *
################################################################################
# Global variable
################################################################################
# [masterlist] Regex that detects ESM/ESP file lines (">" and "<" prefixes are still treated as files)
regexMods = re.compile(ur"^([><]?)([^><\\%?*:\"$^]{1}[^\\><:\"/|?*]*[.](esm|esp))\s*.*$", re.IGNORECASE)
# [masterlist] Regex that detects comment or command lines
regexCommand = re.compile(ur"^([><]?)([\\%?*:\"$^]{1})\s*(.*)$")
# [masterlist] Regex that detects the start of a group, e.g. \BeginGroup\: Post BSA
regexBeginGroup = re.compile(ur"^\\BeginGroup\\:(.*)", re.IGNORECASE)
# [masterlist] Regex that detects the end of a group, e.g. \EndGroup\\
regexEndGroup = re.compile(ur"^\\EndGroup\\\\", re.IGNORECASE)
# [masterlist correction] Regex that detects lines that look like BASH tag definitions
regexExBash = re.compile(ur"^([{]{2}BASH[:]\S+[}]{2}.*)$", re.IGNORECASE)
# [masterlist correction] Regex that detects lines that look like mistyped comments
regexExComment = re.compile(ur"^/\s*(.*)")
# [masterlist correction] Regex that detects lines that look like ESM/ESP entries
regexExMods1 = re.compile(ur"^(\w+(\w|[ ]|[$%'_@!()~-])+)\s*$")
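# A few illustrative masterlist lines and a regex above that matches each of them
# (the mod names are only examples of the expected input):
#
#   Oblivion.esm              -> regexMods       (plain ESM/ESP entry)
#   % {{BASH:Delev,Relev}}    -> regexCommand    (comment/command line, leading %)
#   \BeginGroup\: Post BSA    -> regexBeginGroup
#   \EndGroup\\               -> regexEndGroup
#   {{BASH:Delev}}            -> regexExBash     (BASH tag missing the leading %)
#   / some remark             -> regexExComment  (comment written with the wrong slash)
#   Some Mod Name             -> regexExMods1    (entry missing its .esp/.esm extension)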
regexWarnings = re.compile(ur"""
^([><]?)
([^><\\%?*:\"$^]{1})
([a-zA-Z0-9_() .\[\]#!+,%&'-])+
(?!
(
\s{2,}
|[_() .\[\]#!+,%&'-]{2,}
)
)[.](esm|esp)$
""", re.IGNORECASE | re.VERBOSE)
################################################################################
# Function
################################################################################
def CreateUuid():
return unicode(uuid.uuid4())
################################################################################
# Main
################################################################################
if __name__ == "__main__":
print u"%s Version: %s %s" % (__appname__, __version__, __copyright__)
print u""
usage = u"%prog [Options] MASTERLISTFILE"
version = u"%s %s" % (u"%prog", __version__)
parser = OptionParser(usage = usage, version = version)
parser.add_option("-o", "--output",
action="store",
type="string",
dest="outfilename",
default="MasterlistChecker.txt",
metavar="FILE",
help="specify an output file")
parser.add_option("-d", "--debug",
action="store_true",
dest="debug",
default=False,
help="debug output")
    # Parse the command line options
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error(u"incorrect number of arguments")
args0 = unicode(args[0], "shift-jis")
outfilename = unicode(options.outfilename, "shift-jis")
    # Get the absolute paths
MasterlistFile = u"%s" % (os.path.abspath(args0))
OutputFile = u"%s" % (os.path.abspath(outfilename))
    # Check that the input file exists
if not os.path.exists(MasterlistFile):
parser.error(u"file not exists. \'%s\'" % MasterlistFile)
    # Delete the output file if it already exists
if os.path.exists(OutputFile):
os.remove(OutputFile)
    # Start writing the output
fileoutput = codecs.open(OutputFile, "wU", "utf-8-sig")
try:
        # Quick helper functions for writing output
def WriteLine(debug = False, screen = True, file = True, line = u""):
if debug:
if options.debug:
                    # write the line out
if screen:
print u"%s" % (line)
if file:
fileoutput.write(u"%s\r\n" % (line))
else:
                    # do not write the line
pass
else:
if screen:
print u"%s" % (line)
if file:
fileoutput.write(u"%s\r\n" % (line))
return
def PrintWriteLine(line):
WriteLine(False, False, True, line)
return
def DebugWriteLine(line):
WriteLine(True, True, True, line)
return
PrintWriteLine(u"--------------------------------------------------")
PrintWriteLine(u"Output pyOss - MasterlistChecker.py")
PrintWriteLine(u"Input files:")
PrintWriteLine(u" Masterlist : %s" % (MasterlistFile))
PrintWriteLine(u"Output files:")
PrintWriteLine(u" OutputFile : %s" % (OutputFile))
PrintWriteLine(u"--------------------------------------------------")
SkipLines = []
StackErrors = {}
def AddStackErrors(count, error, message):
if not error in StackErrors:
StackErrors[error] = []
StackErrors[error] += [[count, message]]
# --------------------------------------------------
#
# --------------------------------------------------
masterfile = Masterlist()
def _onEncodingError(linecount, linestring, encoding):
message = u"Can not be displayed : %s" % (encoding)
AddStackErrors(linecount, u"A01 UNICODE encoding error!", message)
return
masterfile.OnEncodingErrorFromSave = _onEncodingError
masterfile.OnDecodingErrorFromLoad = _onEncodingError
def _onCreateLineObject(linecount, linestring):
lineobject = Line(linestring)
lineobject.LineCount = linecount
linestring = lineobject.LineString
if lineobject.IsType(EnumLineType.OTHER):
matchEx = [regexExBash.search(linestring)
,regexExComment.search(linestring)
,regexExMods1.search(linestring)]
if matchEx[0] is not None:
                    # A line that looks like a BASH tag where the leading % was forgotten.
                    # Prepend a % to correct it.
linecorrectionstring = u"%"
AddStackErrors(linecount, u"A02 Typographical errors!", u"%s => %s" % (linecorrectionstring, linestring))
elif matchEx[1] is not None:
                    # A line that looks like a comment where a forward slash was typed instead of a backslash.
                    # Rewrite it with a backslash (in English locales the yen mark is rendered as a backslash).
linecorrectionstring = u"\\"
AddStackErrors(linecount, u"A02 Typographical errors!", u"%s => %s" % (linecorrectionstring, linestring))
elif matchEx[2] is not None:
                    # A line that looks like the file extension was forgotten.
                    # Treat it as .esp (a line without a period is assumed to be an ESP file).
                    # So far no such mistakes have been seen with esm files.
linecorrectionstring = u".esp"
AddStackErrors(linecount, u"A02 Typographical errors!", u"%s => %s" % (linecorrectionstring, linestring))
else:
if len(linestring) != 0:
AddStackErrors(linecount, u"A03 Format unknown error!", u"%s" % (linestring))
if lineobject.IsType(EnumLineType.MODS):
match2 = regexWarnings.search(linestring)
if match2 == None:
pass
#AddStackErrors(linecount, u"A04 Warning! Please check if this is correct.", u"%s" % (linestring))
return lineobject
masterfile.OnCreateLineObject = _onCreateLineObject
loadingflg = False
try:
masterfile.Load(MasterlistFile)
loadingflg = True
except BaseException as ex:
AddStackErrors(0, u"A05 Load error!", unicode(ex))
PrintWriteLine(u"--------------------------------------------------")
PrintWriteLine(u"Could not run some checks!!!!")
PrintWriteLine(u"--------------------------------------------------")
if loadingflg:
AddStackErrors(0, u"A00 Encoding Information", masterfile.Encoding)
GroupLists = {}
ModsLists = {}
for object in masterfile.EachRecursion():
if isinstance(object, Line):
if object.IsType(EnumLineType.MODS):
if object.LineString in ModsLists:
ModsLists[object.LineString] += [object]
else:
ModsLists[object.LineString] = [object]
if object.GetParentGroup().GroupName == None:
AddStackErrors(object.LineCount, u"B01 Warning! There are lines that do not belong to the group.", u"%s" % (object.LineString))
elif isinstance(object, Block):
pass
elif isinstance(object, Group):
if object.GroupName in GroupLists:
GroupLists[object.GroupName] += [object]
else:
GroupLists[object.GroupName] = [object]
for key, value in GroupLists.iteritems():
if len(value) >= 2:
for group in value:
linecount = group.GetTopChild().GetTopChild().LineCount
AddStackErrors(linecount, u"B02 Duplicate groups error!", u"%s" % (group.GroupName))
for key, value in ModsLists.iteritems():
if len(value) >= 2:
for mods in value:
AddStackErrors(mods.LineCount, u"B03 Duplicate mods error!", u"%s" % (mods.LineString))
# --------------------------------------------------
        # Write out the collected errors
# --------------------------------------------------
for errorkey in sorted(StackErrors):
errorvalue = StackErrors[errorkey]
PrintWriteLine(u"--------------------------------------------------")
PrintWriteLine(errorkey)
PrintWriteLine(u"--------------------------------------------------")
for error in errorvalue:
PrintWriteLine(u"%8d: %s" % (error[0], error[1]))
finally:
fileoutput.close()
print u"Completed!"
print u" Output File : %s" % (OutputFile)
|
gpl-3.0
| -4,486,569,772,765,041,000
| 37.088136
| 155
| 0.479403
| false
| 3.579944
| false
| false
| false
|
yk5/incubator-airflow
|
airflow/hooks/S3_hook.py
|
1
|
12717
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.contrib.hooks.aws_hook import AwsHook
from six import BytesIO
from urllib.parse import urlparse
import re
import fnmatch
class S3Hook(AwsHook):
"""
Interact with AWS S3, using the boto3 library.
"""
def get_conn(self):
return self.get_client_type('s3')
@staticmethod
def parse_s3_url(s3url):
parsed_url = urlparse(s3url)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket_name instead of "%s"' % s3url)
else:
bucket_name = parsed_url.netloc
key = parsed_url.path.strip('/')
return (bucket_name, key)
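    # For example (bucket and key are hypothetical):
    #
    #   S3Hook.parse_s3_url('s3://my-bucket/some/prefix/data.csv')
    #   # -> ('my-bucket', 'some/prefix/data.csv')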
def check_for_bucket(self, bucket_name):
"""
Check if bucket_name exists.
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
try:
self.get_conn().head_bucket(Bucket=bucket_name)
return True
except:
return False
def get_bucket(self, bucket_name):
"""
Returns a boto3.S3.Bucket object
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
s3 = self.get_resource_type('s3')
return s3.Bucket(bucket_name)
def check_for_prefix(self, bucket_name, prefix, delimiter):
"""
Checks that a prefix exists in a bucket
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return False if plist is None else prefix in plist
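    # A sketch of how the check resolves (bucket and prefixes are hypothetical):
    #
    #   hook.check_for_prefix('my-bucket', 'logs/2018', '/')
    #
    # normalises the prefix to 'logs/2018/', lists the common prefixes under
    # 'logs/' and returns True only if 'logs/2018/' is among them.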
def list_prefixes(self, bucket_name, prefix='', delimiter=''):
"""
Lists prefixes in a bucket under prefix
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
"""
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter)
has_results = False
prefixes = []
for page in response:
if 'CommonPrefixes' in page:
has_results = True
for p in page['CommonPrefixes']:
prefixes.append(p['Prefix'])
if has_results:
return prefixes
def list_keys(self, bucket_name, prefix='', delimiter=''):
"""
Lists keys in a bucket under prefix and not containing delimiter
:param bucket_name: the name of the bucket
:type bucket_name: str
:param prefix: a key prefix
:type prefix: str
:param delimiter: the delimiter marks key hierarchy.
:type delimiter: str
"""
paginator = self.get_conn().get_paginator('list_objects_v2')
response = paginator.paginate(Bucket=bucket_name,
Prefix=prefix,
Delimiter=delimiter)
has_results = False
keys = []
for page in response:
if 'Contents' in page:
has_results = True
for k in page['Contents']:
keys.append(k['Key'])
if has_results:
return keys
def check_for_key(self, key, bucket_name=None):
"""
Checks if a key exists in a bucket
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
try:
self.get_conn().head_object(Bucket=bucket_name, Key=key)
return True
except:
return False
def get_key(self, key, bucket_name=None):
"""
Returns a boto3.s3.Object
:param key: the path to the key
:type key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
obj = self.get_resource_type('s3').Object(bucket_name, key)
obj.load()
return obj
def read_key(self, key, bucket_name=None):
"""
Reads a key from S3
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
"""
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8')
def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization={'CSV': {}},
output_serialization={'CSV': {}}):
"""
Reads a key with S3 Select.
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which the file is stored
:type bucket_name: str
:param expression: S3 Select expression
:type expression: str
:param expression_type: S3 Select expression type
:type expression_type: str
:param input_serialization: S3 Select input data serialization format
:type input_serialization: dict
:param output_serialization: S3 Select output data serialization format
:type output_serialization: dict
:return: retrieved subset of original data by S3 Select
:rtype: str
.. seealso::
For more details about S3 Select parameters:
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Client.select_object_content
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload']
for event in response['Payload']
if 'Records' in event)
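    # A minimal usage sketch (bucket, key and query are hypothetical):
    #
    #   subset = hook.select_key(
    #       key='s3://my-bucket/data.csv',
    #       expression='SELECT s._1 FROM S3Object s',
    #   )
    #
    # Only the selected subset of the CSV is streamed back instead of the whole object.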
def check_for_wildcard_key(self,
wildcard_key, bucket_name=None, delimiter=''):
"""
Checks that a key matching a wildcard expression exists in a bucket
"""
return self.get_wildcard_key(wildcard_key=wildcard_key,
bucket_name=bucket_name,
delimiter=delimiter) is not None
def get_wildcard_key(self, wildcard_key, bucket_name=None, delimiter=''):
"""
Returns a boto3.s3.Object object matching the wildcard expression
:param wildcard_key: the path to the key
:type wildcard_key: str
:param bucket_name: the name of the bucket
:type bucket_name: str
"""
if not bucket_name:
(bucket_name, wildcard_key) = self.parse_s3_url(wildcard_key)
prefix = re.split(r'[*]', wildcard_key, 1)[0]
klist = self.list_keys(bucket_name, prefix=prefix, delimiter=delimiter)
if klist:
key_matches = [k for k in klist if fnmatch.fnmatch(k, wildcard_key)]
if key_matches:
return self.get_key(key_matches[0], bucket_name)
def load_file(self,
filename,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads a local file to S3
:param filename: name of the file to load.
:type filename: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists. If replace is False and the key exists, an
error will be raised.
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
client = self.get_conn()
client.upload_file(filename, bucket_name, key, ExtraArgs=extra_args)
def load_string(self,
string_data,
key,
bucket_name=None,
replace=False,
encrypt=False,
encoding='utf-8'):
"""
Loads a string to S3
This is provided as a convenience to drop a string in S3. It uses the
boto infrastructure to ship a file to s3.
:param string_data: string to set as content for the key.
:type string_data: str
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
self.load_bytes(string_data.encode(encoding),
key=key,
bucket_name=bucket_name,
replace=replace,
encrypt=encrypt)
def load_bytes(self,
bytes_data,
key,
bucket_name=None,
replace=False,
encrypt=False):
"""
Loads bytes to S3
        This is provided as a convenience to drop bytes data into S3. It uses the
boto infrastructure to ship a file to s3.
:param bytes_data: bytes to set as content for the key.
:type bytes_data: bytes
:param key: S3 key that will point to the file
:type key: str
:param bucket_name: Name of the bucket in which to store the file
:type bucket_name: str
:param replace: A flag to decide whether or not to overwrite the key
if it already exists
:type replace: bool
:param encrypt: If True, the file will be encrypted on the server-side
by S3 and will be stored in an encrypted form while at rest in S3.
:type encrypt: bool
"""
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
if not replace and self.check_for_key(key, bucket_name):
raise ValueError("The key {key} already exists.".format(key=key))
extra_args = {}
if encrypt:
extra_args['ServerSideEncryption'] = "AES256"
filelike_buffer = BytesIO(bytes_data)
client = self.get_conn()
client.upload_fileobj(filelike_buffer, bucket_name, key, ExtraArgs=extra_args)
|
apache-2.0
| -2,631,231,125,961,501,700
| 34.621849
| 108
| 0.57663
| false
| 4.341755
| false
| false
| false
|
mluke93/osf.io
|
website/routes.py
|
1
|
51178
|
# -*- coding: utf-8 -*-
import os
import httplib as http
from flask import request
from flask import send_from_directory
from framework import status
from framework import sentry
from framework.auth import cas
from framework.routing import Rule
from framework.flask import redirect
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.auth import get_display_name
from framework.routing import xml_renderer
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from modularodm import Q
from modularodm.exceptions import QueryException, NoResultsFound
from website import util
from website import prereg
from website import settings
from website import language
from website.util import metrics
from website.util import paths
from website.util import sanitize
from website import maintenance
from website.models import Institution
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile import views as profile_views
from website.project import views as project_views
from website.addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.preprints import views as preprint_views
from website.institutions import views as institution_views
from website.notifications import views as notification_views
def get_globals():
"""Context variables that are available for every template rendered by
OSFWebRenderer.
"""
user = _get_current_user()
user_institutions = [{'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path} for inst in user.affiliated_institutions] if user else []
all_institutions = [{'id': inst._id, 'name': inst.name, 'logo_path': inst.logo_path} for inst in Institution.find().sort('name')]
if request.host_url != settings.DOMAIN:
try:
inst_id = (Institution.find_one(Q('domains', 'eq', request.host.lower())))._id
request_login_url = '{}institutions/{}'.format(settings.DOMAIN, inst_id)
except NoResultsFound:
request_login_url = request.url.replace(request.host_url, settings.DOMAIN)
else:
request_login_url = request.url
return {
'private_link_anonymous': is_private_link_anonymous_view(),
'user_name': user.username if user else '',
'user_full_name': user.fullname if user else '',
'user_id': user._primary_key if user else '',
'user_locale': user.locale if user and user.locale else '',
'user_timezone': user.timezone if user and user.timezone else '',
'user_url': user.url if user else '',
'user_gravatar': profile_views.current_user_gravatar(size=25)['gravatar_url'] if user else '',
'user_email_verifications': user.unconfirmed_email_info if user else [],
'user_api_url': user.api_url if user else '',
'user_entry_point': metrics.get_entry_point(user) if user else '',
'user_institutions': user_institutions if user else None,
'all_institutions': all_institutions,
'display_name': get_display_name(user.fullname) if user else '',
'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
'piwik_host': settings.PIWIK_HOST,
'piwik_site_id': settings.PIWIK_SITE_ID,
'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
'dev_mode': settings.DEV_MODE,
'allow_login': settings.ALLOW_LOGIN,
'cookie_name': settings.COOKIE_NAME,
'status': status.pop_status_messages(),
'domain': settings.DOMAIN,
'api_domain': settings.API_DOMAIN,
'disk_saving_mode': settings.DISK_SAVING_MODE,
'language': language,
'noteworthy_links_node': settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
'popular_links_node': settings.POPULAR_LINKS_NODE,
'web_url_for': util.web_url_for,
'api_url_for': util.api_url_for,
'api_v2_url': util.api_v2_url, # URL function for templates
'api_v2_base': util.api_v2_url(''), # Base url used by JS api helper
'sanitize': sanitize,
'sjson': lambda s: sanitize.safe_json(s),
'webpack_asset': paths.webpack_asset,
'waterbutler_url': settings.WATERBUTLER_URL,
'login_url': cas.get_login_url(request_login_url),
'reauth_url': util.web_url_for('auth_logout', redirect_url=request.url, reauth=True),
'profile_url': cas.get_profile_url(),
'enable_institutions': settings.ENABLE_INSTITUTIONS,
'keen_project_id': settings.KEEN_PROJECT_ID,
'keen_write_key': settings.KEEN_WRITE_KEY,
'maintenance': maintenance.get_maintenance(),
}
def is_private_link_anonymous_view():
try:
# Avoid circular import
from website.project.model import PrivateLink
return PrivateLink.find_one(
Q('key', 'eq', request.args.get('view_only'))
).anonymous
except QueryException:
return False
class OsfWebRenderer(WebRenderer):
"""Render a Mako template with OSF context vars.
:param trust: Optional. If ``False``, markup-safe escaping will be enabled
"""
def __init__(self, *args, **kwargs):
kwargs['data'] = get_globals
super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Use if a view only redirects or raises error
notemplate = OsfWebRenderer('', renderer=render_mako_string, trust=False)
# Static files (robots.txt, etc.)
def favicon():
return send_from_directory(
settings.STATIC_FOLDER,
'favicon.ico',
mimetype='image/vnd.microsoft.icon'
)
def robots():
"""Serves the robots.txt file."""
# Allow local robots.txt
if os.path.exists(os.path.join(settings.STATIC_FOLDER,
'robots.local.txt')):
robots_file = 'robots.local.txt'
else:
robots_file = 'robots.txt'
return send_from_directory(
settings.STATIC_FOLDER,
robots_file,
mimetype='text/plain'
)
def goodbye():
# Redirect to dashboard if logged in
if _get_current_user():
return redirect(util.web_url_for('index'))
status.push_status_message(language.LOGOUT, kind='success', trust=False)
return {}
def make_url_map(app):
"""Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
"""
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule(
'/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string, trust=False)
),
Rule(
'/api/v1/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
json_renderer
),
])
### GUID ###
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
notemplate,
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
])
### Base ###
process_rules(app, [
Rule(
'/dashboard/',
'get',
website_views.dashboard,
OsfWebRenderer('home.mako', trust=False)
),
Rule(
'/myprojects/',
'get',
website_views.my_projects,
OsfWebRenderer('my_projects.mako', trust=False)
),
Rule(
'/reproducibility/',
'get',
website_views.reproducibility,
notemplate
),
Rule('/about/', 'get', website_views.redirect_about, notemplate),
Rule('/help/', 'get', website_views.redirect_help, notemplate),
Rule('/faq/', 'get', {}, OsfWebRenderer('public/pages/faq.mako', trust=False)),
Rule(['/getting-started/', '/getting-started/email/', '/howosfworks/'], 'get', website_views.redirect_getting_started, notemplate),
Rule('/support/', 'get', {}, OsfWebRenderer('public/pages/support.mako', trust=False)),
Rule(
'/explore/',
'get',
{},
OsfWebRenderer('public/explore.mako', trust=False)
),
Rule(
[
'/messages/',
],
'get',
{},
OsfWebRenderer('public/comingsoon.mako', trust=False)
),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako', trust=False),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako', trust=False),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/meetings/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako', trust=False),
),
Rule(
'/api/v1/meetings/submissions/',
'get',
conference_views.conference_submissions,
json_renderer,
),
Rule(
'/presentations/',
'get',
conference_views.redirect_to_meetings,
json_renderer,
),
Rule(
'/news/',
'get',
website_views.redirect_to_cos_news,
notemplate
),
Rule(
'/prereg/',
'get',
prereg.prereg_landing_page,
OsfWebRenderer('prereg_landing_page.mako', trust=False)
),
Rule(
'/preprints/',
'get',
preprint_views.preprint_landing_page,
OsfWebRenderer('public/pages/preprint_landing.mako', trust=False),
),
Rule(
'/preprint/',
'get',
preprint_views.preprint_redirect,
notemplate,
),
Rule(
'/api/v1/prereg/draft_registrations/',
'get',
prereg.prereg_draft_registrations,
json_renderer,
),
])
# Site-wide API routes
process_rules(app, [
Rule(
'/citations/styles/',
'get',
citation_views.list_citation_styles,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_views.get_addon_user_config,
json_renderer,
),
], prefix='/api/v1')
# OAuth
process_rules(app, [
Rule(
'/oauth/connect/<service_name>/',
'get',
oauth_views.oauth_connect,
json_renderer,
),
Rule(
'/oauth/callback/<service_name>/',
'get',
oauth_views.oauth_callback,
OsfWebRenderer('util/oauth_complete.mako', trust=False),
),
])
process_rules(app, [
Rule(
[
'/oauth/accounts/<external_account_id>/',
],
'delete',
oauth_views.oauth_disconnect,
json_renderer,
)
], prefix='/api/v1')
process_rules(app, [
Rule('/confirmed_emails/', 'put', auth_views.unconfirmed_email_add, json_renderer),
Rule('/confirmed_emails/', 'delete', auth_views.unconfirmed_email_remove, json_renderer)
], prefix='/api/v1')
### Metadata ###
process_rules(app, [
Rule(
[
'/project/<pid>/comments/timestamps/',
'/project/<pid>/node/<nid>/comments/timestamps/',
],
'put',
project_views.comment.update_comments_timestamp,
json_renderer,
),
Rule(
[
'/project/<pid>/citation/',
'/project/<pid>/node/<nid>/citation/',
],
'get',
citation_views.node_citation,
json_renderer,
),
], prefix='/api/v1')
### Forms ###
process_rules(app, [
Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
Rule('/forms/reset_password/', 'get', website_views.reset_password_form, json_renderer),
], prefix='/api/v1')
### Discovery ###
process_rules(app, [
Rule(
'/explore/activity/',
'get',
discovery_views.activity,
OsfWebRenderer('public/pages/active_nodes.mako', trust=False)
),
])
### Auth ###
process_rules(app, [
# confirm email
Rule(
'/confirm/<uid>/<token>/',
'get',
auth_views.confirm_email_get,
notemplate
),
# reset password get
Rule(
'/resetpassword/<verification_key>/',
'get',
auth_views.reset_password_get,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# reset password post
Rule(
'/resetpassword/<verification_key>/',
'post',
auth_views.reset_password_post,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# resend confirmation get
Rule(
'/resend/',
'get',
auth_views.resend_confirmation_get,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# resend confirmation post
Rule(
'/resend/',
'post',
auth_views.resend_confirmation_post,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# user sign up page
Rule(
'/register/',
'get',
auth_views.auth_register,
OsfWebRenderer('public/login.mako', trust=False)
),
# create user account via api
Rule(
'/api/v1/register/',
'post',
auth_views.register_user,
json_renderer
),
# osf login and campaign login
Rule(
[
'/login/',
'/account/'
],
'get',
auth_views.auth_login,
OsfWebRenderer('public/login.mako', trust=False)
),
# osf logout and cas logout
Rule(
'/logout/',
'get',
auth_views.auth_logout,
notemplate
),
# forgot password get
Rule(
'/forgotpassword/',
'get',
auth_views.forgot_password_get,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
# forgot password post
Rule(
'/forgotpassword/',
'post',
auth_views.forgot_password_post,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
Rule(
'/login/connected_tools/',
'get',
landing_page_views.connected_tools,
notemplate
),
Rule(
'/login/enriched_profile/',
'get',
landing_page_views.enriched_profile,
notemplate
),
])
### Profile ###
# Web
process_rules(app, [
Rule(
'/profile/',
'get',
profile_views.profile_view,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
'/profile/<uid>/',
'get',
profile_views.profile_view_id,
OsfWebRenderer('profile.mako', trust=False)
),
# Route for claiming and setting email and password.
# Verification token must be querystring argument
Rule(
['/user/<uid>/<pid>/claim/'],
['get', 'post'],
project_views.contributor.claim_user_form,
OsfWebRenderer('claim_account.mako', trust=False)
),
Rule(
['/user/<uid>/<pid>/claim/verify/<token>/'],
['get', 'post'],
project_views.contributor.claim_user_registered,
OsfWebRenderer('claim_account_registered.mako', trust=False)
),
Rule(
'/settings/',
'get',
profile_views.user_profile,
OsfWebRenderer('profile/settings.mako', trust=False),
),
Rule(
'/settings/account/',
'get',
profile_views.user_account,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/account/password',
'post',
profile_views.user_account_password,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/addons/',
'get',
profile_views.user_addons,
OsfWebRenderer('profile/addons.mako', trust=False),
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
OsfWebRenderer('profile/notifications.mako', trust=False),
),
Rule(
'/settings/applications/',
'get',
profile_views.oauth_application_list,
OsfWebRenderer('profile/oauth_app_list.mako', trust=False)
),
Rule(
'/settings/applications/create/',
'get',
profile_views.oauth_application_register,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/applications/<client_id>/',
'get',
profile_views.oauth_application_detail,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/tokens/',
'get',
profile_views.personal_access_token_list,
OsfWebRenderer('profile/personal_tokens_list.mako', trust=False)
),
Rule(
'/settings/tokens/create/',
'get',
profile_views.personal_access_token_register,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
Rule(
'/settings/tokens/<_id>/',
'get',
profile_views.personal_access_token_detail,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
# TODO: Uncomment once outstanding issues with this feature are addressed
# Rule(
# '/@<twitter_handle>/',
# 'get',
# profile_views.redirect_to_twitter,
# OsfWebRenderer('error.mako', render_mako_string, trust=False)
# ),
])
# API
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view, json_renderer),
Rule('/profile/', 'put', profile_views.update_user, json_renderer),
Rule('/resend/', 'put', profile_views.resend_confirmation, json_renderer),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id, json_renderer),
# Used by profile.html
Rule('/profile/<uid>/edit/', 'post', profile_views.edit_profile, json_renderer),
Rule('/profile/<uid>/public_projects/', 'get',
profile_views.get_public_projects, json_renderer),
Rule('/profile/<uid>/public_components/', 'get',
profile_views.get_public_components, json_renderer),
Rule('/profile/<user_id>/summary/', 'get',
profile_views.get_profile_summary, json_renderer),
Rule('/user/<uid>/<pid>/claim/email/', 'post',
project_views.contributor.claim_user_post, json_renderer),
Rule(
'/profile/export/',
'post',
profile_views.request_export,
json_renderer,
),
Rule(
'/profile/deactivate/',
'post',
profile_views.request_deactivation,
json_renderer,
),
Rule(
[
'/profile/gravatar/',
'/users/gravatar/',
'/profile/gravatar/<size>',
'/users/gravatar/<size>',
],
'get',
profile_views.current_user_gravatar,
json_renderer,
),
Rule(
[
'/profile/<uid>/gravatar/',
'/users/<uid>/gravatar/',
'/profile/<uid>/gravatar/<size>',
'/users/<uid>/gravatar/<size>',
],
'get',
profile_views.get_gravatar,
json_renderer,
),
# Rules for user profile configuration
Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'get',
profile_views.serialize_social,
json_renderer,
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'get',
profile_views.serialize_jobs,
json_renderer,
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'get',
profile_views.serialize_schools,
json_renderer,
),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'put',
profile_views.unserialize_social,
json_renderer
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'put',
profile_views.unserialize_jobs,
json_renderer
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'put',
profile_views.unserialize_schools,
json_renderer
),
], prefix='/api/v1',)
### Search ###
# Web
process_rules(app, [
Rule(
'/search/',
'get',
{},
OsfWebRenderer('search.mako', trust=False)
),
Rule(
'/share/',
'get',
{},
OsfWebRenderer('share_search.mako', trust=False)
),
Rule(
'/share/registration/',
'get',
{'register': settings.SHARE_REGISTRATION_URL},
OsfWebRenderer('share_registration.mako', trust=False)
),
Rule(
'/share/help/',
'get',
{'help': settings.SHARE_API_DOCS_URL},
OsfWebRenderer('share_api_docs.mako', trust=False)
),
Rule( # FIXME: Dead route; possible that template never existed; confirm deletion candidate with ErinB
'/share_dashboard/',
'get',
{},
OsfWebRenderer('share_dashboard.mako', trust=False)
),
Rule(
'/share/atom/',
'get',
search_views.search_share_atom,
xml_renderer
),
Rule('/api/v1/user/search/', 'get', search_views.search_contributor, json_renderer),
Rule(
'/api/v1/search/node/',
'post',
project_views.node.search_node,
json_renderer,
),
])
# API
process_rules(app, [
Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
Rule('/share/search/', ['get', 'post'], search_views.search_share, json_renderer),
Rule('/share/stats/', 'get', search_views.search_share_stats, json_renderer),
Rule('/share/providers/', 'get', search_views.search_share_providers, json_renderer),
], prefix='/api/v1')
# Institution
process_rules(app, [
Rule('/institutions/<inst_id>/', 'get', institution_views.view_institution, OsfWebRenderer('institution.mako', trust=False))
])
# Project
# Web
process_rules(app, [
# '/' route loads home.mako if logged in, otherwise loads landing.mako
Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako', trust=False)),
Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('landing.mako', trust=False)),
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'get',
project_views.node.view_project,
OsfWebRenderer('project/project.mako', trust=False)
),
# Create a new subproject/component
Rule(
'/project/<pid>/newnode/',
'post',
project_views.node.project_new_node,
notemplate
),
# # TODO: Add API endpoint for tags
# Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, OsfWebRenderer('tags.mako', trust=False)),
Rule('/project/new/<pid>/beforeTemplate/', 'get',
project_views.node.project_before_template, json_renderer),
Rule(
[
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
],
'get',
project_views.node.node_contributors,
OsfWebRenderer('project/contributors.mako', trust=False),
),
Rule(
[
'/project/<pid>/settings/',
'/project/<pid>/node/<nid>/settings/',
],
'get',
project_views.node.node_setting,
OsfWebRenderer('project/settings.mako', trust=False)
),
# Permissions
Rule( # TODO: Where, if anywhere, is this route used?
[
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
],
'post',
project_views.node.project_set_privacy,
OsfWebRenderer('project/project.mako', trust=False)
),
### Logs ###
# View forks
Rule(
[
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
],
'get',
project_views.node.node_forks,
OsfWebRenderer('project/forks.mako', trust=False)
),
# Registrations
Rule(
[
'/project/<pid>/register/',
'/project/<pid>/node/<nid>/register/',
],
'get',
project_views.register.node_register_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/register/<metaschema_id>/',
'/project/<pid>/node/<nid>/register/<metaschema_id>/',
],
'get',
project_views.register.node_register_template_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'get',
project_views.node.node_registrations,
OsfWebRenderer('project/registrations.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'post',
project_views.drafts.new_draft_registration,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/',
],
'get',
project_views.drafts.edit_draft_registration_page,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
],
'get',
project_views.drafts.draft_before_register_page,
OsfWebRenderer('project/register_draft.mako', trust=False)),
Rule(
[
'/project/<pid>/retraction/',
'/project/<pid>/node/<nid>/retraction/',
],
'get',
project_views.register.node_registration_retraction_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/',
],
'get',
project_views.register.node_registration_retraction_get,
OsfWebRenderer('project/retract_registration.mako', trust=False)
),
Rule(
'/ids/<category>/<path:value>/',
'get',
project_views.register.get_referent_by_identifier,
notemplate,
),
# Statistics
Rule(
[
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
],
'get',
project_views.node.project_statistics_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/analytics/',
'/project/<pid>/node/<nid>/analytics/',
],
'get',
project_views.node.project_statistics,
OsfWebRenderer('project/statistics.mako', trust=False)
),
### Files ###
# Note: Web endpoint for files view must pass `mode` = `page` to
# include project view data and JS includes
# TODO: Start waterbutler to test
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/',
],
'get',
project_views.file.collect_file_trees,
OsfWebRenderer('project/files.mako', trust=False),
view_kwargs={'mode': 'page'},
),
Rule(
[
'/project/<pid>/files/<provider>/<path:path>/',
'/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
'/project/<pid>/files/deleted/<trashed_id>/',
'/project/<pid>/node/<nid>/files/deleted/<trashed_id>/',
],
'get',
addon_views.addon_deleted_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
# Legacy Addon view file paths
'/project/<pid>/<provider>/files/<path:path>/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/',
'/project/<pid>/<provider>/files/<path:path>/download/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/download/',
# Legacy routes for `download_file`
'/project/<pid>/osffiles/<fid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/download/',
# Legacy routes for `view_file`
'/project/<pid>/osffiles/<fid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/download/<fid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/',
'/project/<pid>/files/<fid>/',
'/project/<pid>/node/<nid>/files/<fid>/',
'/project/<pid>/files/download/<fid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/',
# Legacy routes for `download_file_by_version`
'/project/<pid>/osffiles/<fid>/version/<vid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/download/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/files/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/<fid>/version/<vid>/',
'/project/<pid>/files/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
OsfWebRenderer('project/view_file.mako', trust=False),
),
Rule(
[
# api/v1 Legacy routes for `download_file`
'/api/v1/project/<pid>/osffiles/<fid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/',
'/api/v1/project/<pid>/files/download/<fid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/',
# api/v1 Legacy routes for `download_file_by_version`
'/api/v1/project/<pid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/files/download/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
json_renderer
),
])
# API
process_rules(app, [
Rule(
'/email/meeting/',
'post',
conference_views.meeting_hook,
json_renderer,
),
Rule('/mailchimp/hooks/', 'get', profile_views.mailchimp_get_endpoint, json_renderer),
Rule('/mailchimp/hooks/', 'post', profile_views.sync_data_from_mailchimp, json_renderer),
# Create project, used by [coming replacement]
Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),
Rule([
'/project/<pid>/contributors_abbrev/',
'/project/<pid>/node/<nid>/contributors_abbrev/',
], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),
Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, json_renderer),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'get',
project_views.node.get_pointed,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'post',
project_views.node.add_pointers,
json_renderer,
),
Rule(
[
'/pointer/',
],
'post',
project_views.node.add_pointer,
json_renderer,
),
Rule(
[
'/pointers/move/',
],
'post',
project_views.node.move_pointers,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'delete',
project_views.node.remove_pointer,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointer/<pointer_id>',
],
'delete',
project_views.node.remove_pointer_from_folder,
json_renderer,
),
Rule([
'/project/<pid>/get_summary/',
'/project/<pid>/node/<nid>/get_summary/',
], 'get', project_views.node.get_summary, json_renderer),
# TODO: [#OSF-6557] Route "get_children" is deprecated. Use get_readable_descendants.
Rule([
'/project/<pid>/get_children/',
'/project/<pid>/node/<nid>/get_children/',
'/project/<pid>/get_readable_descendants/',
'/project/<pid>/node/<nid>/get_readable_descendants/',
], 'get', project_views.node.get_readable_descendants, json_renderer),
Rule([
'/project/<pid>/get_forks/',
'/project/<pid>/node/<nid>/get_forks/',
], 'get', project_views.node.get_forks, json_renderer),
Rule([
'/project/<pid>/get_registrations/',
'/project/<pid>/node/<nid>/get_registrations/',
], 'get', project_views.node.get_registrations, json_renderer),
# Draft Registrations
Rule([
'/project/<pid>/drafts/',
], 'get', project_views.drafts.get_draft_registrations, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'get', project_views.drafts.get_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'put', project_views.drafts.update_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'delete', project_views.drafts.delete_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/submit/',
], 'post', project_views.drafts.submit_draft_for_review, json_renderer),
# Meta Schemas
Rule([
'/project/drafts/schemas/',
], 'get', project_views.drafts.get_metaschemas, json_renderer),
Rule([
'/project/<pid>/get_contributors/',
'/project/<pid>/node/<nid>/get_contributors/',
], 'get', project_views.contributor.get_contributors, json_renderer),
Rule([
'/project/<pid>/get_contributors_from_parent/',
'/project/<pid>/node/<nid>/get_contributors_from_parent/',
], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),
# Reorder contributors
Rule(
[
'/project/<pid>/contributors/manage/',
'/project/<pid>/node/<nid>/contributors/manage/',
],
'POST',
project_views.contributor.project_manage_contributors,
json_renderer,
),
Rule(
[
'/project/<pid>/contributor/remove/',
'/project/<pid>/node/<nid>/contributor/remove/',
],
'POST',
project_views.contributor.project_remove_contributor,
json_renderer,
),
Rule([
'/project/<pid>/get_editable_children/',
'/project/<pid>/node/<nid>/get_editable_children/',
], 'get', project_views.node.get_editable_children, json_renderer),
# Private Link
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'post', project_views.node.project_generate_private_link_post, json_renderer),
Rule([
'/project/<pid>/private_link/edit/',
'/project/<pid>/node/<nid>/private_link/edit/',
], 'put', project_views.node.project_private_link_edit, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'delete', project_views.node.remove_private_link, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'get', project_views.node.private_link_table, json_renderer),
# Create, using existing project as a template
Rule([
'/project/new/<nid>/',
], 'post', project_views.node.project_new_from_template, json_renderer),
# Update
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'put',
project_views.node.update_node,
json_renderer,
),
# Remove
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'delete',
project_views.node.component_remove,
json_renderer,
),
# Reorder components
Rule('/project/<pid>/reorder_components/', 'post',
project_views.node.project_reorder_components, json_renderer),
# Edit node
Rule([
'/project/<pid>/edit/',
'/project/<pid>/node/<nid>/edit/',
], 'post', project_views.node.edit_node, json_renderer),
# Add / remove tags
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'post', project_views.tag.project_add_tag, json_renderer),
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'delete', project_views.tag.project_remove_tag, json_renderer),
# Add / remove contributors
Rule([
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
], 'post', project_views.contributor.project_contributors_post, json_renderer),
# Forks
Rule(
[
'/project/<pid>/fork/before/',
'/project/<pid>/node/<nid>/fork/before/',
], 'get', project_views.node.project_before_fork, json_renderer,
),
Rule(
[
'/project/<pid>/fork/',
'/project/<pid>/node/<nid>/fork/',
], 'post', project_views.node.node_fork_page, json_renderer,
),
Rule(
[
'/project/<pid>/pointer/fork/',
'/project/<pid>/node/<nid>/pointer/fork/',
], 'post', project_views.node.fork_pointer, json_renderer,
),
# View forks
Rule([
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
], 'get', project_views.node.node_forks, json_renderer),
# Registrations
Rule([
'/project/<pid>/beforeregister/',
'/project/<pid>/node/<nid>/beforeregister',
], 'get', project_views.register.project_before_register, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
], 'post', project_views.drafts.register_draft_registration, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'get', project_views.register.node_register_template_page, json_renderer),
Rule([
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/'
], 'post', project_views.register.node_registration_retraction_post, json_renderer),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'get',
project_views.register.node_identifiers_get,
json_renderer,
),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'post',
project_views.register.node_identifiers_post,
json_renderer,
),
# Statistics
Rule([
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
], 'get', project_views.node.project_statistics, json_renderer),
# Permissions
Rule([
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
], 'post', project_views.node.project_set_privacy, json_renderer),
Rule([
'/project/<pid>/permissions/beforepublic/',
'/project/<pid>/node/<nid>/permissions/beforepublic/',
], 'get', project_views.node.project_before_set_public, json_renderer),
### Watching ###
Rule([
'/project/<pid>/watch/',
'/project/<pid>/node/<nid>/watch/'
], 'post', project_views.node.watch_post, json_renderer),
Rule([
'/project/<pid>/unwatch/',
'/project/<pid>/node/<nid>/unwatch/'
], 'post', project_views.node.unwatch_post, json_renderer),
Rule([
'/project/<pid>/togglewatch/',
'/project/<pid>/node/<nid>/togglewatch/'
], 'post', project_views.node.togglewatch_post, json_renderer),
# Combined files
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/'
],
'get',
project_views.file.collect_file_trees,
json_renderer,
),
# Endpoint to fetch Rubeus.JS/Hgrid-formatted data
Rule(
[
'/project/<pid>/files/grid/',
'/project/<pid>/node/<nid>/files/grid/'
],
'get',
project_views.file.grid_data,
json_renderer
),
# Settings
Rule(
'/files/auth/',
'get',
addon_views.get_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/waterbutler/logs/',
'/project/<pid>/node/<nid>/waterbutler/logs/',
],
'put',
addon_views.create_waterbutler_log,
json_renderer,
),
Rule(
[
'/registration/<pid>/callbacks/',
],
'put',
project_views.register.registration_callbacks,
json_renderer,
),
Rule(
'/settings/addons/',
'post',
profile_views.user_choose_addons,
json_renderer,
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
json_renderer,
),
Rule(
'/settings/notifications/',
'post',
profile_views.user_choose_mailing_lists,
json_renderer,
),
Rule(
'/subscriptions/',
'get',
notification_views.get_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/subscriptions/',
'/project/<pid>/node/<nid>/subscriptions/'
],
'get',
notification_views.get_node_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/tree/',
'/project/<pid>/node/<nid>/tree/'
],
'get',
project_views.node.get_node_tree,
json_renderer,
),
Rule(
'/subscriptions/',
'post',
notification_views.configure_subscription,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/addons/',
'/project/<pid>/node/<nid>/settings/addons/',
],
'post',
project_views.node.node_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/comments/',
'/project/<pid>/node/<nid>/settings/comments/',
],
'post',
project_views.node.configure_comments,
json_renderer,
),
# Invite Users
Rule(
[
'/project/<pid>/invite_contributor/',
'/project/<pid>/node/<nid>/invite_contributor/'
],
'post',
project_views.contributor.invite_contributor_post,
json_renderer
)
], prefix='/api/v1')
# Set up static routing for addons
# NOTE: We use nginx to serve static addon assets in production
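# Illustrative mapping (hypothetical addon/file names): in DEV_MODE a request
# for /static/addons/wiki/js/foo.js is served from
# website/addons/wiki/static/js/foo.js by the route defined below.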
addon_base_path = os.path.abspath('website/addons')
if settings.DEV_MODE:
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
addon_path = os.path.join(addon_base_path, addon, 'static')
return send_from_directory(addon_path, filename)
|
apache-2.0
| 8,155,702,705,884,056,000
| 30.263286
| 147
| 0.506819
| false
| 4.252077
| false
| false
| false
|
Eric89GXL/vispy
|
vispy/visuals/graphs/layouts/force_directed.py
|
1
|
7502
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Force-Directed Graph Layout
===========================
This module contains implementations for a force-directed layout, where the
graph is modelled either as a collection of springs or as a collection of
particles attracting and repelling each other. The whole graph tries to
reach a state of minimum energy.
"""
import numpy as np
try:
from scipy.sparse import issparse
except ImportError:
def issparse(*args, **kwargs):
return False
from ..util import _straight_line_vertices, _rescale_layout
class fruchterman_reingold(object):
"""
Fruchterman-Reingold implementation adapted from NetworkX.
In the Fruchterman-Reingold algorithm, the whole graph is modelled as a
collection of particles, and a simplified particle simulation is run to
find a nice layout for the graph.
Parameters
----------
optimal : number
Optimal distance between nodes. Defaults to :math:`1/\\sqrt{N}` where
N is the number of nodes.
iterations : int
Number of iterations to perform for layout calculation.
pos : array
Initial positions of the nodes
Notes
-----
The algorithm is explained in more detail in the original paper [1]_.
.. [1] Fruchterman, Thomas MJ, and Edward M. Reingold. "Graph drawing by
force-directed placement." Softw., Pract. Exper. 21.11 (1991),
1129-1164.
"""
def __init__(self, optimal=None, iterations=50, pos=None):
self.dim = 2
self.optimal = optimal
self.iterations = iterations
self.num_nodes = None
self.pos = pos
def __call__(self, adjacency_mat, directed=False):
"""
Starts the calculation of the graph layout.
This is a generator, and after each iteration it yields the new
positions for the nodes, together with the vertices for the edges
and the arrows.
There are two solvers here: one specially adapted for SciPy sparse
matrices, and the other for larger networks.
Parameters
----------
adjacency_mat : array
The graph adjacency matrix.
directed : bool
Whether the graph is directed or not. If this is True,
it will draw arrows for directed edges.
Yields
------
layout : tuple
For each iteration of the layout calculation it yields a tuple
containing (node_vertices, line_vertices, arrow_vertices). These
vertices can be passed to the `MarkersVisual` and `ArrowVisual`.
"""
if adjacency_mat.shape[0] != adjacency_mat.shape[1]:
raise ValueError("Adjacency matrix should be square.")
self.num_nodes = adjacency_mat.shape[0]
if issparse(adjacency_mat):
# Use the sparse solver
solver = self._sparse_fruchterman_reingold
else:
solver = self._fruchterman_reingold
for result in solver(adjacency_mat, directed):
yield result
def _fruchterman_reingold(self, adjacency_mat, directed=False):
if self.optimal is None:
self.optimal = 1 / np.sqrt(self.num_nodes)
if self.pos is None:
# Random initial positions
pos = np.asarray(
np.random.random((self.num_nodes, self.dim)),
dtype=np.float32
)
else:
pos = self.pos.astype(np.float32)
# Yield initial positions
line_vertices, arrows = _straight_line_vertices(adjacency_mat, pos,
directed)
yield pos, line_vertices, arrows
# The initial "temperature" is about .1 of domain area (=1x1)
# this is the largest step allowed in the dynamics.
t = 0.1
# Simple cooling scheme.
# Linearly step down by dt on each iteration so last iteration is
# size dt.
dt = t / float(self.iterations+1)
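# Worked example with the default iterations=50: dt = 0.1 / 51 ~= 0.00196,
# and the temperature is decremented by dt after every iteration, so it
# falls linearly from 0.1 down to dt over the course of the run.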
# The inscrutable (but fast) version
# This is still O(V^2)
# Could use multilevel methods to speed this up significantly
for iteration in range(self.iterations):
delta_pos = _calculate_delta_pos(adjacency_mat, pos, t,
self.optimal)
pos += delta_pos
_rescale_layout(pos)
# cool temperature
t -= dt
# Calculate edge vertices and arrows
line_vertices, arrows = _straight_line_vertices(adjacency_mat,
pos, directed)
yield pos, line_vertices, arrows
def _sparse_fruchterman_reingold(self, adjacency_mat, directed=False):
# Optimal distance between nodes
if self.optimal is None:
self.optimal = 1 / np.sqrt(self.num_nodes)
# Change to list of list format
# Also construct the matrix in COO format for easy edge construction
adjacency_arr = adjacency_mat.toarray()
adjacency_coo = adjacency_mat.tocoo()
if self.pos is None:
# Random initial positions
pos = np.asarray(
np.random.random((self.num_nodes, self.dim)),
dtype=np.float32
)
else:
pos = self.pos.astype(np.float32)
# Yield initial positions
line_vertices, arrows = _straight_line_vertices(adjacency_coo, pos,
directed)
yield pos, line_vertices, arrows
# The initial "temperature" is about .1 of domain area (=1x1)
# This is the largest step allowed in the dynamics.
t = 0.1
# Simple cooling scheme.
# Linearly step down by dt on each iteration so last iteration is
# size dt.
dt = t / float(self.iterations+1)
for iteration in range(self.iterations):
delta_pos = _calculate_delta_pos(adjacency_arr, pos, t,
self.optimal)
pos += delta_pos
_rescale_layout(pos)
# Cool temperature
t -= dt
# Calculate line vertices
line_vertices, arrows = _straight_line_vertices(adjacency_coo,
pos, directed)
yield pos, line_vertices, arrows
def _calculate_delta_pos(adjacency_arr, pos, t, optimal):
"""Helper to calculate the delta position"""
# XXX eventually this should be refactored for the sparse case to only
# do the necessary pairwise distances
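# The expression below implements the classic Fruchterman-Reingold forces:
# per pair, delta * optimal**2 / distance**2 is the repulsive term
# (magnitude optimal**2 / distance) and delta * adjacency * distance / optimal
# is the attractive term (magnitude adjacency * distance**2 / optimal),
# summed over all other nodes.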
delta = pos[:, np.newaxis, :] - pos
# Distance between points
distance2 = (delta*delta).sum(axis=-1)
# Enforce minimum distance of 0.01
distance2 = np.where(distance2 < 0.0001, 0.0001, distance2)
distance = np.sqrt(distance2)
# Displacement "force"
displacement = np.zeros((len(delta), 2))
for ii in range(2):
displacement[:, ii] = (
delta[:, :, ii] *
((optimal * optimal) / (distance*distance) -
(adjacency_arr * distance) / optimal)).sum(axis=1)
length = np.sqrt((displacement**2).sum(axis=1))
length = np.where(length < 0.01, 0.1, length)
delta_pos = displacement * t / length[:, np.newaxis]
return delta_pos
|
bsd-3-clause
| -9,037,603,166,237,468,000
| 34.386792
| 77
| 0.589443
| false
| 4.274644
| false
| false
| false
|
mrachinskiy/booltron
|
ops_destructive/destructive_func.py
|
1
|
5385
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# Booltron super add-on for super fast booleans.
# Copyright (C) 2014-2021 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
from .. import var, lib
from . import mesh_lib
def cursor_state(func):
def wrapper(*args):
bpy.context.window.cursor_set("WAIT")
result = func(*args)
bpy.context.window.cursor_set("DEFAULT")
return result
return wrapper
def prepare_objects(self, context):
ob1 = context.object
obs = context.selected_objects
if ob1.select_get():
obs.remove(ob1)
if self.keep_objects:
space_data = context.space_data
use_local_view = bool(space_data.local_view)
obs_copy = []
app = obs_copy.append
for ob in obs:
ob_copy = ob.copy()
ob_copy.data = ob.data.copy()
for coll in ob.users_collection:
coll.objects.link(ob_copy)
if use_local_view:
ob_copy.local_view_set(space_data, True)
ob_copy.select_set(True)
ob.select_set(False)
app(ob_copy)
obs = obs_copy
bpy.ops.object.make_single_user(object=True, obdata=True)
bpy.ops.object.convert(target="MESH")
if self.use_pos_offset:
lib.object_offset(obs, self.pos_offset)
return obs
@cursor_state
def execute(self, context):
Mesh = mesh_lib.Utils(self)
boolean_mod = lib.ModUtils(self).add
ob1 = context.object
obs = prepare_objects(self, context)
ob2 = obs.pop()
if obs:
if self.is_overlap:
Mesh.prepare(ob2, select=True)
for ob3 in obs:
Mesh.prepare(ob3, select=True)
boolean_mod(ob2, ob3, "UNION")
if self.cleanup:
Mesh.cleanup(ob2)
else:
obs.append(ob2)
override = {
"active_object": ob2,
"selected_editable_objects": obs,
}
bpy.ops.object.join(override)
if not self.is_overlap:
Mesh.prepare(ob2, select=True)
Mesh.prepare(ob1, select=False)
boolean_mod(ob1, ob2, self.mode)
if self.cleanup:
Mesh.cleanup(ob1)
Mesh.check(ob1)
return {"FINISHED"}
def invoke(self, context, event):
obs = []
app = obs.append
for ob in context.selected_objects:
if ob.type not in {"MESH", "CURVE", "SURFACE", "META", "FONT"}:
ob.select_set(False)
continue
app(ob)
if len(obs) < 2:
self.report({"ERROR"}, "At least two objects must be selected")
return {"CANCELLED"}
if self.first_run:
self.first_run = False
prefs = context.preferences.addons[var.ADDON_ID].preferences
self.solver = prefs.solver
self.threshold = prefs.threshold
self.use_pos_offset = prefs.use_pos_offset
self.pos_offset = prefs.pos_offset
self.merge_distance = prefs.merge_distance
self.cleanup = prefs.cleanup
self.triangulate = prefs.triangulate
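# Modifier keys read from the invoking event: holding Alt keeps the original
# boolean operands instead of consuming them, and holding Ctrl (checked
# further below) opens the operator properties dialog instead of executing
# immediately.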
self.keep_objects = event.alt
self.is_overlap = False
if len(obs) > 2 and self.mode is not None:
obs.remove(context.object)
self.is_overlap = mesh_lib.detect_overlap(context, obs, self.merge_distance)
if event.ctrl:
wm = context.window_manager
return wm.invoke_props_dialog(self)
return self.execute(context)
@cursor_state
def execute_slice(self, context):
Mesh = mesh_lib.Utils(self)
boolean_mod = lib.ModUtils(self).add
space_data = context.space_data
use_local_view = bool(space_data.local_view)
ob1 = context.object
obs = prepare_objects(self, context)
Mesh.prepare(ob1, select=False)
for ob2 in obs:
Mesh.prepare(ob2, select=True)
# Create copy of main object
# ---------------------------------
ob1_copy = ob1.copy()
ob1_copy.data = ob1.data.copy()
for coll in ob1.users_collection:
coll.objects.link(ob1_copy)
if use_local_view:
ob1_copy.local_view_set(space_data, True)
ob1_copy.select_set(True)
# Main object difference
# ---------------------------------
boolean_mod(ob1, ob2, "DIFFERENCE", remove_ob2=False)
if Mesh.check(ob1):
return {"FINISHED"}
# Copy object intersect
# ---------------------------------
boolean_mod(ob1_copy, ob2, "INTERSECT")
if Mesh.check(ob1_copy):
return {"FINISHED"}
if self.cleanup:
Mesh.cleanup(ob1)
ob1.select_set(False)
context.view_layer.objects.active = ob1_copy
return {"FINISHED"}
|
gpl-3.0
| 4,390,248,468,535,569,000
| 25.268293
| 84
| 0.591272
| false
| 3.609249
| false
| false
| false
|
googleapis/python-pubsub
|
google/pubsub_v1/services/schema_service/pagers.py
|
1
|
5563
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.pubsub_v1.types import schema
class ListSchemasPager:
"""A pager for iterating through ``list_schemas`` requests.
This class thinly wraps an initial
:class:`google.pubsub_v1.types.ListSchemasResponse` object, and
provides an ``__iter__`` method to iterate through its
``schemas`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListSchemas`` requests and continue to iterate
through the ``schemas`` field on the
corresponding responses.
All the usual :class:`google.pubsub_v1.types.ListSchemasResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., schema.ListSchemasResponse],
request: schema.ListSchemasRequest,
response: schema.ListSchemasResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.pubsub_v1.types.ListSchemasRequest):
The initial request object.
response (google.pubsub_v1.types.ListSchemasResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = schema.ListSchemasRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[schema.ListSchemasResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[schema.Schema]:
for page in self.pages:
yield from page.schemas
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListSchemasAsyncPager:
"""A pager for iterating through ``list_schemas`` requests.
This class thinly wraps an initial
:class:`google.pubsub_v1.types.ListSchemasResponse` object, and
provides an ``__aiter__`` method to iterate through its
``schemas`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListSchemas`` requests and continue to iterate
through the ``schemas`` field on the
corresponding responses.
All the usual :class:`google.pubsub_v1.types.ListSchemasResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[schema.ListSchemasResponse]],
request: schema.ListSchemasRequest,
response: schema.ListSchemasResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.pubsub_v1.types.ListSchemasRequest):
The initial request object.
response (google.pubsub_v1.types.ListSchemasResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = schema.ListSchemasRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[schema.ListSchemasResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[schema.Schema]:
async def async_generator():
async for page in self.pages:
for response in page.schemas:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
apache-2.0
| 3,531,521,777,943,690,000
| 34.433121
| 87
| 0.6421
| false
| 4.415079
| false
| false
| false
|
pozetroninc/micropython
|
tools/codeformat.py
|
1
|
5981
|
#!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Damien P. George
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import glob
import itertools
import os
import re
import subprocess
# Relative to top-level repo dir.
PATHS = [
# C
"extmod/*.[ch]",
"lib/netutils/*.[ch]",
"lib/timeutils/*.[ch]",
"lib/utils/*.[ch]",
"mpy-cross/*.[ch]",
"ports/*/*.[ch]",
"ports/windows/msvc/**/*.[ch]",
"py/*.[ch]",
# Python
"drivers/**/*.py",
"examples/**/*.py",
"extmod/**/*.py",
"ports/**/*.py",
"py/**/*.py",
"tools/**/*.py",
]
EXCLUSIONS = [
# STM32 build includes generated Python code.
"ports/*/build*",
# gitignore in ports/unix ignores *.py, so also do it here.
"ports/unix/*.py",
]
# Path to repo top-level dir.
TOP = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
UNCRUSTIFY_CFG = os.path.join(TOP, "tools/uncrustify.cfg")
C_EXTS = (
".c",
".h",
)
PY_EXTS = (".py",)
FIXUP_REPLACEMENTS = ((re.compile(r"sizeof\(([a-z_]+)\) \*\(([a-z_]+)\)"), r"sizeof(\1) * (\2)"),)
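# Example of the fixup above (illustrative identifiers):
# "sizeof(mp_obj_t) *(n)" becomes "sizeof(mp_obj_t) * (n)".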
def list_files(paths, exclusions=None, prefix=""):
files = set()
for pattern in paths:
files.update(glob.glob(os.path.join(prefix, pattern), recursive=True))
for pattern in exclusions or []:
files.difference_update(glob.fnmatch.filter(files, os.path.join(prefix, pattern)))
return sorted(files)
def fixup_c(filename):
# Read file.
with open(filename) as f:
lines = f.readlines()
# Write out file with fixups.
with open(filename, "w", newline="") as f:
dedent_stack = []
while lines:
# Get next line.
l = lines.pop(0)
# Dedent #'s to match indent of following line (not previous line).
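# Illustrative example (hypothetical snippet): an "#if FOO" emitted one
# indent level deeper than a following "case 1:" (or "} else") is shifted
# left by four spaces, together with its matching #elif/#else/#endif, so
# the preprocessor lines line up with the case/else they guard.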
m = re.match(r"( +)#(if |ifdef |ifndef |elif |else|endif)", l)
if m:
indent = len(m.group(1))
directive = m.group(2)
if directive in ("if ", "ifdef ", "ifndef "):
l_next = lines[0]
indent_next = len(re.match(r"( *)", l_next).group(1))
if indent - 4 == indent_next and re.match(r" +(} else |case )", l_next):
# This #-line (and all associated ones) needs dedenting by 4 spaces.
l = l[4:]
dedent_stack.append(indent - 4)
else:
# This #-line does not need dedenting.
dedent_stack.append(-1)
else:
if dedent_stack[-1] >= 0:
# This associated #-line needs dedenting to match the #if.
indent_diff = indent - dedent_stack[-1]
assert indent_diff >= 0
l = l[indent_diff:]
if directive == "endif":
dedent_stack.pop()
# Apply general regex-based fixups.
for regex, replacement in FIXUP_REPLACEMENTS:
l = regex.sub(replacement, l)
# Write out line.
f.write(l)
assert not dedent_stack, filename
def main():
cmd_parser = argparse.ArgumentParser(description="Auto-format C and Python files.")
cmd_parser.add_argument("-c", action="store_true", help="Format C code only")
cmd_parser.add_argument("-p", action="store_true", help="Format Python code only")
cmd_parser.add_argument("files", nargs="*", help="Run on specific globs")
args = cmd_parser.parse_args()
# Setting only one of -c or -p disables the other. If both or neither are set, then do both.
format_c = args.c or not args.p
format_py = args.p or not args.c
# Expand the globs passed on the command line, or use the default globs above.
files = []
if args.files:
files = list_files(args.files)
else:
files = list_files(PATHS, EXCLUSIONS, TOP)
# Extract files matching a specific language.
def lang_files(exts):
for file in files:
if os.path.splitext(file)[1].lower() in exts:
yield file
# Run tool on N files at a time (to avoid making the command line too long).
def batch(cmd, files, N=200):
while True:
file_args = list(itertools.islice(files, N))
if not file_args:
break
subprocess.check_call(cmd + file_args)
# Format C files with uncrustify.
if format_c:
batch(["uncrustify", "-c", UNCRUSTIFY_CFG, "-lC", "--no-backup"], lang_files(C_EXTS))
for file in lang_files(C_EXTS):
fixup_c(file)
# Format Python files with black.
if format_py:
batch(["black", "-q", "--fast", "--line-length=99"], lang_files(PY_EXTS))
if __name__ == "__main__":
main()
|
mit
| -5,834,360,555,829,730,000
| 32.982955
| 97
| 0.584852
| false
| 3.742804
| false
| false
| false
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/model/migrate/versions/0056_workflow_outputs.py
|
1
|
1144
|
"""
Migration script to create tables for adding explicit workflow outputs.
"""
from sqlalchemy import *
from sqlalchemy.orm import *
from migrate import *
from migrate.changeset import *
import logging
logging.basicConfig( level=logging.DEBUG )
log = logging.getLogger( __name__ )
# Need our custom types, but don't import anything else from model
from galaxy.model.custom_types import *
metadata = MetaData()
WorkflowOutput_table = Table( "workflow_output", metadata,
Column( "id", Integer, primary_key=True ),
Column( "workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
Column( "output_name", String(255), nullable=True))
tables = [WorkflowOutput_table]
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print __doc__
metadata.reflect()
for table in tables:
try:
table.create()
except:
log.warn( "Failed to create table '%s', ignoring (might result in wrong schema)" % table.name )
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
for table in tables:
table.drop()
|
gpl-3.0
| 321,194,510,091,218,500
| 27.6
| 107
| 0.693182
| false
| 3.958478
| false
| false
| false
|
dc3-plaso/plaso
|
plaso/cli/extract_analyze_tool.py
|
1
|
2810
|
# -*- coding: utf-8 -*-
"""The extraction and analysis CLI tool."""
import datetime
import os
from plaso.cli import status_view_tool
from plaso.lib import errors
class ExtractionAndAnalysisTool(status_view_tool.StatusViewTool):
"""Class that implements a combined extraction and analysis CLI tool."""
def __init__(self, input_reader=None, output_writer=None):
"""Initializes the CLI tool object.
Args:
input_reader (InputReader): the input reader, where None represents stdin.
output_writer (OutputWriter): the output writer, where None represents
stdout.
"""
super(ExtractionAndAnalysisTool, self).__init__(
input_reader=input_reader, output_writer=output_writer)
self._storage_file_path = None
def _GenerateStorageFileName(self):
"""Generates a name for the storage file.
The result uses a timestamp and the basename of the source path.
Raises:
BadConfigOption: raised if the source path is not set.
"""
if not self._source_path:
raise errors.BadConfigOption(u'Please define a source (--source).')
timestamp = datetime.datetime.now()
datetime_string = timestamp.strftime(u'%Y%m%dT%H%M%S')
source_path = os.path.abspath(self._source_path)
source_name = os.path.basename(source_path)
if source_path.endswith(os.path.sep):
source_path = os.path.dirname(source_path)
if source_path == os.path.sep:
# The user passed the filesystem's root as source
source_name = u'ROOT'
else:
source_name = os.path.basename(source_path)
return u'{0:s}-{1:s}.plaso'.format(datetime_string, source_name)
def _ParseStorageFileOptions(self, options):
"""Parses the storage file options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
self._storage_file_path = self.ParseStringOption(options, u'storage_file')
if not self._storage_file_path:
self._storage_file_path = self._GenerateStorageFileName()
def AddStorageFileOptions(self, argument_group):
"""Adds the storage file options to the argument group.
Args:
argument_group (argparse._ArgumentGroup or argparse.ArgumentParser):
argument group or argument parser.
"""
argument_group.add_argument(
u'--storage_file', action=u'store', metavar=u'STORAGE_FILE', nargs=u'?',
type=str, default=None, help=u'The path of the storage file.')
def ParseOptions(self, options):
"""Parses tool specific options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
super(ExtractionAndAnalysisTool, self).ParseOptions(options)
self._ParseStorageFileOptions(options)
|
apache-2.0
| 8,326,963,996,448,086,000
| 30.931818
| 80
| 0.688968
| false
| 3.935574
| false
| false
| false
|
lmarent/network_agents_ver2_python
|
agents/GraphExecution.py
|
1
|
4105
|
import multiprocessing
from ProviderAgentException import ProviderException
import MySQLdb
import logging
import foundation.agent_properties
logger = logging.getLogger('presenter_application')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('presenter_logs.log')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
def load_offered_data(cursor, offer_variable):
if offer_variable != None:
sql_variable = "select name, type, function, decision_variable_id \
from simulation_offeringdata \
where id = '%d' " % (offer_variable)
cursor.execute(sql_variable)
# Fetch all the rows in a list of lists.
variables_res = cursor.fetchall()
variable_def = {}
for row in variables_res:
variable_def['id'] = offer_variable
variable_def['name'] = row[0]
variable_def['type'] = row[1]
variable_def['function'] = row[2]
variable_def['decision_variable'] = str(row[3])
else:
variable_def = {}
return variable_def
def load_graphics(cursor2, cursor3, graphics):
sql_graphics = "select b.graphic_id, c.name, b.detail, b.label_id, \
b.color_id, b.x_axis_id, b.y_axis_id, b.column1_id, \
b.column2_id, b.column3_id, b.column4_id \
from simulation_presenter_graphic a, \
simulation_axis_graphic b, \
simulation_graphic c \
where a.graphic_id = b.graphic_id \
and a.presenter_id = ( select d.id \
from simulation_presenter d \
limit 1 ) \
and c.id = a.graphic_id \
order by b.graphic_id"
cursor2.execute(sql_graphics)
# Fetch all the rows in a list of lists.
graphics_res = cursor2.fetchall()
for row in graphics_res:
# Establish detail property
if (row[2] == 1):
detail = True
else:
detail = False
# Establish label property
if ( row[3] > 0 ):
label = load_offered_data(cursor3, row[3])
else:
label = None
# Establish color property
if ( row[4] > 0 ):
color = load_offered_data(cursor3, row[4])
colors = {}
else:
color = None
variable_x = load_offered_data(cursor3, row[5])
variable_y = load_offered_data(cursor3, row[6])
column1 = load_offered_data(cursor3, row[7])
column2 = load_offered_data(cursor3, row[8])
column3 = load_offered_data(cursor3, row[9])
column4 = load_offered_data(cursor3, row[10])
graphics[row[0]] = {'name': row[1], 'detail': detail,
'x_axis' : variable_x, 'y_axis' : variable_y,
'label' : label, 'color' : color,
'instance_colors' : colors, 'column1' : column1,
'column2' : column2, 'column3' : column3, 'column4' : column4}
if __name__ == '__main__':
'''
The PresenterExecution starts the threads for the presenter
agents.
'''
try:
# Open database connection
db = MySQLdb.connect(foundation.agent_properties.addr_database,foundation.agent_properties.user_database,
foundation.agent_properties.user_password,foundation.agent_properties.database_name )
# prepare a cursor object using cursor() method
cursor = db.cursor()
graphics = {}
cursor3 = db.cursor()
cursor4 = db.cursor()
logger.info('Ready to load Graphics')
load_graphics(cursor3, cursor4, graphics)
print graphics
logger.info('Graphics loaded')
except ProviderException as e:
print e.__str__()
except Exception as e:
print e.__str__()
|
mit
| -8,014,453,808,743,486,000
| 38.471154
| 114
| 0.552741
| false
| 4.016634
| false
| false
| false
|
Newsboy-VA/Newsboy-Core
|
core/core.py
|
1
|
1657
|
#!/usr/bin/env python3
import asyncio
import sys
import logging
import argparse
from nlu import NLU
from client_communication import VAClientHandler
from module_communication import VAModuleHandler
class VirtualAssistant(object):
''' '''
def __init__(self):
parser = argparse.ArgumentParser(
description='Start the Virtual Assistant Core.')
parser.add_argument('--port', type=int, default=55801)
parser.add_argument('--log-level', type=str.upper, default='INFO')
args = parser.parse_args()
self.log_level = args.log_level.lower()
FORMAT = '%(asctime)-15s %(levelname)-5s (PID %(process)d) %(message)s'
logging.basicConfig(
filename='{}.log'.format(self.log_level.lower()),
level=getattr(logging, self.log_level.upper()),
format=FORMAT,
)
self.loop = asyncio.get_event_loop()
self.nlu = NLU()
self.client_handler = VAClientHandler(self, args.port)
self.module_handler = VAModuleHandler(self, args.port+1)
def __enter__(self):
self.loop.run_forever()
return self
def __exit__(self, type, value, traceback):
logging.info("Shutting down...")
# Close the servers
self.client_handler.close()
self.module_handler.close()
self.loop.stop()
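# loop.shutdown_asyncgens() only exists on Python 3.6+, hence the version
# check below.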
if sys.version_info[1] >= 6:
self.loop.run_until_complete(self.loop.shutdown_asyncgens())
self.loop.close()
logging.shutdown()
return isinstance(value, KeyboardInterrupt)
if __name__ == "__main__":
with VirtualAssistant() as VA:
pass
|
gpl-3.0
| 4,354,760,709,866,121,000
| 27.084746
| 79
| 0.613156
| false
| 3.908019
| false
| false
| false
|
josiah-wolf-oberholtzer/discograph
|
discograph/ui.py
|
1
|
3715
|
# -*- encoding: utf-8 -*-
import json
from flask import Blueprint
from flask import current_app
from flask import make_response
from flask import request
from flask import render_template
from flask import url_for
from discograph import exceptions
from discograph import helpers
blueprint = Blueprint('ui', __name__, template_folder='templates')
default_roles = (
'Alias',
'Member Of',
'Sublabel Of',
'Released On',
)
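# Relation types applied when the request does not specify any roles of its
# own (see the `if not original_roles` checks in the view functions below).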
@blueprint.route('/')
def route__index():
import discograph
app = current_app._get_current_object()
is_a_return_visitor = request.cookies.get('is_a_return_visitor')
initial_json = 'var dgData = null;'
on_mobile = request.MOBILE
parsed_args = helpers.parse_request_args(request.args)
original_roles, original_year = parsed_args
if not original_roles:
original_roles = default_roles
multiselect_mapping = discograph.CreditRole.get_multiselect_mapping()
url = url_for(
request.endpoint,
roles=original_roles,
)
rendered_template = render_template(
'index.html',
application_url=app.config['APPLICATION_ROOT'],
initial_json=initial_json,
is_a_return_visitor=is_a_return_visitor,
multiselect_mapping=multiselect_mapping,
og_title='Disco/graph: visualizing music as a social graph',
og_url=url,
on_mobile=on_mobile,
original_roles=original_roles,
original_year=original_year,
title='Disco/graph: Visualizing music as a social graph',
)
response = make_response(rendered_template)
response.set_cookie('is_a_return_visitor', 'true')
return response
@blueprint.route('/<entity_type>/<int:entity_id>')
def route__entity_type__entity_id(entity_type, entity_id):
import discograph
app = current_app._get_current_object()
parsed_args = helpers.parse_request_args(request.args)
original_roles, original_year = parsed_args
if not original_roles:
original_roles = default_roles
if entity_type not in ('artist', 'label'):
raise exceptions.APIError(message='Bad Entity Type', status_code=404)
on_mobile = request.MOBILE
data = helpers.get_network(
entity_id,
entity_type,
on_mobile=on_mobile,
cache=True,
roles=original_roles,
)
if data is None:
raise exceptions.APIError(message='No Data', status_code=500)
initial_json = json.dumps(
data,
sort_keys=True,
indent=4,
separators=(',', ': '),
)
initial_json = 'var dgData = {};'.format(initial_json)
entity_name = data['center']['name']
is_a_return_visitor = request.cookies.get('is_a_return_visitor')
key = '{}-{}'.format(entity_type, entity_id)
#url = '/{}/{}'.format(entity_type, entity_id)
url = url_for(
request.endpoint,
entity_type=entity_type,
entity_id=entity_id,
roles=original_roles,
)
title = 'Disco/graph: {}'.format(entity_name)
multiselect_mapping = discograph.CreditRole.get_multiselect_mapping()
rendered_template = render_template(
'index.html',
application_url=app.config['APPLICATION_ROOT'],
initial_json=initial_json,
is_a_return_visitor=is_a_return_visitor,
key=key,
multiselect_mapping=multiselect_mapping,
og_title='Disco/graph: The "{}" network'.format(entity_name),
og_url=url,
on_mobile=on_mobile,
original_roles=original_roles,
original_year=original_year,
title=title,
)
response = make_response(rendered_template)
response.set_cookie('is_a_return_visitor', 'true')
return response
|
mit
| 2,357,549,400,716,320,300
| 31.596491
| 77
| 0.644415
| false
| 3.678218
| false
| false
| false
|
cl4u2/chirp
|
chirp/xml_ll.py
|
1
|
7989
|
# Copyright 2008 Dan Smith <dsmith@danplanet.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from chirp import chirp_common, errors
def get_memory(doc, number):
"""Extract a Memory object from @doc"""
ctx = doc.xpathNewContext()
base = "//radio/memories/memory[@location=%i]" % number
fields = ctx.xpathEval(base)
if len(fields) > 1:
raise errors.RadioError("%i memories claiming to be %i" % (len(fields),
number))
elif len(fields) == 0:
raise errors.InvalidMemoryLocation("%i does not exist" % number)
memnode = fields[0]
def _get(ext):
path = base + ext
result = ctx.xpathEval(path)
if result:
return result[0].getContent()
else:
return ""
if _get("/mode/text()") == "DV":
mem = chirp_common.DVMemory()
mem.dv_urcall = _get("/dv/urcall/text()")
mem.dv_rpt1call = _get("/dv/rpt1call/text()")
mem.dv_rpt2call = _get("/dv/rpt2call/text()")
try:
mem.dv_code = _get("/dv/digitalCode/text()")
except ValueError:
mem.dv_code = 0
else:
mem = chirp_common.Memory()
mem.number = int(memnode.prop("location"))
mem.name = _get("/longName/text()")
mem.freq = chirp_common.parse_freq(_get("/frequency/text()"))
mem.rtone = float(_get("/squelch[@id='rtone']/tone/text()"))
mem.ctone = float(_get("/squelch[@id='ctone']/tone/text()"))
mem.dtcs = int(_get("/squelch[@id='dtcs']/code/text()"), 10)
mem.dtcs_polarity = _get("/squelch[@id='dtcs']/polarity/text()")
try:
sql = _get("/squelchSetting/text()")
if sql == "rtone":
mem.tmode = "Tone"
elif sql == "ctone":
mem.tmode = "TSQL"
elif sql == "dtcs":
mem.tmode = "DTCS"
else:
mem.tmode = ""
except IndexError:
mem.tmode = ""
dmap = {"positive" : "+", "negative" : "-", "none" : ""}
dupx = _get("/duplex/text()")
mem.duplex = dmap.get(dupx, "")
mem.offset = chirp_common.parse_freq(_get("/offset/text()"))
mem.mode = _get("/mode/text()")
mem.tuning_step = float(_get("/tuningStep/text()"))
skip = _get("/skip/text()")
if skip == "none":
mem.skip = ""
else:
mem.skip = skip
#FIXME: bank support in .chirp files needs to be re-written
#bank_id = _get("/bank/@bankId")
#if bank_id:
# mem.bank = int(bank_id)
# bank_index = _get("/bank/@bankIndex")
# if bank_index:
# mem.bank_index = int(bank_index)
return mem
def set_memory(doc, mem):
"""Set @mem in @doc"""
ctx = doc.xpathNewContext()
base = "//radio/memories/memory[@location=%i]" % mem.number
fields = ctx.xpathEval(base)
if len(fields) > 1:
raise errors.RadioError("%i memories claiming to be %i" % (len(fields),
mem.number))
elif len(fields) == 1:
fields[0].unlinkNode()
radio = ctx.xpathEval("//radio/memories")[0]
memnode = radio.newChild(None, "memory", None)
memnode.newProp("location", "%i" % mem.number)
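# Short names are limited to 6 characters drawn from upper-case letters,
# digits and the characters "/", space, ">" and "-", while long names keep
# 16 characters and additionally allow lower case and ".".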
sname_filter = "[^A-Z0-9/ >-]"
sname = memnode.newChild(None, "shortName", None)
sname.addContent(re.sub(sname_filter, "", mem.name.upper()[:6]))
lname_filter = "[^.A-Za-z0-9/ >-]"
lname = memnode.newChild(None, "longName", None)
lname.addContent(re.sub(lname_filter, "", mem.name[:16]))
freq = memnode.newChild(None, "frequency", None)
freq.newProp("units", "MHz")
freq.addContent(chirp_common.format_freq(mem.freq))
rtone = memnode.newChild(None, "squelch", None)
rtone.newProp("id", "rtone")
rtone.newProp("type", "repeater")
tone = rtone.newChild(None, "tone", None)
tone.addContent("%.1f" % mem.rtone)
ctone = memnode.newChild(None, "squelch", None)
ctone.newProp("id", "ctone")
ctone.newProp("type", "ctcss")
tone = ctone.newChild(None, "tone", None)
tone.addContent("%.1f" % mem.ctone)
dtcs = memnode.newChild(None, "squelch", None)
dtcs.newProp("id", "dtcs")
dtcs.newProp("type", "dtcs")
code = dtcs.newChild(None, "code", None)
code.addContent("%03i" % mem.dtcs)
polr = dtcs.newChild(None, "polarity", None)
polr.addContent(mem.dtcs_polarity)
sset = memnode.newChild(None, "squelchSetting", None)
if mem.tmode == "Tone":
sset.addContent("rtone")
elif mem.tmode == "TSQL":
sset.addContent("ctone")
elif mem.tmode == "DTCS":
sset.addContent("dtcs")
dmap = {"+" : "positive", "-" : "negative", "" : "none"}
dupx = memnode.newChild(None, "duplex", None)
dupx.addContent(dmap[mem.duplex])
oset = memnode.newChild(None, "offset", None)
oset.newProp("units", "MHz")
oset.addContent(chirp_common.format_freq(mem.offset))
mode = memnode.newChild(None, "mode", None)
mode.addContent(mem.mode)
step = memnode.newChild(None, "tuningStep", None)
step.newProp("units", "kHz")
step.addContent("%.5f" % mem.tuning_step)
if mem.skip:
skip = memnode.newChild(None, "skip", None)
skip.addContent(mem.skip)
#FIXME: .chirp bank support needs to be redone
#if mem.bank is not None:
# bank = memnode.newChild(None, "bank", None)
# bank.newProp("bankId", str(int(mem.bank)))
# if mem.bank_index >= 0:
# bank.newProp("bankIndex", str(int(mem.bank_index)))
if isinstance(mem, chirp_common.DVMemory):
dv = memnode.newChild(None, "dv", None)
ur = dv.newChild(None, "urcall", None)
ur.addContent(mem.dv_urcall)
r1 = dv.newChild(None, "rpt1call", None)
if mem.dv_rpt1call and mem.dv_rpt1call != "*NOTUSE*":
r1.addContent(mem.dv_rpt1call)
r2 = dv.newChild(None, "rpt2call", None)
if mem.dv_rpt2call and mem.dv_rpt2call != "*NOTUSE*":
r2.addContent(mem.dv_rpt2call)
dc = dv.newChild(None, "digitalCode", None)
dc.addContent(str(mem.dv_code))
def del_memory(doc, number):
"""Remove memory @number from @doc"""
path = "//radio/memories/memory[@location=%i]" % number
ctx = doc.xpathNewContext()
fields = ctx.xpathEval(path)
for field in fields:
field.unlinkNode()
def _get_bank(node):
bank = chirp_common.Bank(node.prop("label"))
ident = int(node.prop("id"))
return ident, bank
def get_banks(doc):
"""Return a list of banks from @doc"""
path = "//radio/banks/bank"
ctx = doc.xpathNewContext()
fields = ctx.xpathEval(path)
banks = []
for field in fields:
banks.append(_get_bank(field))
def _cmp(itema, itemb):
return itema[0] - itemb[0]
banks.sort(cmp=_cmp)
return [x[1] for x in banks]
def set_banks(doc, banklist):
"""Set the list of banks in @doc"""
path = "//radio/banks/bank"
ctx = doc.xpathNewContext()
fields = ctx.xpathEval(path)
for field in fields:
field.unlinkNode()
path = "//radio/banks"
ctx = doc.xpathNewContext()
banks = ctx.xpathEval(path)[0]
i = 0
for bank in banklist:
banknode = banks.newChild(None, "bank", None)
banknode.newProp("id", "%i" % i)
banknode.newProp("label", "%s" % bank)
i += 1
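# Illustrative usage sketch (hypothetical variable names; assumes `doc` is a
# libxml2 document parsed from a .chirp file and `mem` is a populated
# chirp_common.Memory):
#   set_memory(doc, mem)          # insert or replace the memory at mem.number
#   banks = get_banks(doc)        # ordered list of chirp_common.Bank objects
#   set_banks(doc, ["A", "B"])    # rewrite the bank list
#   del_memory(doc, mem.number)   # remove the memory again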
|
gpl-3.0
| 3,356,739,449,751,182,300
| 30.956
| 79
| 0.587182
| false
| 3.12926
| false
| false
| false
|
tedkulp/bossogg
|
boss3/Database.py
|
1
|
40212
|
#Boss Ogg - A Music Server
#(c)2003 by Ted Kulp (wishy@comcast.net)
#This project's homepage is: http://bossogg.wishy.org
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sqlite
import time
import os, sys
from boss3.bossexceptions import EndOfQueueException
from boss3.util import bosslog
from boss3.util.Session import *
import time
from boss3.util import UTFstring
import os
import string
import types
import thread
import threading
from boss3.metadata import id3
from boss3.metadata.id3 import getTag
try:
import boss3.ripper.ripper
from boss3.ripper.ripper import getCDDB
from boss3.ripper.ripper import pyrip
from boss3.ripper.ripper import pyrip_update
except Exception:
pass
log = bosslog.getLogger()
sql_lock = threading.RLock()
class Database:
conn = ""
dbname = ""
songcache = []
curindex = -1
genrecache = {}
artistcache = {}
albumcache = {}
getgenrestatus = False
getartiststatus = False
getmetaartiststatus = False
getalbumstatus = False
getsongstatus = False
#cursong = None
import_cursor = None
tables = {}
class _Cursor(sqlite.Cursor):
nolock=0
def getcaller(self):
f=sys._getframe(1)
f=f.f_back
return os.path.basename(f.f_code.co_filename), f.f_lineno
def execute(self, SQL, *args, **kwargs):
needlock=0
#if len(SQL)>0 and SQL.split()[0].lower() in ["delete", "update", "insert", "commit"] and not self.nolock:
needlock=1
if needlock:
sql_lock.acquire()
log.debug("lock", "Acquire lock for database writes", stack=1)
try:
log.debug("sqlquery", "SQL: "+SQL, stack=1, *args)
sqlite.Cursor.execute(self, SQL, *args)
except:
log.exception("SQL ERROR")
if "raise_except" in kwargs and kwargs["raise_except"] == 1:
if needlock:
sql_lock.release()
log.debug("lock", "Release lock for database writes", stack=1)
raise
if needlock:
sql_lock.release()
log.debug("lock", "Release lock for database writes", stack=1)
def begin(self):
self.execute("BEGIN TRANSACTION")
def commit(self):
self.execute("COMMIT TRANSACTION")
def rollback(self):
self.execute("ROLLBACK TRANSACTION")
def _cursor(self):
self.conn._checkNotClosed("cursor")
return self._Cursor(self.conn, self.conn.rowclass)
def loadTableStructures(self):
self.tables = {}
log.debug("funcs", "Database.tableStructures")
cursor = self.conn.cursor()
cursor.execute("select name,sql from sqlite_master where type='table'")
for row in cursor.fetchall():
self.tables[row[0]] = []
sql = row[1].split("\n")
for line in sql[1:-1]:
data = line.split()
field = data[0]
self.tables[row[0]].append(field)
cursor.close()
log.debug("import", "Got Table data %s", self.tables)
def connect(self, autocommit=True):
if ( (self.conn == None or self.conn == "") and self.dbname != ""):
self.conn = sqlite.connect(db=self.dbname, mode=755, autocommit=autocommit)
self.conn.cursor=self._cursor
def disconnect(self):
if (self.conn != None and self.conn != ""):
self.conn.close()
self.conn = None
def runScript(self,SQL):
cursor = self.conn.cursor()
cursor.execute(SQL)
#cursor.commit()
cursor.close()
def getSchemaVersion(self):
result = -1
cursor = self.conn.cursor()
SQL = "select versionnumber from version"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result = row['versionnumber']
return result
def setSchemaVersion(self, versionnumber):
cursor = self.conn.cursor()
SQL = "update version set versionnumber = %s"
cursor.execute(SQL, versionnumber)
def loadSongCache(self):
log.debug("funcs", "Database.loadSongCache() called")
cursor = self.conn.cursor()
SQL = """
SELECT songs.songid, songs.filename, songs.songlength, songs.flags
FROM songs, albums, artists
WHERE songs.albumid = albums.albumid and albums.artistid = artists.artistid
ORDER BY artists.artistname, albums.year, albums.albumname, songs.tracknum, songs.songname"""
cursor.execute(SQL)
i = 0
self.songcache = []
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
self.songcache.append({"filename":row['songs.filename'], "songid":row['songs.songid'], "songlength":row['songs.songlength'], "flags":row['songs.flags']})
i += 1
log.debug("cache", "Loaded %s songs into cache", i)
def getSongCacheSize(self):
if self.songcache is not None:
return len(self.songcache)
def loadState(self,player):
cursor = self.conn.cursor()
SQL = "select * from currentstate"
cursor.execute(SQL)
for row in cursor.fetchall():
player.songqueue.setCurrentIndex(row['queueindex'])
SQL = "select q.songid,s.filename,s.songlength,s.flags from queue q inner join songs s on q.songid = s.songid order by q.indexid"
cursor.execute(SQL)
i=0
for row in cursor.fetchall():
player.queueSong({"filename":row['s.filename'], "songid":row['q.songid'], "songlength":row['s.songlength'], "flags":row['s.flags']})
i += 1
if i == 0:
player.songqueue.setCurrentIndex(-1)
#if player.songqueue.currentindex > -1:
# player.songqueue.currentindex -= 1
def saveState(self,player):
cursor = self.conn.cursor()
SQL = "delete from currentstate"
cursor.execute(SQL)
SQL = "insert into currentstate (queueindex, playlistid, songid, shuffle) values (%s, %s, %s, %s)"
cursor.execute(SQL, player.songqueue.getCurrentIndex(),-1,player.songid,0)
SQL = "delete from queue"
cursor.execute(SQL)
queuesongids = player.songqueue.getSongIDs()
for i in queuesongids:
SQL = "insert into queue (songid) values (%s)"
cursor.execute(SQL, i)
def getNextSong(self, newindex = -1, shuffle = 0):
try:
if (newindex == None or newindex < 0):
self.curindex += 1
if (shuffle == None or shuffle == 0):
return self.songcache[self.curindex]
except Exception:
raise EndOfQueueException.EndOfQueueException("No songs left... need to go into stop mode")
def getRandomSong(self):
cursor = self.conn.cursor()
SQL = "select songs.songid, songs.filename, songs.songlength, songs.flags from songs order by random() limit 1"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
return {"filename":row['songs.filename'], "songid":row['songs.songid'], "songlength":row['songs.songlength'], "flags":row['songs.flags']}
raise EndOfQueueException.EndOfQueueException("No songs left... need to go into stop mode")
def getArtistInfo(self, artistid):
result = {}
cursor = self.conn.cursor()
SQL = "select artistid, aristname from artists where artistid = %s"
cursor.execute(SQL, artistid)
for row in cursor.fetchall():
log.debug("sqlresult", "XRow: %s", row)
result['artistid'] = row['artistid']
result['artistname'] = row['artistname']
return result
def getAlbumInfo(self, albumid):
result = {}
cursor = self.conn.cursor()
SQL = "select a.artistid, a.aristname, al.albumid, al.albumname, al.year from albums al inner join artists a on a.artistid = al.artistid where a.artistid = %s"
cursor.execute(SQL, albumid)
for row in cursor.fetchall():
log.debug("sqlresult", "XRow: %s", row)
result['artistid'] = row['a.artistid']
result['artistname'] = row['a.artistname']
result['albumid'] = row['al.albumid']
result['albumname'] = row['al.albumname']
result['albumyear'] = row['al.year']
return result
def getSongInfo(self, songids):
resultarray = []
cursor = self.conn.cursor()
whereclause = ""
for songid in songids:
whereclause += "songid = %s or " % songid
whereclause = whereclause[:-4]
SQL = "select s.songid, al.artistid, s.albumid, s.songname, s.bitrate, s.songlength, s.tracknum, s.filesize, s.timesplayed, s.filename, s.weight, s.flags, al.albumname, al.year, a.artistname, s.metaartistid, m.artistname from songs s inner join albums al on s.albumid = al.albumid inner join artists a on s.artistid = a.artistid outer left join artists m on m.artistid = s.metaartistid where %s" % whereclause
cursor.execute(SQL)
for row in cursor.fetchall():
result = {}
log.debug("sqlresult", "XRow: %s", row)
result['songid'] = row['s.songid']
result['albumid'] = row['s.albumid']
result['artistid'] = row['al.artistid']
result['artistname'] = row['a.artistname']
result['albumname'] = row['al.albumname']
result['songname'] = row['s.songname']
result['bitrate'] = row['s.bitrate']
result['songlength'] = row['s.songlength']
result['tracknum'] = row['s.tracknum']
result['filesize'] = row['s.filesize']
result['timesplayed'] = row['s.timesplayed']
result['filename'] = row['s.filename']
result['weight'] = row['s.weight']
result['flags'] = row['s.flags']
result['albumyear'] = row['al.year']
if row['m.artistname'] != None and row['s.metaartistid'] != '-1':
result['metaartistid'] = row['s.metaartistid']
result['metaartistname'] = row['m.artistname']
resultarray.append(result)
for result in resultarray:
songid = result['songid']
SQL = "select count(*) as thecount, type, songid from history where songid = %s group by songid, type order by songid"
result['timesstarted'] = result['timesplayed'] = result['timesrequested'] = 0
cursor.execute(SQL, songid)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
if row['type'] == "s":
result['timesstarted'] = int(row['thecount'])
elif row['type'] == "p":
result['timesplayed'] = int(row['thecount'])
elif row['type'] == "q":
result['timesrequested'] = int(row['thecount'])
if result['timesplayed'] and result['timesstarted']:
result['percentagecompleted'] = (float(result['timesplayed']) / float(result['timesstarted'])) * float(100)
else:
result['percentagecompleted'] = float(0)
result['genres'] = self.fillSongGenreHash(songid)
#Now sort them in the original order...
oldresultarray = resultarray
resultarray = []
count = 0
for songid in songids:
for i in oldresultarray:
if i['songid'] == songid:
i['index'] = count
resultarray.append(i)
count += 1
break
return resultarray
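# Illustrative call (hypothetical ids): getSongInfo([42, 7]) returns one dict
# per song, augmented with history counts, percentagecompleted and genres, and
# re-sorted above so the output order matches the order of the ids passed in.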
def authUser(self, username = "", password = ""):
result = None
cursor = self.conn.cursor()
SQL = "SELECT userid, authlevel FROM users "
SQL += "WHERE username = %s AND password = %s"
cursor.execute(SQL, username, password)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result = {"userid":row['userid'],"authlevel":['authlevel']}
return result
def listArtists(self, anchor=""):
result = []
cursor = self.conn.cursor()
SQL = "SELECT artistid, artistname FROM artists "
if (anchor != None and anchor != ""):
SQL += "WHERE artistname like '%s%%' " % anchor
SQL += "ORDER BY lower(artistname) ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
if row['artistname'] != '':
result.append({"artistid":row['artistid'],"artistname":row['artistname']})
return result
def listAlbums(self, artistid=None, genreid=None, anchor=""):
result = []
cursor = self.conn.cursor()
#Look for real albums first and stick them at the top of the list
if artistid != None:
SQL = "SELECT albumid, albumname, year FROM albums WHERE artistid = %i " % artistid
if (anchor != None and anchor != ""):
SQL += "AND albumname like '%s%%%%' " % anchor.replace("'", "\\'")
SQL += "ORDER BY year, lower(albumname) ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"albumid":row['albumid'],"albumname":row['albumname'],"albumyear":row['year'],"metaartist":0})
#Now look for metaartist related albums
SQL = "SELECT a.albumid, a.albumname, a.year FROM albums a INNER JOIN songs s ON a.albumid = s.albumid WHERE s.metaartistid = %s " % artistid
if (anchor != None and anchor != ""):
SQL += "AND a.albumname like '%s%%%%' " % anchor.replace("'", "\\'")
SQL += "ORDER BY a.year, lower(a.albumname) ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"albumid":row['a.albumid'],"albumname":row['a.albumname'],"albumyear":row['a.year'],"metaartist":1})
elif genreid != None:
SQL = "SELECT DISTINCT ar.artistid, ar.artistname, a.albumid, a.albumname, a.year FROM artists ar INNER JOIN albums a ON ar.artistid = a.artistid INNER JOIN songs s ON a.albumid = s.albumid INNER JOIN genre_data gd ON gd.songid = s.songid WHERE gd.genreid = %s " % genreid
if (anchor != None and anchor != ""):
SQL += "AND albumname like '%s%%%%' " % anchor.replace("'", "\\'")
SQL += "ORDER BY lower(a.albumname), lower(ar.artistname) ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistid":row['ar.artistid'],"artistname":row['ar.artistname'],"albumid":row['a.albumid'],"albumname":row['a.albumname'],"albumyear":row['a.year'],"metaartist":0})
else:
#SQL = "SELECT ar.artistid, ar.artistname, a.albumid, a.albumname, a.year FROM albums a INNER JOIN artists ar ON ar.artistid = a.artistid ORDER BY ar.artistname, a.year, lower(a.albumname) ASC"
SQL = "SELECT ar.artistid, ar.artistname, a.albumid, a.albumname, a.year FROM albums a INNER JOIN artists ar ON ar.artistid = a.artistid ORDER BY lower(a.albumname), ar.artistname ASC"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistid":row['ar.artistid'],"artistname":row['ar.artistname'],"albumid":row['a.albumid'],"albumname":row['a.albumname'],"albumyear":row['a.year'],"metaartist":0})
return result
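# listAlbums() therefore returns three possible shapes: albums for one artist
# (including metaartist matches), albums for a genre (with artist columns), or
# the whole catalogue when neither artistid nor genreid is given.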
def listPlaylists(self):
result = []
cursor = self.conn.cursor()
SQL = "SELECT playlistid, name, userid FROM playlists order by playlistid"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"playlistid":row['playlistid'],"playlistname":row['name'],"userid":row['userid']})
return result
def listGenres(self):
result = []
cursor = self.conn.cursor()
SQL = "SELECT genreid, genrename FROM genres order by genreid"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"genreid":row['genreid'],"genrename":row['genrename']})
return result
def incrementTimesStarted(self, songid):
cursor = self.conn.cursor()
SQL = "INSERT INTO HISTORY (songid, type, time) VALUES (%s, 's', %s)"
cursor.execute(SQL, songid, time.time())
def incrementTimesPlayed(self, songid):
cursor = self.conn.cursor()
SQL = "UPDATE songs SET timesplayed = timesplayed + 1 where songid = %s"
cursor.execute(SQL, songid)
SQL = "INSERT INTO HISTORY (songid, type, time) VALUES (%s, 'p', %s)"
cursor.execute(SQL, songid, time.time())
def getIds(self, idtype, theid):
result = []
cursor = self.conn.cursor()
SQL = "select s.filename, s.songid, s.songlength, s.flags from songs s "
if idtype == "artistid":
SQL += ", albums a where s.artistid = %d and s.albumid = a.albumid order by a.year, a.albumname, s.tracknum, s.songname" % theid
elif idtype == "albumid":
SQL += "where albumid = %d order by tracknum, songname" % theid
elif idtype == "songid":
SQL += "where songid = %d" % theid
elif idtype == "playlistid":
SQL += ", playlistdata p where p.songid = s.songid and p.playlistid = %d order by p.indexid" % theid
elif idtype == "genreid":
SQL += "INNER JOIN genre_data gd ON s.songid = gd.songid WHERE gd.genreid = %d ORDER BY s.songid" % theid
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"filename":row['s.filename'], "songid":row['s.songid'], "songlength":row['s.songlength'], "flags":row['s.flags']})
#Now grab metaartist related songs if artistid is given
if idtype == "artistid":
SQL = "select s.filename, s.songid, s.songlength, s.flags from songs s, albums a where s.metaartistid = %d and s.albumid = a.albumid order by a.year, a.albumname, s.tracknum, s.songname"
cursor.execute(SQL, theid)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"filename":row['s.filename'], "songid":row['s.songid'], "songlength":row['s.songlength'], "flags":row['s.flags']})
return result
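# getIds() maps an id type to playable queue entries; illustrative calls
# (hypothetical ids): getIds("albumid", 3) or getIds("playlistid", 1), each
# returning dicts with filename, songid, songlength and flags.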
def setQueueHistoryOnId(self, songid, userid=-1):
cursor = self.conn.cursor()
SQL = "INSERT INTO HISTORY (songid, type, time, userid) VALUES (%s, 'q', %s, %s)"
cursor.execute(SQL, songid, time.time(), userid)
def createPlaylist(self, name):
cursor = self.conn.cursor()
SQL = "SELECT playlistid from playlists where name = %s"
cursor.execute(SQL, name)
exists = -1
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
exists = 1
if (exists == -1):
now=time.time()
SQL = "INSERT into playlists (name,userid,create_date,modified_date) values (%s,-1,%s,%s)"
cursor.execute(SQL, name, now, now)
SQL = "SELECT playlistid from playlists where name = %s"
cursor.execute(SQL, name)
for row in cursor.fetchall():
exists = row['playlistid']
else:
exists = -1
return exists
def removePlaylist(self, playlistid):
cursor = self.conn.cursor()
SQL = "delete from playlistdata where playlistid = %s"
cursor.execute(SQL, playlistid)
SQL = "delete from playlists where playlistid = %s"
cursor.execute(SQL, playlistid)
def listSongs(self, artistid=None, albumid=None, playlistid=None, anchor="", getgenres=True):
log.debug("funcs", "Database.listSongs()")
result = []
cursor = self.conn.cursor()
SQL = "SELECT s.songid, s.artistid, ar.artistname, s.albumid, s.tracknum, s.songname, s.filename, s.filesize, s.bitrate, s.songlength, s.timesplayed, a.albumname, a.year, s.metaartistid, m.artistname"
if (playlistid != None and playlistid != ""):
SQL += ", p.indexid"
SQL += " FROM songs s, albums a, artists ar"
if (playlistid != None and playlistid != ""):
SQL += ", playlistdata p"
SQL += " LEFT OUTER JOIN artists m on s.metaartistid = m.artistid WHERE a.albumid = s.albumid and ar.artistid = s.artistid "
if (albumid != None and albumid != ""):
SQL += "AND s.albumid = %i " % albumid
if (artistid != None and artistid != ""):
SQL += "AND s.artistid = %i " % artistid
if (playlistid != None and playlistid != ""):
SQL += "AND p.playlistid = %i and p.songid = s.songid " % playlistid
if (anchor != None and anchor != ""):
SQL += "AND s.songname like '%s%%' " % anchor
SQL += "ORDER BY"
if playlistid != None and playlistid != "":
SQL += " p.indexid,"
SQL +=" a.year, lower(a.albumname), s.tracknum, s.songname"
cursor.execute(SQL)
for row in cursor.fetchall():
self.fillSongHash(row, result, getgenres)
if artistid != None and artistid != "":
SQL = "SELECT s.songid, s.artistid, ar.artistname, s.albumid, s.tracknum, s.songname, s.filename, s.filesize, s.bitrate, s.songlength, s.timesplayed, a.albumname, a.year, s.metaartistid, m.artistname FROM songs s, albums a, artists ar LEFT OUTER JOIN artists m on s.metaartistid = m.artistid WHERE a.albumid = s.albumid and ar.artistid = s.artistid AND s.metaartistid = %s"
cursor.execute(SQL, artistid)
for row in cursor.fetchall():
self.fillSongHash(row, result)
return result
def fillSongHash(self, row, result, getgenres=True):
log.debug("sqlresult", "Row: %s", row)
timesplayed = 0
if row['s.timesplayed'] != None:
timesplayed = row['s.timesplayed']
somesong = {"songid":row['s.songid'],"artistid":row['s.artistid'],"albumid":row['s.albumid'],"songname":row['s.songname'],"filename":row['s.filename'],"filesize":row['s.filesize'],"songlength":row['s.songlength'],"tracknum":row['s.tracknum'],"timesplayed":timesplayed,"bitrate":row['s.bitrate'],"albumname":row['a.albumname'],"albumyear":row['a.year'],"artistname":row['ar.artistname']}
if 'p.indexid' in row:
somesong['indexid'] = row['p.indexid']
if row['m.artistname'] != None and row['s.metaartistid'] != '-1':
somesong['metaartistid'] = row['s.metaartistid']
somesong['metaartistname'] = row['m.artistname']
if getgenres == True:
somesong['genres'] = self.fillSongGenreHash(row['s.songid'])
result.append(somesong)
def fillSongGenreHash(self, songid):
result = []
cursor = self.conn.cursor()
SQL = "SELECT g.genreid, g.genrename FROM genre_data d INNER JOIN genres g ON d.genreid = g.genreid WHERE d.songid = %s"
cursor.execute(SQL, songid)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"genreid":row['g.genreid'],"genrename":row['g.genrename']})
return result
def topArtists(self, numbertoget):
result = []
cursor = self.conn.cursor()
SQL = "select count(*) as thecount, a.artistname from history h inner join songs s on h.songid = s.songid inner join artists a on a.artistid = s.artistid where h.type = 'p' group by a.artistname order by thecount desc, a.artistname asc limit %s"
cursor.execute(SQL, numbertoget)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistname":row['a.artistname'],"count":int(row['thecount'])})
return result
def topAlbums(self, numbertoget):
result = []
cursor = self.conn.cursor()
SQL = "select count(*) as thecount, al.albumname, a.artistname from history h inner join songs s on h.songid = s.songid inner join albums al on s.albumid = al.albumid inner join artists a on al.artistid = a.artistid where h.type = 'p' group by a.artistname, al.albumname order by thecount desc, a.artistname asc, al.albumname asc limit %s"
cursor.execute(SQL, numbertoget)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistname":row['a.artistname'],"albumname":row['al.albumname'],"count":int(row['thecount'])})
return result
def topSongs(self, numbertoget):
result = []
cursor = self.conn.cursor()
SQL = "select count(*) as thecount, al.albumname, a.artistname, s.songname from history h inner join songs s on h.songid = s.songid inner join albums al on s.albumid = al.albumid inner join artists a on al.artistid = a.artistid where h.type = 'p' group by a.artistname, al.albumname, s.songname order by thecount desc, a.artistname asc, al.albumname asc, s.songname asc limit %s"
cursor.execute(SQL, numbertoget)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result.append({"artistname":row['a.artistname'],"albumname":row['al.albumname'],"songname":row['s.songname'],"count":int(row['thecount'])})
return result
def getStats(self):
result = {}
cursor = self.conn.cursor()
SQL = "select count(*) as numartists from artists"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["numartists"] = int(row['numartists'])
SQL = "select count(*) as numalbums from albums"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["numalbums"] = int(row['numalbums'])
SQL = "select count(*) as numsongs, sum(filesize) as sumfilesize, sum(songlength) as sumsec, avg(filesize) as avgfilesize, avg(songlength) as avgsec from songs"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["numsongs"] = int(row['numsongs'])
result["sumfilesize"] = float(row['sumfilesize'])
result["sumsec"] = float(row['sumsec'])
result["avgfilesize"] = float(row['avgfilesize'])
result["avgsec"] = float(row['avgsec'])
SQL = "select count(*) as songsplayed from history where type = 'p'"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["songsplayed"] = int(row['songsplayed'])
SQL = "select count(*) as songsstarted from history where type = 's'"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
result["songsstarted"] = int(row['songsstarted'])
return result
def importCache(self):
log.debug("funcs", "Database.importCache()")
result = []
cursor = self.conn.cursor()
SQL = "SELECT filename, modified_date FROM songs ORDER BY filename"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
# if type(row['modified_date']) is not FloatType:
# row['modified_date'] =
result.append({"filename":row['filename'],"modifieddate":row['modified_date']})
cursor.close()
return result
def getmetadata(self, filename):
log.debug("funcs", "Database.getmetadata(%s)", filename)
return getTag(filename)
def importNewSongs(self, songs):
log.debug("funcs", "Database.importNewSongs()")
cursor = self.import_cursor
cursor.begin()
try:
for song in songs:
log.debug("import", "Importing song %s as %s", song["filename"], song)
if "bitrate" in song.keys():
genreid = -1
if 'genre' in song.keys():
genreid = self._getGenre(self.checkBinary(song['genre']))
artistid = self._getArtist(self.checkBinary(song['artistname']),False)
metaartistid = -1
if 'metaartistname' in song.keys():
metaartistid = self._getArtist(self.checkBinary(song['metaartistname']),True)
albumid = self._getAlbum(self.checkBinary(song['albumname']), artistid, song['year'])
songid = self._getNSong(self.checkBinary(song['songname']),artistid,self.checkBinary(song['filename']),song['tracknum'],albumid=albumid,year=song['year'],metaartistid=metaartistid, bitrate=song["bitrate"], songlength=song["songlength"], genreid=genreid)
else:
log.debug("import", "Could not get bitrate of song %s. Assuming bad file.", song["filename"])
except:
cursor.rollback()
raise
cursor.commit()
return True
def _getNSong(self, songname, artistid, filename, tracknum, albumid="", year="", metaartistid=-1, genreid=-1, bitrate=-1, songlength=-1):
log.debug("funcs", "Database._getNSongs()")
sid = -1
songname = string.strip(songname)
filename = string.strip(filename)
cursor = self.import_cursor
statinfo = os.stat(filename)
now = time.time()
if tracknum == -1:
tracknum = 0
if filename not in self.i_songcache:
SQL = "insert into songs (songname, artistid, albumid, year, tracknum, filename, filesize, songlength, bitrate, metaartistid, create_date, modified_date, timesplayed, weight, flags) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 0, 0, 0)"
cursor.execute(SQL, songname, artistid, albumid, year, tracknum, filename, statinfo.st_size, songlength, bitrate, metaartistid, now, now)
self.getalbumstatus = True
sid = cursor.lastrowid
if genreid != -1:
SQL = "insert into genre_data(songid, genreid) VALUES (%s, %s)"
cursor.execute(SQL, sid, genreid)
self.i_songcache[filename] = sid
#TODO: Check to see if there are changes
else:
sid = self.i_songcache["filename"]
SQL = "update songs set modified_date = %s, songname = %s, artistid = %s, albumid = %s, year = %s, tracknum = %s, filename = %s, songlength = %s, bitrate = %s, metaartistid = %s, filesize = %s where songid = %s"
cursor.execute(SQL, now, songname, artistid, albumid, year, tracknum, filename, songlength, bitrate, metaartistid, statinfo.st_size, sid)
if genreid != -1:
SQL = "update genre_data set genreid=%s WHERE songid=%s"
cursor.execute(SQL, genreid, sid)
self.getalbumstatus = False
return sid
def importStart(self):
log.debug("funcs", "Database.importStart()")
session = Session()
self.genrecache = {}
self.artistcache = {}
self.albumcache = {}
self.i_songcache = {}
#self.cursong = session['xinelib'].createSong()
#self.cursong.songInit()
self.import_cursor = self.conn.cursor()
self.import_cursor.nolock=1
cursor=self.import_cursor
SQL = "select artistname,artistid from artists"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
self.artistcache[row[0]] = int(row[1])
SQL = "select artistid,albumname,albumid from albums"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
self.albumcache[str(row[0])+row[1]] = int(row[2])
SQL = "select filename,songid from songs"
cursor.execute(SQL)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
self.i_songcache[row[0]] = int(row[1])
def importEnd(self):
log.debug("funcs", "Database.importEnd()")
cursor = self.import_cursor
cursor.begin()
try:
SQL = "DELETE FROM albums WHERE albumid NOT IN (SELECT albumid FROM songs)"
cursor.execute(SQL)
SQL = "DELETE FROM artists WHERE artistid NOT IN (SELECT artistid FROM songs) and artistid NOT IN (SELECT metaartistid as artistid FROM songs)"
cursor.execute(SQL)
except:
cursor.rollback()
raise
cursor.commit()
log.debug("import", "Import complete, loading song cache (before %d)", len(self.songcache))
try:
self.loadSongCache()
except:
log.exception("Got exception trying to upgrade song cache")
log.debug("import", "Cache update complete. Cache contains %d songs", len(self.songcache))
def importCancel(self):
log.debug("funcs", "Database.importCancel()")
def importUpload(self, filename, songdata):
log.debug("funcs", "Database.importUpload()")
log.debug("import", "getting tag info for: %s" % self.checkBinary(filename))
return getTag(self.checkBinary(filename))
def importSongs(self, somesong):
log.debug("funcs", "Database.importSongs()")
resultmem = {}
genreid = -1
if 'genrename' in somesong.keys():
genreid = self._getGenre(self.checkBinary(somesong['genrename']))
artistid = self._getArtist(self.checkBinary(somesong['artistname']),False)
metaartistid = -1
if 'metaartistname' in somesong.keys():
metaartistid = self._getArtist(self.checkBinary(somesong['metaartistname']),True)
albumid = self._getAlbum(self.checkBinary(somesong['albumname']), artistid, somesong['year'])
songid = self._getSong(self.checkBinary(somesong['songname']),artistid,self.checkBinary(somesong['filename']),somesong['tracknum'],albumid,somesong['year'],metaartistid,genreid=genreid)
resultmem['genreid'] = genreid
resultmem['artistid'] = artistid
resultmem['metaartistid'] = metaartistid
resultmem['albumid'] = albumid
resultmem['songid'] = songid
if self.getgenrestatus != -1:
resultmem['newgenreid'] = self.getgenrestatus
if self.getartiststatus != -1:
resultmem['newartistid'] = self.getartiststatus
if self.getmetaartiststatus != -1:
resultmem['newmetaartistid'] = self.getmetaartiststatus
if self.getalbumstatus != -1:
resultmem['newalbumid'] = self.getalbumstatus
if self.getsongstatus != -1:
resultmem['newsongid'] = self.getsongstatus
return resultmem
def importDelete(self, arrayofsongs):
log.debug("funcs", "Database.importDelete()")
cursor = self.import_cursor
result = 0
for somesong in arrayofsongs:
somesong = self.checkBinary(somesong)
SQL=""
if isinstance(somesong,types.IntType):
SQL = "DELETE FROM songs WHERE songid = %s"
elif isinstance(somesong,types.StringType):
SQL = "DELETE FROM songs WHERE filename = %s"
if SQL!="":
cursor.execute(SQL, somesong)
result += 1
return result
def playlistClear(self, playlistid):
cursor = self.conn.cursor()
SQL = "DELETE FROM playlistdata WHERE playlistid = %s"
cursor.execute(SQL, playlistid)
def addSongToPlaylist(self, playlistid, songid):
cursor = self.conn.cursor()
#SQL = "INSERT INTO playlistdata (playlistid, songid) VALUES (%d, %d)" % (playlistid, songid)
SQL = "INSERT INTO playlistdata (playlistid, songid, indexid) values (%d,%d,(select count(playlistdataid) from playlistdata where playlistid = %s))"
cursor.execute(SQL, playlistid, songid, playlistid)
def removeSongFromPlaylist(self, playlistid, indexid):
playlistdataid = 0
cursor = self.conn.cursor()
SQL = "DELETE from playlistdata where playlistid = %d and indexid = %s"
cursor.execute(SQL, playlistid, indexid)
SQL = "UPDATE playlistdata set indexid = indexid - 1 where playlistid = %s and indexid > %s"
cursor.execute(SQL, playlistid, indexid)
def moveSongInPlaylist(self, playlistid, index1, index2, swap=False):
songs = []
cursor = self.conn.cursor()
SQL = "SELECT songid from playlistdata where playlistid = %s order by indexid"
cursor.execute(SQL, playlistid)
for row in cursor.fetchall():
log.debug("sqlresult", "Row: %s", row)
songs.append(row['songid'])
if index1 > -1 and index1 < len(songs) and index2 > -1 and index2 < len(songs):
if swap == False:
tmp = songs[index1]
songs.pop(index1)
songs.insert(index2, tmp)
else:
tmp = songs[index1]
songs[index1] = songs[index2]
songs[index2] = tmp
self.playlistClear(playlistid)
for i in songs:
self.addSongToPlaylist(playlistid, i)
def _getGenre(self, genrename):
log.debug("funcs", "Database._getGenre()")
gid = -1
genrename = string.strip(genrename)
cursor = self.import_cursor
if genrename not in self.genrecache:
SQL = "select genreid from genres where genrename = %s"
cursor.execute(SQL, genrename)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
gid = row['genreid']
if gid == -1:
now = time.time()
SQL = "insert into genres (genrename, create_date) values (%s, %s)"
cursor.execute(SQL, genrename, now)
self.getgenrestatus = True
SQL = "select genreid from genres where genrename = %s"
cursor.execute(SQL, genrename)
for row in cursor.fetchall():
log.debug("sqlresult", "%s", row)
gid = row['genreid']
else:
SQL = "update genres set genrename = %s, modified_date = %s where genreid=%s"
cursor.execute(SQL, genrename, now, gid)
self.getgenrestatus = False
self.genrecache[genrename] = gid
else:
self.getgenrestatus = -1
gid = self.genrecache[genrename]
return gid
def _getArtist(self, artistname, metaartist=False):
log.debug("funcs", "Database._getArtist()")
aid = -1
artistname = string.strip(artistname)
cursor = self.import_cursor
#See if this artist is already in the cache
if artistname not in self.artistcache:
#SQL = "select artistid from artists where artistname = %s"
#cursor.execute(SQL, artistname)
#for row in cursor.fetchall():
# log.debug("sqlresult", "Row: %s", row)
# aid = row['artistid']
now = time.time()
try:
metaartist = int(metaartist)
except:
metaartist = 0
if aid == -1:
SQL = "insert into artists (artistname, metaflag, create_date, modified_date) VALUES (%s, %s, %s, %s)"
cursor.execute(SQL, artistname, int(metaartist), now, now)
self.getartiststatus = True
aid = cursor.lastrowid
#Not needed until we have genres and/or metaartists
else:
SQL = "update artists set metaflag = %s, modified_date = %s where artistid = %s"
cursor.execute(SQL, metaartist, now, aid)
self.getartiststatus = False
self.artistcache[artistname] = aid
else:
self.getartiststatus = -1
aid = self.artistcache[artistname]
return aid
def _getAlbum(self, albumname, artistid, year):
tid = -1
albumname = string.strip(albumname)
cursor = self.import_cursor
#See if this album is already in the cache
if str(str(artistid) + albumname) not in self.albumcache:
#SQL = "select albumid from albums where albumname = %s"
#cursor.execute(SQL, albumname)
#for row in cursor.fetchall():
# log.debug("sqlresult", "Row: %s", row)
# tid = row['albumid']
now=time.time()
if tid == -1:
SQL = "insert into albums (albumname, artistid, year, create_date, modified_date) VALUES (%s, %s, %s, %s, %s)"
cursor.execute(SQL, albumname, artistid, year, now, now)
self.getalbumstatus = True
tid = cursor.lastrowid
#TODO: Check to see if there are changes
else:
#TODO: Have to add genre code
SQL = "update albums set modified_date = %s, year = %s, artistid = %s where albumid = %s"
cursor.execute(SQL, now, year, artistid, tid)
self.getalbumstatus = False
self.albumcache[str(artistid) + albumname] = tid
else:
self.getalbumstatus = -1
tid = self.albumcache[str(artistid) + albumname]
return tid
def _getSong(self, songname, artistid, filename, tracknum, albumid="", year="", metaartistid=-1, genreid=-1):
sid = -1
songname = string.strip(songname)
filename = string.strip(filename)
cursor = self.import_cursor
#SQL = "select songid from songs where filename = %s"
#cursor.execute(SQL, filename)
#for row in cursor.fetchall():
# log.debug("sqlresult", "Row: %s", row)
# sid = row['songid']
#metadata = self.cursong.getMetaData()
metadata = {}
try:
#Jef 07/30/2003: Not sure why but metadata=metadata.id3.getTag(filename) isn't working
metadata = getTag(filename)
log.debug("import", "Metadata %s", metadata)
except:
log.debug("import", "No metadata for %s", filename)
if "bitrate" not in metadata or "songlength" not in metadata:
pass
#print "before set filename"
#self.cursong.songint.filename = filename
#print "before open"
#self.cursong.songOpen()
#print "before metadata"
#metadata = self.cursong.getMetaData()
#self.cursong.songClose()
#print "after metadata"
statinfo = os.stat(filename)
songlength = 0
if metadata['songlength'] is not None and str(metadata['songlength']) != 'inf':
songlength = metadata['songlength']
now = time.time()
artistid = int(artistid)
albumid = int(albumid)
year = int(year)
if filename not in self.i_songcache:
SQL = "insert into songs (songname, artistid, albumid, year, tracknum, filename, filesize, songlength, bitrate, metaartistid, create_date, modified_date, timesplayed, weight, flags) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 0, 0, 0)"
cursor.execute(SQL, songname, artistid, albumid, year, tracknum, filename, statinfo.st_size, songlength, metadata['bitrate'], metaartistid, now, now)
self.getalbumstatus = True
sid = cursor.lastrowid
if genreid > -1:
SQL = "insert into genre_data(songid, genreid, create_date, modified_date) VALUES (%s, %s, %s, %s)"
cursor.execute(SQL, sid, genreid, now, now)
self.i_songcache[filename] = sid
#TODO: Check to see if there are changes
else:
SQL = "update songs set modified_date = %s, songname = %s, artistid = %s, albumid = %s, year = %s, tracknum = %s, filename = %s, songlength = %s, bitrate = %s, metaartistid = %s, filesize = %s where songid = %s"
cursor.execute(SQL, now, songname, artistid, albumid, year, tracknum, filename, metadata['songlength'], metadata['bitrate'], metaartistid, statinfo.st_size, sid)
if genreid > -1:
SQL = "update genre_data set genreid=%s, modified_date=%s WHERE songid=%s"
cursor.execute(SQL, genreid, now, sid)
self.getalbumstatus = False
return sid
def checkBinary(self, datatocheck):
return UTFstring.decode(datatocheck)
def getCDDB(self, device):
tags = getCDDB(device)
return tags
def pyrip(self, tags, filenames, device):
session = Session()
pyrip (device, session['cfg'], tags, filenames)
return 0
def pyrip_update (self):
ret = pyrip_update()
if ret['done'] == 1:
print "Importing files:"
print ret
session = Session ()
session['cmdint'].db.importstart()
i = 1
for file in ret['filenames_c']:
tag = session['cmdint'].db.importupload(file)
tag['filename'] = file
tag['metaartistname'] = ''
tag['tracknum'] = i
for key in tag:
tmp = UTFstring.encode (tag[key])
tag[key] = tmp
session['cmdint'].db.importsongs(tag)
i = i + 1
session['cmdint'].db.importend()
print "Done importing"
return ret
# vim:ts=8 sw=8 noet
|
gpl-2.0
| -2,373,406,627,346,853,400
| 38.735178
| 411
| 0.680643
| false
| 3.009205
| false
| false
| false
|
Tapo4ek/django-cacheops
|
cacheops/query.py
|
1
|
20226
|
# -*- coding: utf-8 -*-
import sys
import json
import threading
import six
from funcy import select_keys, cached_property, once, once_per, monkey, wraps, walk
from funcy.py2 import mapcat, map
from .cross import pickle, md5
import django
from django.utils.encoding import smart_str, force_text
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS
from django.db.models import Manager, Model
from django.db.models.query import QuerySet
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed
# This thing was removed in Django 1.8
try:
from django.db.models.query import MAX_GET_RESULTS
except ImportError:
MAX_GET_RESULTS = None
from .conf import model_profile, CACHEOPS_LRU, ALL_OPS
from .utils import monkey_mix, stamp_fields, func_cache_key, cached_view_fab, family_has_profile
from .redis import redis_client, handle_connection_failure, load_script
from .tree import dnfs
from .invalidation import invalidate_obj, invalidate_dict, no_invalidation
from .transaction import in_transaction
from .signals import cache_read
__all__ = ('cached_as', 'cached_view_as', 'install_cacheops')
_local_get_cache = {}
@handle_connection_failure
def cache_thing(cache_key, data, cond_dnfs, timeout):
"""
Writes data to cache and creates appropriate invalidators.
"""
assert not in_transaction()
load_script('cache_thing', CACHEOPS_LRU)(
keys=[cache_key],
args=[
pickle.dumps(data, -1),
json.dumps(cond_dnfs, default=str),
timeout
]
)
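# The write itself runs inside the "cache_thing" Lua script loaded above, so
# the pickled payload and the invalidation structures derived from cond_dnfs
# are stored in Redis in a single server-side step.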
def cached_as(*samples, **kwargs):
"""
Caches results of a function and invalidates them the same way as the given queryset.
NOTE: Ignores queryset cached ops settings, just caches.
"""
timeout = kwargs.get('timeout')
extra = kwargs.get('extra')
key_func = kwargs.get('key_func', func_cache_key)
# If we unexpectedly get list instead of queryset return identity decorator.
# Paginator could do this when page.object_list is empty.
if len(samples) == 1 and isinstance(samples[0], list):
return lambda func: func
def _get_queryset(sample):
if isinstance(sample, Model):
queryset = sample.__class__.objects.filter(pk=sample.pk)
elif isinstance(sample, type) and issubclass(sample, Model):
queryset = sample.objects.all()
else:
queryset = sample
queryset._require_cacheprofile()
return queryset
querysets = map(_get_queryset, samples)
cond_dnfs = mapcat(dnfs, querysets)
key_extra = [qs._cache_key() for qs in querysets]
key_extra.append(extra)
if not timeout:
timeout = min(qs._cacheconf['timeout'] for qs in querysets)
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
if in_transaction():
return func(*args, **kwargs)
cache_key = 'as:' + key_func(func, args, kwargs, key_extra)
cache_data = redis_client.get(cache_key)
cache_read.send(sender=None, func=func, hit=cache_data is not None)
if cache_data is not None:
return pickle.loads(cache_data)
result = func(*args, **kwargs)
cache_thing(cache_key, result, cond_dnfs, timeout)
return result
return wrapper
return decorator
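# Illustrative usage sketch (model and field names are hypothetical):
#
#   @cached_as(Article.objects.filter(published=True), timeout=60)
#   def published_count():
#       return Article.objects.filter(published=True).count()
#
# The cached value is dropped whenever an object matching the sample queryset
# is saved or deleted, mirroring how the queryset itself would be invalidated.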
def cached_view_as(*samples, **kwargs):
return cached_view_fab(cached_as)(*samples, **kwargs)
class QuerySetMixin(object):
@cached_property
def _cacheprofile(self):
profile = model_profile(self.model)
if profile:
self._cacheconf = profile.copy()
self._cacheconf['write_only'] = False
return profile
@cached_property
def _cloning(self):
return 1000
def _require_cacheprofile(self):
if self._cacheprofile is None:
raise ImproperlyConfigured(
'Cacheops is not enabled for %s.%s model.\n'
'If you don\'t want to cache anything by default '
'you can configure it with empty ops.'
% (self.model._meta.app_label, self.model._meta.model_name))
def _cache_key(self):
"""
Compute a cache key for this queryset
"""
md = md5()
md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__))
# Vary cache key for proxy models
md.update('%s.%s' % (self.model.__module__, self.model.__name__))
# Protect from field list changes in model
md.update(stamp_fields(self.model))
# Use query SQL as part of a key
try:
sql, params = self.query.get_compiler(self._db or DEFAULT_DB_ALIAS).as_sql()
try:
sql_str = sql % params
except UnicodeDecodeError:
sql_str = sql % walk(force_text, params)
md.update(smart_str(sql_str))
except EmptyResultSet:
pass
# If query results differ depending on database
if self._cacheprofile and not self._cacheprofile['db_agnostic']:
md.update(self.db)
# Thing only appeared in Django 1.9
it_class = getattr(self, '_iterable_class', None)
if it_class:
md.update('%s.%s' % (it_class.__module__, it_class.__name__))
# 'flat' attribute changes results formatting for values_list() in Django 1.8 and earlier
if hasattr(self, 'flat'):
md.update(str(self.flat))
return 'q:%s' % md.hexdigest()
def _cache_results(self, cache_key, results):
cond_dnfs = dnfs(self)
cache_thing(cache_key, results, cond_dnfs, self._cacheconf['timeout'])
def cache(self, ops=None, timeout=None, write_only=None):
"""
Enables caching for given ops
ops - a subset of {'get', 'fetch', 'count', 'exists'},
the ops for which caching is turned on; all are enabled by default
timeout - override default cache timeout
write_only - don't try fetching from cache, still write result there
NOTE: you can actually disable caching by omitting the corresponding ops,
.cache(ops=[]) disables caching for this queryset.
"""
self._require_cacheprofile()
if ops is None or ops == 'all':
ops = ALL_OPS
if isinstance(ops, str):
ops = {ops}
self._cacheconf['ops'] = set(ops)
if timeout is not None:
self._cacheconf['timeout'] = timeout
if write_only is not None:
self._cacheconf['write_only'] = write_only
return self
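# Illustrative calls (hypothetical model): Article.objects.cache() enables all
# ops, .cache(ops=['get'], timeout=300) caches only .get() calls, and
# .cache(ops=[]) effectively disables caching for this queryset.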
def nocache(self):
"""
Convenience method: turns off caching for this queryset
"""
# cache profile not present means caching is not enabled for this model
if self._cacheprofile is None:
return self
else:
return self.cache(ops=[])
def cloning(self, cloning=1000):
self._cloning = cloning
return self
def inplace(self):
return self.cloning(0)
if django.VERSION >= (1, 9):
def _clone(self, **kwargs):
if self._cloning:
return self.clone(**kwargs)
else:
self.__dict__.update(kwargs)
return self
def clone(self, **kwargs):
kwargs.setdefault('_cacheprofile', self._cacheprofile)
if hasattr(self, '_cacheconf'):
kwargs.setdefault('_cacheconf', self._cacheconf)
clone = self._no_monkey._clone(self, **kwargs)
clone._cloning = self._cloning - 1 if self._cloning else 0
return clone
else:
def _clone(self, klass=None, setup=False, **kwargs):
if self._cloning:
return self.clone(klass, setup, **kwargs)
elif klass is not None:
# HACK: monkey patch self.query.clone for single call
# to return itself instead of cloning
original_query_clone = self.query.clone
def query_clone():
self.query.clone = original_query_clone
return self.query
self.query.clone = query_clone
return self.clone(klass, setup, **kwargs)
else:
self.__dict__.update(kwargs)
return self
def clone(self, klass=None, setup=False, **kwargs):
kwargs.setdefault('_cacheprofile', self._cacheprofile)
if hasattr(self, '_cacheconf'):
kwargs.setdefault('_cacheconf', self._cacheconf)
clone = self._no_monkey._clone(self, klass, setup, **kwargs)
clone._cloning = self._cloning - 1 if self._cloning else 0
return clone
def iterator(self):
# If cache is not enabled or in transaction just fall back
if not self._cacheprofile or 'fetch' not in self._cacheconf['ops'] \
or in_transaction():
return self._no_monkey.iterator(self)
cache_key = self._cache_key()
if not self._cacheconf['write_only'] and not self._for_write:
# Trying get data from cache
cache_data = redis_client.get(cache_key)
cache_read.send(sender=self.model, func=None, hit=cache_data is not None)
if cache_data is not None:
return iter(pickle.loads(cache_data))
# Cache miss - fetch data from the overridden implementation
def iterate():
# NOTE: we are using self._result_cache to avoid fetching-while-fetching bug #177
self._result_cache = []
for obj in self._no_monkey.iterator(self):
self._result_cache.append(obj)
yield obj
self._cache_results(cache_key, self._result_cache)
return iterate()
def count(self):
if self._cacheprofile and 'count' in self._cacheconf['ops']:
# Optimization borrowed from the overridden method:
# if the queryset cache is already filled, just return its length
if self._result_cache is not None:
return len(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.count(self))()
else:
return self._no_monkey.count(self)
def get(self, *args, **kwargs):
# .get() uses the same .iterator() method to fetch data,
# so here we add 'fetch' to ops
if self._cacheprofile and 'get' in self._cacheconf['ops']:
# NOTE: local_get=True enables caching of simple gets in local memory,
# which is very fast, but not invalidated.
# Don't bother with Q-objects, select_related and previous filters;
# simple gets - that's what we are really after here.
if self._cacheprofile['local_get'] \
and not args \
and not self.query.select_related \
and not self.query.where.children:
# NOTE: We use a simpler way to generate a cache key to cut costs.
# Some day it could produce the same key for different requests.
key = (self.__class__, self.model) + tuple(sorted(kwargs.items()))
try:
return _local_get_cache[key]
except KeyError:
_local_get_cache[key] = self._no_monkey.get(self, *args, **kwargs)
return _local_get_cache[key]
except TypeError:
# If some arg is unhashable we can't save it to dict key,
# we just skip local cache in that case
pass
if 'fetch' in self._cacheconf['ops']:
qs = self
else:
qs = self._clone().cache()
else:
qs = self
return qs._no_monkey.get(qs, *args, **kwargs)
def exists(self):
if self._cacheprofile and 'exists' in self._cacheconf['ops']:
if self._result_cache is not None:
return bool(self._result_cache)
return cached_as(self)(lambda: self._no_monkey.exists(self))()
else:
return self._no_monkey.exists(self)
def bulk_create(self, objs, batch_size=None):
objs = self._no_monkey.bulk_create(self, objs, batch_size=batch_size)
if family_has_profile(self.model):
for obj in objs:
invalidate_obj(obj)
return objs
def invalidated_update(self, **kwargs):
clone = self._clone().nocache()
clone._for_write = True # affects routing
objects = list(clone.iterator()) # bypass queryset cache
rows = clone.update(**kwargs)
objects.extend(clone.iterator())
for obj in objects:
invalidate_obj(obj)
return rows
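# invalidated_update() exists because a plain .update() bypasses save signals,
# so cacheops would not see which rows changed; materialising the affected
# objects before and after the UPDATE lets it invalidate both versions.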
def connect_first(signal, receiver, sender):
old_receivers = signal.receivers
signal.receivers = []
signal.connect(receiver, sender=sender)
signal.receivers += old_receivers
# We need to stash old object before Model.save() to invalidate on its properties
_old_objs = threading.local()
class ManagerMixin(object):
@once_per('cls')
def _install_cacheops(self, cls):
cls._cacheprofile = model_profile(cls)
if family_has_profile(cls):
# Set up signals
connect_first(pre_save, self._pre_save, sender=cls)
connect_first(post_save, self._post_save, sender=cls)
connect_first(post_delete, self._post_delete, sender=cls)
# Install auto-created models as their module attributes to make them picklable
module = sys.modules[cls.__module__]
if not hasattr(module, cls.__name__):
setattr(module, cls.__name__, cls)
def contribute_to_class(self, cls, name):
self._no_monkey.contribute_to_class(self, cls, name)
# Django 1.7+ migrations create lots of fake models, just skip them
# NOTE: we make it here rather than inside _install_cacheops()
# because we don't want @once_per() to hold refs to all of them.
if cls.__module__ != '__fake__':
self._install_cacheops(cls)
def _pre_save(self, sender, instance, **kwargs):
if instance.pk is not None and not no_invalidation.active:
try:
_old_objs.__dict__[sender, instance.pk] = sender.objects.get(pk=instance.pk)
except sender.DoesNotExist:
pass
def _post_save(self, sender, instance, **kwargs):
# Invoke invalidations for both old and new versions of saved object
old = _old_objs.__dict__.pop((sender, instance.pk), None)
if old:
invalidate_obj(old)
invalidate_obj(instance)
# NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile,
# but its base having one. Or vice versa.
# We still need to invalidate in this case, but cache on save better be skipped.
if not instance._cacheprofile or in_transaction():
return
# When cache_on_save is enabled we write the saved object to the cache.
# Later it can be retrieved with .get(<cache_on_save_field>=<value>)
# <cache_on_save_field> is pk unless specified.
# This sweet trick saves a db request and helps with slave lag.
cache_on_save = instance._cacheprofile.get('cache_on_save')
if cache_on_save:
# HACK: We get this object "from field" so it can contain
# some undesirable attributes or other objects attached.
# RelatedField accessors do that, for example.
#
# So we strip down any _*_cache attrs before saving
# and later reassign them
unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
for k in unwanted_dict:
del instance.__dict__[k]
key = 'pk' if cache_on_save is True else cache_on_save
cond = {key: getattr(instance, key)}
qs = sender.objects.inplace().filter(**cond).order_by()
if MAX_GET_RESULTS:
qs = qs[:MAX_GET_RESULTS + 1]
qs._cache_results(qs._cache_key(), [instance])
# Reverting stripped attributes
instance.__dict__.update(unwanted_dict)
def _post_delete(self, sender, instance, **kwargs):
"""
Invalidation upon object deletion.
"""
# NOTE: this will behave wrongly if someone changed object fields
# before deletion (but why would anyone do that?)
invalidate_obj(instance)
def inplace(self):
return self.get_queryset().inplace()
def cache(self, *args, **kwargs):
return self.get_queryset().cache(*args, **kwargs)
def nocache(self):
return self.get_queryset().nocache()
def invalidated_update(self, **kwargs):
return self.get_queryset().inplace().invalidated_update(**kwargs)
def invalidate_m2m(sender=None, instance=None, model=None, action=None, pk_set=None, reverse=None,
**kwargs):
"""
Invoke invalidation on m2m changes.
"""
# Skip this machinery for explicit through tables,
# since post_save and post_delete events are triggered for them
if not sender._meta.auto_created:
return
if action not in ('pre_clear', 'post_add', 'pre_remove'):
return
m2m = next(m2m for m2m in instance._meta.many_to_many + model._meta.many_to_many
if m2m.rel.through == sender)
# TODO: optimize several invalidate_objs/dicts at once
if action == 'pre_clear':
# TODO: always use column names here once Django 1.3 is dropped
instance_field = m2m.m2m_reverse_field_name() if reverse else m2m.m2m_field_name()
objects = sender.objects.filter(**{instance_field: instance.pk})
for obj in objects:
invalidate_obj(obj)
elif action in ('post_add', 'pre_remove'):
instance_column, model_column = m2m.m2m_column_name(), m2m.m2m_reverse_name()
if reverse:
instance_column, model_column = model_column, instance_column
# NOTE: we don't need to query through objects here,
# because we already know all their meaningful attributes.
for pk in pk_set:
invalidate_dict(sender, {
instance_column: instance.pk,
model_column: pk
})
@once
def install_cacheops():
"""
Installs cacheops by numerous monkey patches
"""
monkey_mix(Manager, ManagerMixin)
monkey_mix(QuerySet, QuerySetMixin)
QuerySet._cacheprofile = QuerySetMixin._cacheprofile
QuerySet._cloning = QuerySetMixin._cloning
# DateQuerySet existed in Django 1.7 and earlier
# Values*QuerySet existed in Django 1.8 and earlier
from django.db.models import query
for cls_name in ('ValuesQuerySet', 'ValuesListQuerySet', 'DateQuerySet'):
if hasattr(query, cls_name):
cls = getattr(query, cls_name)
monkey_mix(cls, QuerySetMixin, ['iterator'])
# Use app registry to introspect used apps
from django.apps import apps
# Install profile and signal handlers for any earlier created models
for model in apps.get_models(include_auto_created=True):
model._default_manager._install_cacheops(model)
# Turn off caching in admin
if apps.is_installed('django.contrib.admin'):
from django.contrib.admin.options import ModelAdmin
@monkey(ModelAdmin)
def get_queryset(self, request):
return get_queryset.original(self, request).nocache()
# Bind m2m changed handler
m2m_changed.connect(invalidate_m2m)
# Make buffers/memoryviews pickleable to serialize binary field data
if six.PY2:
import copy_reg
copy_reg.pickle(buffer, lambda b: (buffer, (bytes(b),)))
if six.PY3:
import copyreg
copyreg.pickle(memoryview, lambda b: (memoryview, (bytes(b),)))
|
bsd-3-clause
| -6,195,858,916,017,696,000
| 37.234405
| 98
| 0.597399
| false
| 4.125229
| false
| false
| false
|
leigh123linux/Cinnamon
|
files/usr/share/cinnamon/cinnamon-settings/modules/cs_workspaces.py
|
3
|
2070
|
#!/usr/bin/python3
from SettingsWidgets import SidePage
from xapp.GSettingsWidgets import *
class Module:
name = "workspaces"
category = "prefs"
comment = _("Manage workspace preferences")
def __init__(self, content_box):
keywords = _("workspace, osd, expo, monitor")
sidePage = SidePage(_("Workspaces"), "cs-workspaces", keywords, content_box, module=self)
self.sidePage = sidePage
def shouldLoad(self):
return True
def on_module_selected(self):
if not self.loaded:
print("Loading Workspaces module")
page = SettingsPage()
self.sidePage.add_widget(page)
settings = page.add_section(_("Workspace Options"))
switch = GSettingsSwitch(_("Enable workspace OSD"), "org.cinnamon", "workspace-osd-visible")
settings.add_row(switch)
switch = GSettingsSwitch(_("Allow cycling through workspaces"), "org.cinnamon.muffin", "workspace-cycle")
settings.add_row(switch)
switch = GSettingsSwitch(_("Only use workspaces on primary monitor (requires Cinnamon restart)"), "org.cinnamon.muffin", "workspaces-only-on-primary")
settings.add_row(switch)
switch = GSettingsSwitch(_("Display Expo view as a grid"), "org.cinnamon", "workspace-expo-view-as-grid")
settings.add_row(switch)
# Edge Flip doesn't work well, so it's there in gsettings, but we don't show it to users yet
# switch = GSettingsSwitch(_("Enable Edge Flip"), "org.cinnamon", "enable-edge-flip")
# settings.add_row(switch)
# spin = GSettingsSpinButton(_("Edge Flip delay"), "org.cinnamon", "edge-flip-delay", mini=1, maxi=3000, units=_("ms"))
# settings.add_reveal_row(spin, "org.cinnamon", "enable-edge-flip")
switch = GSettingsSwitch(_("Invert the left and right arrow key directions used to shift workspaces during a window drag"), "org.cinnamon.muffin", "invert-workspace-flip-direction")
settings.add_row(switch)
|
gpl-2.0
| 622,025,873,954,569,200
| 42.125
| 193
| 0.638164
| false
| 3.812155
| false
| false
| false
|
vmassuchetto/dnstorm
|
dnstorm/app/migrations/0005_auto__add_field_idea_description.py
|
1
|
16145
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Criteria.created'
db.add_column('dnstorm_criteria', 'created',
self.gf('django.db.models.fields.DateTimeField')(default='2000-01-01', auto_now_add=True, blank=True),
keep_default=False)
# Adding field 'Criteria.updated'
db.add_column('dnstorm_criteria', 'updated',
self.gf('django.db.models.fields.DateTimeField')(default='2000-01-01', auto_now=True, blank=True),
keep_default=False)
# Adding field 'Idea.description'
db.add_column('dnstorm_idea', 'description',
self.gf('django.db.models.fields.TextField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Criteria.created'
db.delete_column('dnstorm_criteria', 'created')
# Deleting field 'Criteria.updated'
db.delete_column('dnstorm_criteria', 'updated')
# Deleting field 'Idea.description'
db.delete_column('dnstorm_idea', 'description')
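    # Usage sketch (South-era workflow; the name of the previous migration is not
    # shown here, so it is left as a placeholder):
    #
    #   python manage.py migrate app                        # apply forwards()
    #   python manage.py migrate app <previous_migration>   # run backwards() down to it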
models = {
u'actstream.action': {
'Meta': {'ordering': "('-timestamp',)", 'object_name': 'Action'},
'action_object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'action_object'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'action_object_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'actor_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actor'", 'to': u"orm['contenttypes.ContentType']"}),
'actor_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data': ('jsonfield.fields.JSONField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'target'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'verb': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'app.alternative': {
'Meta': {'object_name': 'Alternative', 'db_table': "'dnstorm_alternative'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'alternative_coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2001-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['app.Idea']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.comment': {
'Meta': {'object_name': 'Comment', 'db_table': "'dnstorm_comment'"},
'alternative': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Alternative']", 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Criteria']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Idea']", 'null': 'True', 'blank': 'True'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']", 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.criteria': {
'Meta': {'object_name': 'Criteria', 'db_table': "'dnstorm_criteria'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'criteria_coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'fmt': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'min': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'order': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']"}),
'result': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '60', 'populate_from': "'name'", 'unique_with': '()'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'}),
'weight': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'app.idea': {
'Meta': {'object_name': 'Idea', 'db_table': "'dnstorm_idea'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'idea_coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'problem': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Problem']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.ideacriteria': {
'Meta': {'object_name': 'IdeaCriteria', 'db_table': "'dnstorm_idea_criteria'"},
'criteria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Criteria']"}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['app.Idea']"}),
'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value_currency': ('django.db.models.fields.DecimalField', [], {'default': '0', 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'value_number': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'value_scale': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'value_time': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'app.invitation': {
'Meta': {'object_name': 'Invitation', 'db_table': "'dnstorm_invitation'"},
'hash': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'app.option': {
'Meta': {'object_name': 'Option', 'db_table': "'dnstorm_option'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'value': ('django.db.models.fields.TextField', [], {})
},
u'app.problem': {
'Meta': {'object_name': 'Problem', 'db_table': "'dnstorm_problem'"},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'author'", 'to': u"orm['auth.User']"}),
'coauthor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'coauthor'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'collaborator': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'collaborator'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('ckeditor.fields.RichTextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_activity': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'}),
'open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '60', 'populate_from': "'title'", 'unique_with': '()'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '90'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': "'2000-01-01'", 'auto_now': 'True', 'blank': 'True'})
},
u'app.vote': {
'Meta': {'object_name': 'Vote', 'db_table': "'dnstorm_vote'"},
'alternative': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'vote_alternative'", 'null': 'True', 'to': u"orm['app.Alternative']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'vote_comment'", 'null': 'True', 'to': u"orm['app.Alternative']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': "'2001-01-01'", 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idea': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'vote_idea'", 'null': 'True', 'to': u"orm['app.Idea']"}),
'value': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['app']
|
gpl-2.0
| -7,145,299,977,409,573,000
| 81.377551
| 208
| 0.552865
| false
| 3.579823
| false
| false
| false
|
czepluch/pysecp256k1
|
c_secp256k1/__init__.py
|
1
|
9618
|
import numbers
import struct
from glob import glob
from os import path
try:
from secrets import SystemRandom
random = SystemRandom()
except ImportError:
try:
from random import SystemRandom
random = SystemRandom()
except ImportError:
import random
from bitcoin import electrum_sig_hash as _b_electrum_sig_hash
from bitcoin import encode_sig as _b_encode_sig
from bitcoin import decode_sig as _b_decode_sig
from bitcoin import N, P
secpk1n = 115792089237316195423570985008687907852837564279074904382605163141518161494337
try:
from ._c_secp256k1 import ffi
except ImportError as e:
raise ImportError(
"CFFI extension not found. You need to install this package before use. %r" % e)
try:
obj_name = glob(path.abspath(path.join(path.dirname(__file__), "libsecp256k1*")))[0]
except Exception as e:
raise ImportError(
"secp256k1 lib not found. You need to run 'python setup.py build' or see README %r" % e)
lib = ffi.dlopen(obj_name)
# ffi definition of the context
ctx = lib.secp256k1_context_create(3)
# arbitrary data used by the nonce generation function
ndata = ffi.new("unsigned char[]", bytes(bytearray(random.getrandbits(8) for _ in range(32))))
# helpers
class InvalidPubkeyError(Exception):
pass
class InvalidSignatureError(Exception):
pass
class InvalidPrivateKeyError(Exception):
pass
if hasattr(int, 'to_bytes'):
def _int_to_big_endian32(value):
return value.to_bytes(32, byteorder='big')
else:
def _int_to_big_endian32(value):
cs = []
while value > 0:
cs.append(chr(value % 256))
value /= 256
s = b''.join(reversed(cs))
return b'\x00' * (32 - len(s)) + s
if hasattr(int, 'from_bytes'):
def _big_endian_to_int(value):
return int.from_bytes(value, byteorder='big')
else:
def _big_endian_to_int(value):
return int(value.encode('hex'), 16)
def _encode_sig(v, r, s):
assert isinstance(v, numbers.Integral)
assert v in (27, 28)
vb, rb, sb = bytes(bytearray((v - 27,))), _int_to_big_endian32(r), _int_to_big_endian32(s)
return rb + sb + vb
def _decode_sig(sig):
return ord(sig[64:65]) + 27, _big_endian_to_int(sig[0:32]), _big_endian_to_int(sig[32:64])
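# Illustrative round trip (a sketch, not part of the original module): encoding
# packs r and s as 32-byte big-endian values followed by a single recovery byte
# (v - 27), so a 65-byte compact signature decodes back to the same triple:
#
#   _decode_sig(_encode_sig(27, 1234, 5678)) == (27, 1234, 5678)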
def _verify_seckey(seckey):
# Validate seckey
is_valid = lib.secp256k1_ec_seckey_verify(ctx, seckey)
return is_valid
def _deserialize_pubkey(pub):
pubkey = ffi.new("secp256k1_pubkey *")
# Return 1 if pubkey is valid
valid_pub = lib.secp256k1_ec_pubkey_parse(
ctx, # const secp256k1_context*
pubkey, # secp256k1_pubkey*
pub, # const unsigned char
len(pub) # size_t
)
if not valid_pub:
raise InvalidPubkeyError()
return pubkey
def _serialize_pubkey(pub):
serialized_pubkey = ffi.new("unsigned char[65]")
outputlen = ffi.new("size_t *")
# Serialize a pubkey object into a serialized byte sequence.
lib.secp256k1_ec_pubkey_serialize(
ctx,
serialized_pubkey,
outputlen,
pub,
0 # SECP256K1_EC_COMPRESSED
)
return serialized_pubkey
def _der_deserialize_signature(in_sig):
sig = ffi.new("secp256k1_ecdsa_signature *")
# Return 1 when signature could be parsed
valid_sig = lib.secp256k1_ecdsa_signature_parse_der(
ctx, # const secp256k1_context*
sig, # secp256k1_ecdsa_signature*
in_sig, # const unsigned char
len(in_sig) # size_t
)
if not valid_sig:
raise InvalidSignatureError()
return sig
def _der_serialize_signature(sig):
serialized_sig = ffi.new("unsigned char[65]")
outputlen = ffi.new("size_t *")
# Serialize a pubkey object into a serialized byte sequence.
serializeable = lib.secp256k1_ecdsa_signature_serialize_der(
ctx,
serialized_sig,
outputlen,
sig, # secp256k1_ecdsa_signature *
)
assert serializeable == 1
return serialized_sig
def _ecdsa_sign_recoverable(msg32, seckey):
"""
Takes a message of 32 bytes and a private key
Returns a recoverable signature of length 64
"""
assert isinstance(msg32, bytes)
assert isinstance(seckey, bytes)
assert len(msg32) == len(seckey) == 32
if not _verify_seckey(seckey):
raise InvalidPrivateKeyError()
# Make a recoverable signature of 65 bytes
sig64 = ffi.new("secp256k1_ecdsa_recoverable_signature *")
lib.secp256k1_ecdsa_sign_recoverable(
ctx,
sig64,
msg32,
seckey,
ffi.addressof(lib, "secp256k1_nonce_function_default"),
ndata,
)
return sig64
def _parse_to_recoverable_signature(sig):
"""
Returns a parsed recoverable signature of length 65 bytes
"""
# Buffer for getting values of signature object
assert isinstance(sig, bytes)
assert len(sig) == 65
# Make a recoverable signature of 65 bytes
rec_sig = ffi.new("secp256k1_ecdsa_recoverable_signature *")
# Retrieving the recid from the last byte of the signed key
recid = ord(sig[64:65])
# Parse a revoverable signature
parsable_sig = lib.secp256k1_ecdsa_recoverable_signature_parse_compact(
ctx,
rec_sig,
sig,
recid
)
# Verify that the signature is parsable
if not parsable_sig:
raise InvalidSignatureError()
return rec_sig
def _check_signature(sig_compact):
if not len(sig_compact) == 65:
raise InvalidSignatureError()
v, r, s = _decode_sig(sig_compact)
if r >= N or s >= P or v < 27 or v > 28 or r < 1 or s < 1 or s >= secpk1n:
raise InvalidSignatureError()
if not (r < secpk1n and s < secpk1n and (v == 27 or v == 28)):
raise InvalidSignatureError()
# compact encoding
def ecdsa_sign_compact(msg32, seckey):
"""
Takes the same message and seckey as _ecdsa_sign_recoverable
Returns an unsigned char array of length 65 containing the signed message
"""
# Assign 65 bytes to output
output64 = ffi.new("unsigned char[65]")
# ffi definition of recid
recid = ffi.new("int *")
lib.secp256k1_ecdsa_recoverable_signature_serialize_compact(
ctx,
output64,
recid,
_ecdsa_sign_recoverable(msg32, seckey)
)
# Assign recid to the last byte in the output array
r = ffi.buffer(output64)[:64] + struct.pack("B", recid[0])
assert len(r) == 65, len(r)
return r
def ecdsa_recover_compact(msg32, sig):
"""
Takes the a message and a parsed recoverable signature
Returns the serialized public key from the private key in the sign function
"""
assert isinstance(msg32, bytes)
assert len(msg32) == 32
_check_signature(sig)
# Check that recid is of valid value
recid = ord(sig[64:65])
if not (recid >= 0 and recid <= 3):
raise InvalidSignatureError()
# Setting the pubkey array
pubkey = ffi.new("secp256k1_pubkey *")
lib.secp256k1_ecdsa_recover(
ctx,
pubkey,
_parse_to_recoverable_signature(sig),
msg32
)
serialized_pubkey = _serialize_pubkey(pubkey)
buf = ffi.buffer(serialized_pubkey, 65)
r = buf[:]
assert isinstance(r, bytes)
assert len(r) == 65, len(r)
return r
def ecdsa_verify_compact(msg32, sig, pub):
"""
Takes a message of length 32 and a signed message and a pubkey
Returns True if the signature is valid
"""
assert isinstance(msg32, bytes)
assert len(msg32) == 32
# Check if pubkey has been bin_electrum encoded.
# If so, append \04 to the front of the key, to make sure the length is 65
if len(pub) == 64:
pub = b'\04'+pub
assert len(pub) == 65
_check_signature(sig)
# Setting the pubkey array
c_sig = ffi.new("secp256k1_ecdsa_signature *")
# converts the recoverable signature to a signature
lib.secp256k1_ecdsa_recoverable_signature_convert(
ctx,
c_sig,
_parse_to_recoverable_signature(sig)
)
is_valid = lib.secp256k1_ecdsa_verify(
ctx,
c_sig, # const secp256k1_ecdsa_signature
msg32, # const unsigned char
_deserialize_pubkey(pub) # const secp256k1_pubkey
)
return is_valid == 1
# raw encoding (v, r, s)
def ecdsa_sign_raw(rawhash, key):
"""
Takes a rawhash message and a private key and returns a tuple
of the v, r, s values.
"""
return _decode_sig(ecdsa_sign_compact(rawhash, key))
def ecdsa_recover_raw(rawhash, vrs):
"""
Takes a rawhash message of length 32 bytes and a (v, r, s) tuple
Returns a public key for the private key used in the sign function
"""
assert len(vrs) == 3
assert len(rawhash) == 32
return ecdsa_recover_compact(rawhash, _encode_sig(*vrs))
def ecdsa_verify_raw(msg32, vrs, pub):
"""
Takes a message, the signature being verified and a pubkey
Returns 1 if signature is valid with given pubkey
"""
# assert len(vrs) == 3
if len(vrs) == 3:
return ecdsa_verify_compact(msg32, _encode_sig(*vrs), pub)
else:
return ecdsa_verify_compact(msg32, vrs, pub)
# DER encoding
def ecdsa_sign_der(msg, seckey):
return _b_encode_sig(*ecdsa_sign_raw(_b_electrum_sig_hash(msg), seckey))
def ecdsa_recover_der(msg, sig):
return ecdsa_recover_raw(_b_electrum_sig_hash(msg), _b_decode_sig(sig))
def ecdsa_verify_der(msg, sig, pub):
return ecdsa_verify_raw(_b_electrum_sig_hash(msg), _b_decode_sig(sig), pub)
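# Usage sketch (illustrative only; the key material below is a made-up example,
# not a recommended way to derive keys):
#
#   from hashlib import sha256
#   seckey = sha256(b'example seed').digest()    # 32-byte private key
#   msg32 = sha256(b'hello').digest()            # 32-byte message hash
#   sig = ecdsa_sign_compact(msg32, seckey)      # 65 bytes: r || s || recid
#   pub = ecdsa_recover_compact(msg32, sig)      # 65-byte serialized public key
#   assert ecdsa_verify_compact(msg32, sig, pub)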
|
mit
| -740,912,916,495,049,200
| 26.797688
| 96
| 0.638698
| false
| 3.374737
| false
| false
| false
|
LamaHamadeh/Microsoft-DAT210x
|
Module 5/assignment3.py
|
1
|
6640
|
'''
author Lama Hamadeh
'''
import pandas as pd
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib
from sklearn.cluster import KMeans
matplotlib.style.use('ggplot') # Look Pretty
#
# INFO: This dataset has call records for 10 users tracked over the course of 3 years.
# Your job is to find out where the users likely live at!
'''
def showandtell(title=None):
if title != None: plt.savefig(title + ".png", bbox_inches='tight', dpi=300)
plt.show()
exit()
'''
def clusterInfo(model):
print ("Cluster Analysis Inertia: ", model.inertia_)
print ('------------------------------------------')
for i in range(len(model.cluster_centers_)):
print ("\n Cluster ", i)
print (" Centroid ", model.cluster_centers_[i])
print (" #Samples ", (model.labels_==i).sum()) # NumPy Power
# Find the cluster with the least # attached nodes
def clusterWithFewestSamples(model):
    # Ensure there's at least one cluster...
minSamples = len(model.labels_)
minCluster = 0
for i in range(len(model.cluster_centers_)):
if minSamples > (model.labels_==i).sum():
minCluster = i
minSamples = (model.labels_==i).sum()
print ("\n Cluster With Fewest Samples: "), minCluster
return (model.labels_==minCluster)
def doKMeans(data, clusters=0):
#
# TODO: Be sure to only feed in Lat and Lon coordinates to the KMeans algo, since none of the other
# data is suitable for your purposes. Since both Lat and Lon are (approximately) on the same scale,
# no feature scaling is required. Print out the centroid locations and add them onto your scatter
# plot. Use a distinguishable marker and color.
#
# Hint: Make sure you fit ONLY the coordinates, and in the CORRECT order (lat first).
# This is part of your domain expertise.
#
# .. your code here ..
dataframe = pd.concat([data.TowerLon, data.TowerLat], axis = 1)
kmeans = KMeans(n_clusters=clusters)
labels = kmeans.fit_predict(dataframe)
# INFO: Print and plot the centroids...
centroids = kmeans.cluster_centers_
ax.scatter(x = centroids[:, 0], y = centroids[:, 1], marker='x', c='red', alpha=0.9, linewidths=3, s=250)
model = kmeans
return model
#
# TODO: Load up the dataset and take a peek at its head and dtypes.
# Convert the date using pd.to_datetime, and the time using pd.to_timedelta
#
# .. your code here ..
df=pd.read_csv('/Users/lamahamadeh/Downloads/Modules/DAT210x-master/Module5/Datasets/CDR.csv')
#print(df)
print(df.dtypes)
df.CallDate = pd.to_datetime(df.CallDate) # Converts the entries in the 'CallDate' column to datetime
df.CallTime = pd.to_timedelta(df.CallTime) # Converts the entries in the 'CallTime' column to timedelta
df.Duration = pd.to_timedelta(df.Duration) # Converts the entries in the 'Duration' column to timedelta
print(df.dtypes)
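# Illustrative sketch of the conversions above (values are made up, not from the
# CDR dataset):
#
#   pd.to_datetime('2010-12-25')          # -> Timestamp('2010-12-25 00:00:00')
#   pd.to_timedelta('07:16:24.736813')    # -> Timedelta('0 days 07:16:24.736813')
#
# With CallTime stored as a Timedelta, a comparison such as
# user1.CallTime < '17:00:00' (used further below) compares elapsed time since midnight.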
#
# TODO: Get a distinct list of "In" phone numbers (users) and store the values in a
# regular python list (i.e., numpy.ndarray).
# Hint: https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.tolist.html
#
# .. your code here ..
in_numbers = df.In.unique() #distinct == unique
#print(in_numbers)
print(type(in_numbers)) #numpy.ndarray
#
# INFO: The locations map above should be too "busy" to really wrap your head around. This
# is where domain expertise comes into play. Your intuition tells you that people are likely
# to behave differently on weekends:
#
# On Weekends:
# 1. People probably don't go into work
# 2. They probably sleep in late on Saturday
# 3. They probably run a bunch of random errands, since they couldn't during the week
# 4. They should be home, at least during the very late hours, e.g. 1-4 AM
#
# On Weekdays:
# 1. People probably are at work during normal working hours
# 2. They probably are at home in the early morning and during the late night
# 3. They probably spend time commuting between work and home everyday
#print ("\n\nExamining person: ", 0)
#
# TODO: Create a slice called user1 that filters to only include dataset records where the
# "In" feature (user phone number) is equal to the first number on your unique list above
#
# .. your code here ..
user1 = df[(df.In == in_numbers[0])]
print(user1)
#
# TODO: Alter your slice so that it includes only Weekday (Mon-Fri) values.
#
# .. your code here ..
user1 = user1[(user1.DOW == 'Mon') | (user1.DOW == 'Tue')| (user1.DOW == 'Wed')| (user1.DOW == 'Thu')
| (user1.DOW == 'Fri')]
print(user1)
#
# TODO: The idea is that the call was placed before 5pm. From Midnight-730a, the user is
# probably sleeping and won't call / wake up to take a call. There should be a brief time
# in the morning during their commute to work, then they'll spend the entire day at work.
# So the assumption is that most of the time is spent either at work or, second to that, at home.
#
# .. your code here ..
user1 = user1[(user1.CallTime < '17:00:00')]
print(user1)
print(len(user1))
#
# TODO: Plot the Cell Towers the user connected to
#
# .. your code here ..
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(user1.TowerLon,user1.TowerLat, c='g', marker='o', alpha=0.2)
ax.set_title('Weekdays Calls (<5p.m)')
#
# INFO: Run K-Means with K=3 or K=4. There really should only be a two areas of concentration. If you
# notice multiple areas that are "hot" (multiple areas the usr spends a lot of time at that are FAR
# apart from one another), then increase K=5, with the goal being that all centroids except two will
# sweep up the annoying outliers and not-home, not-work travel occasions. the other two will zero in
# on the user's approximate home location and work locations. Or rather the location of the cell
# tower closest to them.....
model = doKMeans(user1, 3)
#
# INFO: Print out the mean CallTime value for the samples belonging to the cluster with the LEAST
# samples attached to it. If our logic is correct, the cluster with the MOST samples will be work.
# The cluster with the 2nd most samples will be home. And the K=3 cluster with the least samples
# should be somewhere in between the two. What time, on average, is the user in between home and
# work, between the midnight and 5pm?
midWayClusterIndices = clusterWithFewestSamples(model)
midWaySamples = user1[midWayClusterIndices]
print ("Its Waypoint Time: ", midWaySamples.CallTime.mean())
#
# Let's visualize the results!
# First draw the X's for the clusters:
ax.scatter(model.cluster_centers_[:,0], model.cluster_centers_[:,1], s=169, c='r', marker='x', alpha=0.8, linewidths=2)
#
# Then save the results:
#showandtell('Weekday Calls Centroids') # Comment this line out when you're ready to proceed
plt.show()
|
mit
| -1,770,293,944,817,420,300
| 33.764398
| 119
| 0.708434
| false
| 3.316683
| false
| false
| false
|
SiniAghilas/scripts
|
source/python/script_martin_algorithm.py
|
1
|
5564
|
#!/usr/bin/jython
import os
import sys
from java.io import FileOutputStream
import logging
import optparse
import getopt
import string
__all__ = []
__version__ = 0.1
__date__ = '15-03-2015'
__updated__ ='21-03-2016'
__author__='asini'
## related path
if os.path.dirname(sys.argv[0])!= "":
directery_name=os.path.dirname(sys.argv[0])+"/"
else :
directery_name="";
#load class (binary path)
os.sys.path.append(directery_name+"bin")
#build path directory
def get_filepaths(directory):
"""
This function will generate the file names in a directory
tree by walking the tree either top-down or bottom-up. For each
directory in the tree rooted at directory top (including top itself),
it yields a 3-tuple (dirpath, dirnames, filenames).
"""
file_paths = [] # List which will store all of the full filepaths.
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
file_paths.append(filepath) # Add it to the list.
return file_paths # Self-explanatory.
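# Illustrative sketch (the directory layout below is made up): for a tree such as
#
#   lib/
#       a.jar
#       sub/
#           b.jar
#
# os.walk('lib') yields ('lib', ['sub'], ['a.jar']) and ('lib/sub', [], ['b.jar']),
# so get_filepaths('lib') returns ['lib/a.jar', 'lib/sub/b.jar'] (POSIX-style paths).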
# Run the above function and store its results in a variable.
jar_files_paths = get_filepaths(directery_name+"lib")
# load all jar file
for jarfilename in jar_files_paths:
os.sys.path.append(jarfilename)
# import Library
import org.netlib.lapack
from fr.loria.parole.jsnoori.model.speech.pitch import Pitch
from fr.loria.parole.jsnoori.model.speech.pitch import AsyncPitch
from fr.loria.parole.jsnoori.model.speech import Spectrogram
from java.util import Vector
from fr.loria.parole.jsnoori.util.file.segmentation import TextGridSegmentationFileUtils
from fr.loria.parole.jsnoori.model import ResourcePath
from fr.loria.parole.jsnoori.model import JSnooriProperties
from fr.loria.parole.jsnoori.model import Constants
from fr.loria.parole.jsnoori.model.audio import AudioSignal
from fr.loria.parole.jsnoori.model import Constants
from fr.loria.parole.jsnoori.util import Energy
from fr.loria.parole.jsnoori.util import TimeConversion
## Options
#option -1 = input file
#option 0 = output file
# option 1 = sex of the speaker
# option 2 = time scale (ms, s)
# option 3 = print to a txt file or to the console
parser=optparse.OptionParser()
parser.add_option("-i", dest="input",type="string",default=None,help="take input file",)
parser.add_option("-o",dest="output",type="string",default=None,help="write output to file")
parser.add_option("-w",dest="window",type="int",default=32,help="size window (Ms)")
parser.add_option("-t",dest="shift",type="int",default=8,help="time shift (Ms)")
parser.add_option("--ts",dest="scale",type="string",default="ms",help="define time scale's")
parser.add_option("-s",dest="sexe",type="int",default=1,help="choose sexe of speakers ")
parser.add_option("-p",dest="print",help="print result on console")
parser.add_option("--dbmin",dest='dbmin',default=60.0,type="float",help="lowest energy (db)",)
# print pitch results
def __print__(pitchs,f0median,fmin,fmax, filename,inputfile ,signal,sexe,shift):
#open file
f=open(filename,"w")
t=0
f.write("File name: "+inputfile+"\n")
f.write(str(signal))
f.write("sexe: "+str(sexe)+"\n")
f.write("f0median: "+str(f0median)+"\n")
f.write("fmax: "+str(fmax)+"\n")
f.write("fmin: "+str(fmin)+"\n")
f.write("f00_corrv\tf00_hz\t\tf01_corrv\tf01_hz\t\tf01_corrv\tf02_hz\n")
for k in range(0,len(pitchs)):
line='%.3f\t\t%d\t\t%.3f\t\t%d\t\t%.3f\t\t%d'%(pitchs.get(k).fst[0],pitchs.get(k).snd[0],pitchs.get(k).fst[1],pitchs.get(k).snd[1],pitchs.get(k).fst[2],pitchs.get(k).snd[2])
f.write(line+"\n")
t+=shift
f.close()
# check options
# opts, args = getopt.getopt(sys.argv[1:], "ho:v", ["help", "output="])
# print len(args)
try:
(options, args)=parser.parse_args()
if options.input==None:
parser.error('this command cannot be empty')
except Exception, e:
raise e
# return options, args
try:
command=options.input
except Exception, e:
command=None
if command!=None:
# load wave signal
signal=AudioSignal(command)
# pitch time shift
timeShift=options.shift
# pitch Window
window=options.window
# Pitch's Object
pitch=Pitch(32,timeShift)
    # male: 1; female: 0; unknown: -1.
sexe=options.sexe
# compute pitchs
pitchs=pitch.computePitch(signal,sexe)
# compute median F0
    f0median=pitch.pitchMedian()
    # get minimal f0
    fmin=pitch.getF0Min()
    # get maximal f0
    fmax=pitch.getF0Max()
    # candidates
    candidatesList=pitch.getPitchs()
    # Alternative computation (disabled: it relies on a `wav` signal and on options
    # -- fmin, fmax, durationwindow, fftOrder, timeshift -- that this script does not define):
    #   pitch_count=len(candidatesList)                             # pitch size
    #   sampleSift=int(TimeConversion.enEchtf(timeShift, signal))   # conversion from time to samples
    #   windowUp=signal.getSampleCount()-(sampleSift*pitch_count)   # new window
    #   energy=Energy(wav, options.fmin, options.fmax, options.dbmin, options.durationwindow, options.fftOrder, options.timeshift)
    # print to file
if(options.output!=None):
__print__(candidatesList,f0median,fmin,fmax,options.output,options.input,signal,sexe,timeShift)
else:
print "error"
|
gpl-3.0
| -2,692,751,104,350,632,400
| 31.923077
| 183
| 0.682063
| false
| 3.060506
| false
| false
| false
|
hankcs/HanLP
|
hanlp/components/mtl/tasks/tok/reg_tok.py
|
1
|
5225
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-08-02 16:51
import logging
from typing import Union, List, Dict, Any, Iterable, Tuple
import torch
from alnlp.modules import util
from torch import Tensor
from torch.utils.data import DataLoader
from hanlp.common.dataset import SamplerBuilder, PadSequenceDataLoader
from hanlp.common.transform import FieldLength, TransformList
from hanlp.components.mtl.tasks import Task
from hanlp.datasets.tokenization.txt import TextTokenizingDataset
from hanlp.layers.scalar_mix import ScalarMixWithDropoutBuilder
from hanlp.layers.transformers.pt_imports import PreTrainedTokenizer
from hanlp.metrics.chunking.binary_chunking_f1 import BinaryChunkingF1
from hanlp.transform.transformer_tokenizer import TransformerSequenceTokenizer
from hanlp_common.util import merge_locals_kwargs
def generate_token_span_tuple(sample: dict):
prefix_mask = sample.get('text_prefix_mask', None)
if prefix_mask:
sample['span_tuple'] = spans = []
previous_prefix = 0
prefix_mask_ = prefix_mask[1:-1]
for i, mask in enumerate(prefix_mask_):
if i and mask:
spans.append((previous_prefix, i))
previous_prefix = i
spans.append((previous_prefix, len(prefix_mask_)))
return sample
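# Illustrative example (the mask values are made up): the first and last positions
# stand for [CLS]/[SEP] and are stripped, and each token becomes a (start, end)
# span over its subtokens:
#
#   generate_token_span_tuple({'text_prefix_mask': [0, 1, 0, 1, 1, 0, 0]})
#   # -> {'text_prefix_mask': [...], 'span_tuple': [(0, 2), (2, 3), (3, 5)]}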
class RegressionTokenizingDecoder(torch.nn.Linear):
def __init__(self, in_features: int, out_features: int = 1, bias: bool = ...) -> None:
super().__init__(in_features, out_features, bias)
# noinspection PyMethodOverriding
def forward(self, input: Tensor, **kwargs) -> Tensor:
return super().forward(input[:, 1:-1, :]).squeeze_(-1)
class RegressionTokenization(Task):
def __init__(self, trn: str = None, dev: str = None, tst: str = None, sampler_builder: SamplerBuilder = None,
dependencies: str = None, scalar_mix: ScalarMixWithDropoutBuilder = None,
use_raw_hidden_states=True, lr=1e-3, separate_optimizer=False, delimiter=None,
max_seq_len=None, sent_delimiter=None) -> None:
super().__init__(**merge_locals_kwargs(locals()))
def build_criterion(self, **kwargs):
return torch.nn.BCEWithLogitsLoss(reduction='mean')
def build_metric(self, **kwargs):
return BinaryChunkingF1()
# noinspection PyMethodOverriding
def build_model(self, encoder_size, training=True, **kwargs) -> torch.nn.Module:
return RegressionTokenizingDecoder(encoder_size)
def predict(self, data: Union[str, List[str]], batch_size: int = None, **kwargs):
pass
def build_dataloader(self,
data,
transform: TransformList = None,
training=False,
device=None,
logger: logging.Logger = None,
tokenizer: PreTrainedTokenizer = None,
**kwargs) -> DataLoader:
assert tokenizer
dataset = TextTokenizingDataset(data, cache=isinstance(data, str), delimiter=self.config.sent_delimiter,
generate_idx=isinstance(data, list),
max_seq_len=self.config.max_seq_len,
sent_delimiter=self.config.sent_delimiter,
transform=[
TransformerSequenceTokenizer(tokenizer,
'text',
ret_prefix_mask=True,
ret_subtokens=True,
),
FieldLength('text_input_ids', 'text_input_ids_length', delta=-2),
generate_token_span_tuple])
return PadSequenceDataLoader(
batch_sampler=self.sampler_builder.build(self.compute_lens(data, dataset, 'text_input_ids', 'text'),
shuffle=training),
device=device,
dataset=dataset)
def decode_output(self,
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
batch: Dict[str, Any], **kwargs) -> List[Tuple[int, int]]:
spans = BinaryChunkingF1.decode_spans(output > 0, batch['text_input_ids_length'])
return spans
def update_metrics(self, batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
prediction: List[Tuple[int, int]], metric: BinaryChunkingF1):
metric.update(prediction, batch['span_tuple'])
def compute_loss(self, batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any], criterion):
mask = util.lengths_to_mask(batch['text_input_ids_length'])
return criterion(output[mask], batch['text_prefix_mask'][:, 1:-1][mask].to(torch.float))
|
apache-2.0
| 9,092,129,869,008,547,000
| 46.93578
| 115
| 0.576459
| false
| 4.328915
| false
| false
| false
|
CSIRT-MU/Stream4Flow
|
applications/detection/ddos/spark/detection_ddos.py
|
1
|
10022
|
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2016 Michal Stefanik <stefanik.m@mail.muni.cz>, Milan Cermak <cermak@ics.muni.cz>
# Institute of Computer Science, Masaryk University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
Description: A method for detection of DoS/DDoS attacks based on an evaluation of
the incoming/outgoing packet volume ratio and its variance to the long-time (long window) ratio.
Usage:
detection_ddos.py -iz <input-zookeeper-hostname>:<input-zookeeper-port> -it <input-topic>
-oz <output-zookeeper-hostname>:<output-zookeeper-port> -ot <output-topic> -nf <regex for network range>
To run this on the Stream4Flow, you need to receive flows by IPFIXCol and make them available via Kafka topic. Then
you can run the example
$ /home/spark/applications/run-application.sh detection/ddos/spark/detection_ddos.py
-iz producer:2181 -it ipfix.entry -oz producer:9092 -ot results.output -nf "10\.10\..+"
"""
import sys # Common system functions
import os # Common operating system functions
import argparse # Arguments parser
import ujson as json # Fast JSON parser
import socket # Socket interface
import re # Regular expression match
from termcolor import cprint # Colors in the console output
from pyspark import SparkContext # Spark API
from pyspark.streaming import StreamingContext # Spark streaming API
from pyspark.streaming.kafka import KafkaUtils # Spark streaming Kafka receiver
from kafka import KafkaProducer # Kafka Python client
def send_to_kafka(data, producer, topic):
"""
Send given data to the specified kafka topic.
:param data: data to send
:param producer: producer that sends the data
:param topic: name of the receiving kafka topic
"""
producer.send(topic, str(data))
def print_and_send(rdd, producer, topic):
"""
Transform given computation results into the JSON format and send them to the specified host.
JSON format:
{"@type": "detection.ddos", "host" : <destination_ip> "shortratio" : <short-term ratio>,
"longratio": <long-term ration>, "attackers": [set of attackers]}
:param rdd: rdd to be parsed and sent
:param producer: producer that sends the data
:param topic: name of the receiving kafka topic
"""
results = ""
rdd_map = rdd.collectAsMap()
# generate JSON response for each aggregated rdd
for host, stats in rdd_map.iteritems():
short_ratio = float(stats[0][0]) / stats[0][1]
long_ratio = float(stats[1][0]) / stats[1][1]
attackers = list(stats[0][2])
new_entry = {"@type": "detection.ddos",
"dst_ip": host,
"shortratio": short_ratio,
"longratio": long_ratio,
"attackers": attackers}
results += ("%s\n" % json.dumps(new_entry))
# Print results to stdout
cprint(results)
# Send results to the specified kafka topic
send_to_kafka(results, producer, topic)
def inspect_ddos(stream_data):
"""
Main method performing the flows aggregation in short and long window and comparison of their ratios
:type stream_data: Initialized spark streaming context.
"""
# Create regex for monitored network
local_ip_pattern = re.compile(network_filter)
# Filter only the data with known source and destination IP
filtered_stream_data = stream_data \
.map(lambda x: json.loads(x[1])) \
.filter(lambda json_rdd: ("ipfix.sourceIPv4Address" in json_rdd.keys() and
"ipfix.destinationIPv4Address" in json_rdd.keys()
))
# Create stream of base windows
small_window = filtered_stream_data.window(base_window_length, base_window_length)
# Count number of incoming packets from each source ip address for each destination ip address
# from a given network range
incoming_small_flows_stats = small_window \
.filter(lambda json_rdd: re.match(local_ip_pattern, json_rdd["ipfix.destinationIPv4Address"])) \
.map(lambda json_rdd: (json_rdd["ipfix.destinationIPv4Address"],
(json_rdd["ipfix.packetDeltaCount"], 0, {json_rdd["ipfix.sourceIPv4Address"]})))
# Count number of outgoing packets for each source ip address from a given network range
outgoing_small_flows_stats = small_window \
.filter(lambda json_rdd: re.match(local_ip_pattern, json_rdd["ipfix.sourceIPv4Address"])) \
.map(lambda json_rdd: (json_rdd["ipfix.sourceIPv4Address"],
(0, json_rdd["ipfix.packetDeltaCount"], set()))) \
# Merge DStreams of incoming and outgoing number of packets
small_window_aggregated = incoming_small_flows_stats.union(outgoing_small_flows_stats)\
.reduceByKey(lambda actual, update: (actual[0] + update[0],
actual[1] + update[1],
actual[2].union(update[2])))
# Create long window for long term profile
union_long_flows = small_window_aggregated.window(long_window_length, base_window_length)
long_window_aggregated = union_long_flows.reduceByKey(lambda actual, update: (actual[0] + update[0],
actual[1] + update[1])
)
# Union DStreams with small and long window
# RDD in DStream in format (local_device_IPv4, (
# (short_inc_packets, short_out_packets, short_source_IPv4s),
# (long_inc_packets, long_out_packets)))
windows_union = small_window_aggregated.join(long_window_aggregated)
# Filter out zero values to prevent division by zero
nonzero_union = windows_union.filter(lambda rdd: rdd[1][0][1] != 0 and rdd[1][1][1] != 0)
# Compare incoming and outgoing transfers volumes and filter only those suspicious
# -> overreaching the minimal_incoming volume of packets and
# -> short-term ratio is greater than long-term ratio * threshold
windows_union_filtered = nonzero_union.filter(lambda rdd: rdd[1][0][0] > minimal_incoming and
float(rdd[1][0][0]) / rdd[1][0][1] > float(rdd[1][1][0]) /
rdd[1][1][1] * threshold
)
# Return the detected records
return windows_union_filtered
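# Worked example of the condition above (numbers are illustrative): a host that
# received 120 000 packets and sent 1 000 in the short window has a short-term
# ratio of 120; with a long-term ratio of 2 and threshold = 50 the bound is 100.
# Since 120 000 > minimal_incoming and 120 > 100, that host is reported.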
if __name__ == "__main__":
# Prepare arguments parser (automatically creates -h argument).
parser = argparse.ArgumentParser()
parser.add_argument("-iz", "--input_zookeeper", help="input zookeeper hostname:port", type=str, required=True)
parser.add_argument("-it", "--input_topic", help="input kafka topic", type=str, required=True)
parser.add_argument("-oz", "--output_zookeeper", help="output zookeeper hostname:port", type=str, required=True)
parser.add_argument("-ot", "--output_topic", help="output kafka topic", type=str, required=True)
parser.add_argument("-nf", "--network_filter", help="regular expression filtering the watched IPs", type=str, required=True)
# Parse arguments.
args = parser.parse_args()
# Set variables
application_name = os.path.basename(sys.argv[0]) # Application name used as identifier
kafka_partitions = 1 # Number of partitions of the input Kafka topic
# Set method parameters:
threshold = 50 # Minimal increase of receive/sent packets ratio
minimal_incoming = 100000 # Minimal count of incoming packets
long_window_length = 7200 # Window length for average ratio computation (must be a multiple of microbatch interval)
base_window_length = 30 # Window length for basic computation (must be a multiple of microbatch interval)
network_filter = args.network_filter # Filter for network for detection (regex filtering), e.g. "10\.10\..+"
# Spark context initialization
sc = SparkContext(appName=application_name + " " + " ".join(sys.argv[1:])) # Application name used as the appName
ssc = StreamingContext(sc, 1) # Spark microbatch is 1 second
# Initialize input DStream of flows from specified Zookeeper server and Kafka topic
input_stream = KafkaUtils.createStream(ssc, args.input_zookeeper, "spark-consumer-" + application_name,
{args.input_topic: kafka_partitions})
# Run the detection of ddos
ddos_result = inspect_ddos(input_stream)
# Initialize kafka producer
kafka_producer = KafkaProducer(bootstrap_servers=args.output_zookeeper,
client_id="spark-producer-" + application_name)
# Process the results of the detection and send them to the specified host
ddos_result.foreachRDD(lambda rdd: print_and_send(rdd, kafka_producer, args.output_topic))
# Send any remaining buffered records
kafka_producer.flush()
# Start input data processing
ssc.start()
ssc.awaitTermination()
|
mit
| -2,848,101,401,985,156,600
| 45.398148
| 128
| 0.669627
| false
| 4.042759
| false
| false
| false
|
pony012/PruebaServicioCucea
|
app/app.py
|
1
|
2764
|
# from flask import Flask
from flask import render_template, redirect, url_for, flash
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import Required, Length, EqualTo, DataRequired
# from flask_mongoengine import MongoEngine
from flask_security import current_user, login_user
from flask_security.core import UserMixin, AnonymousUser
import config
from db import user_datastore
# from models.User import User
# from models.Role import Role
from models.Usuario import Usuario
app = config.app
db_sql = config.db_sql
# Create a user to test with
@app.before_first_request
def create_user():
db_sql.drop_all()
db_sql.create_all()
user_datastore.create_user(email='alan', password='password')
user_datastore.commit()
# if(User.objects.filter(email='matt@nobien.net').count() == 0):
# db.security.datastore.create_user(email='matt@nobien.net',
# password='password')
class LoginForm2(FlaskForm):
email = StringField('Correo', validators=[Required(), Length(1, 64)])
password = PasswordField('Password', validators=[Required()])
remember_me = BooleanField('Recordar', validators=[Required()])
submit = SubmitField('Login')
@app.route('/login', methods=['GET', 'POST'])
def login():
"""User login route."""
if current_user.is_authenticated():
# if user is logged in we get out of here
return redirect(url_for('index'))
form = LoginForm2()
if form.validate_on_submit():
user = Usuario.query.filter_by(username=form.email.data).first()
if user is None or not user.verify_password(form.password.data) or \
not user.verify_totp(form.token.data):
flash('Invalid username, password or token.')
return redirect(url_for('login'))
# log user in
login_user(user)
flash('You are now logged in!')
return redirect(url_for('index'))
print form
print "Form"
return render_template('login_user.html', form2=form)
class user_role_form(FlaskForm):
user = StringField(u'Usuario', validators=[DataRequired])
role = StringField(u'Rol', validators=[DataRequired])
submit = SubmitField(label="Ligar")
@app.route('/user_role/<user>/<role>')
def user_role(user, role):
form = user_role_form()
return render_template('user_role.html', form=form, user=user, role=role)
# app.add_url_rule('/user_role/<user>/<role>', view_func=user_role)
# Views
@app.route('/')
# @login_required
def home():
user = UserMixin
if user.is_anonymous:
user = AnonymousUser
return render_template('index.html', user=user)
if __name__ == '__main__':
app.run()
|
mit
| -703,852,059,490,693,200
| 30.770115
| 77
| 0.671852
| false
| 3.656085
| false
| false
| false
|
utarsuno/quasar_source
|
deprecated/c_processes/c_compiler.py
|
1
|
1140
|
# coding=utf-8
"""This module, c_compiler.py, is a utility program to compiling c programs."""
from universal_code.shell_abstraction.shell_command_runner import run_shell_command_and_get_results
def create_object_file(source_file_path, object_output_path):
"""Creates an object file."""
return _run_command_and_return_output('gcc -c ' + source_file_path + ' -o ' + object_output_path)
def create_executable(source_file_path, c_libraries, object_output_path):
"""Creates a new executable file."""
object_file_paths_as_string = ''
for o in c_libraries:
object_file_paths_as_string += ' ' + o.path_to_object_file + ' '
return _run_command_and_return_output('gcc -Wall -O2 ' + source_file_path + ' ' + object_file_paths_as_string + ' -o ' + object_output_path)
def _run_command_and_return_output(shell_command):
"""Runs the provided shell command."""
output_stdout, output_stderr = run_shell_command_and_get_results(shell_command)
output_stdout = output_stdout.decode('utf-8')
output_stderr = output_stderr.decode('utf-8')
if len(output_stderr):
print('ERROR')
print(output_stderr)
return output_stdout, output_stderr
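# Usage sketch (paths and the library object are hypothetical; any object exposing
# a `path_to_object_file` attribute works as a "c library" here):
#
#   class _Lib(object):
#       def __init__(self, path):
#           self.path_to_object_file = path
#
#   create_object_file('src/util.c', 'build/util.o')
#   create_executable('src/main.c', [_Lib('build/util.o')], 'build/main')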
|
mit
| -659,934,029,010,199,700
| 38.310345
| 141
| 0.716667
| false
| 3.048128
| false
| false
| false
|
MSEMJEJME/Get-Dumped
|
renpy/easy.py
|
1
|
4218
|
# Copyright 2004-2012 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Functions that make the user's life easier.
import renpy.display
import contextlib
import time
def color(c):
"""
This function returns a color tuple, from a hexcode string or a
color tuple.
"""
if isinstance(c, tuple) and len(c) == 4:
return c
if c is None:
return c
if isinstance(c, basestring):
if c[0] == '#':
c = c[1:]
if len(c) == 6:
r = int(c[0]+c[1], 16)
g = int(c[2]+c[3], 16)
b = int(c[4]+c[5], 16)
a = 255
elif len(c) == 8:
r = int(c[0]+c[1], 16)
g = int(c[2]+c[3], 16)
b = int(c[4]+c[5], 16)
a = int(c[6]+c[7], 16)
elif len(c) == 3:
r = int(c[0], 16) * 0x11
g = int(c[1], 16) * 0x11
b = int(c[2], 16) * 0x11
a = 255
elif len(c) == 4:
r = int(c[0], 16) * 0x11
g = int(c[1], 16) * 0x11
b = int(c[2], 16) * 0x11
a = int(c[3], 16) * 0x11
else:
raise Exception("Color string must be 3, 4, 6, or 8 hex digits long.")
return (r, g, b, a)
raise Exception("Not a color: %r" % (c,))
def displayable_or_none(d):
if isinstance(d, renpy.display.core.Displayable):
return d
if d is None:
return d
if isinstance(d, basestring):
if d[0] == '#':
return renpy.store.Solid(d)
elif "." in d:
return renpy.store.Image(d)
elif not d:
raise Exception("Displayable cannot be an empty string.")
else:
return renpy.store.ImageReference(tuple(d.split()))
# We assume the user knows what he's doing in this case.
if hasattr(d, 'parameterize'):
return d
if d is True or d is False:
return d
raise Exception("Not a displayable: %r" % (d,))
def displayable(d):
"""
:doc: udd_utility
:name: renpy.displayable
This takes `d`, which may be a displayable object or a string. If it's
a string, it converts that string into a displayable using the usual
rules.
"""
if isinstance(d, renpy.display.core.Displayable):
return d
if isinstance(d, basestring):
if not d:
raise Exception("An empty string cannot be used as a displayable.")
elif d[0] == '#':
return renpy.store.Solid(d)
elif "." in d:
return renpy.store.Image(d)
else:
return renpy.store.ImageReference(tuple(d.split()))
# We assume the user knows what he's doing in this case.
if hasattr(d, 'parameterize'):
return d
if d is True or d is False:
return d
raise Exception("Not a displayable: %r" % (d,))
def predict(d):
d = renpy.easy.displayable_or_none(d)
if d is not None:
renpy.display.predict.displayable(d)
@contextlib.contextmanager
def timed(name):
start = time.time()
yield
print "{0}: {1:.2f} ms".format(name, (time.time() - start) * 1000.0)
|
gpl-2.0
| 1,460,786,579,161,761,800
| 28.704225
| 82
| 0.584637
| false
| 3.629948
| false
| false
| false
|
carbonblack/cb-event-duplicator
|
cbopensource/tools/eventduplicator/transporter.py
|
1
|
14136
|
from __future__ import absolute_import, division, print_function
import logging
import datetime
from cbopensource.tools.eventduplicator.utils import get_process_id, get_parent_process_id
import sys
__author__ = 'jgarman'
log = logging.getLogger(__name__)
class Transporter(object):
def __init__(self, input_source, output_sink, tree=False):
self.input_md5set = set()
self.input_proc_guids = set()
self.input = input_source
self.output = output_sink
self.mungers = [CleanseSolrData()]
self.seen_sensor_ids = set()
self.seen_feeds = set()
self.seen_feed_ids = set()
self.traverse_tree = tree
def add_anonymizer(self, munger):
self.mungers.append(munger)
def output_process_doc(self, doc):
for munger in self.mungers:
doc = munger.munge_document('proc', doc)
sys.stdout.write('%-70s\r' % ("Uploading process %s..." % get_process_id(doc)))
sys.stdout.flush()
self.output.output_process_doc(doc)
def output_feed_doc(self, doc):
for munger in self.mungers:
doc = munger.munge_document('feed', doc)
# check if we have seen this feed_id before
feed_id = doc['feed_id']
if feed_id not in self.seen_feed_ids:
feed_metadata = self.input.get_feed_metadata(feed_id)
if feed_metadata:
# note that without feed metadata, bad things may happen on the Cb UI side...
self.output.output_feed_metadata(feed_metadata)
self.seen_feed_ids.add(feed_id)
self.output.output_feed_doc(doc)
def output_binary_doc(self, doc):
for munger in self.mungers:
# note that the mungers are mutating the data in place, anyway.
doc = munger.munge_document('binary', doc)
sys.stdout.write('%-70s\r' % ("Uploading binary %s..." % doc['md5']))
sys.stdout.flush()
self.output.output_binary_doc(doc)
def output_sensor_info(self, doc):
for munger in self.mungers:
# note that the mungers are mutating the data in place, anyway.
doc['sensor_info'] = munger.munge_document('sensor', doc['sensor_info'])
self.output.output_sensor_info(doc)
def update_sensors(self, proc):
sensor_id = proc.get('sensor_id', 0)
if not sensor_id:
return []
if sensor_id and sensor_id not in self.seen_sensor_ids:
# notify caller that this sensor_id has to be inserted into the target
self.seen_sensor_ids.add(sensor_id)
return [sensor_id]
return []
def update_md5sums(self, proc):
md5s = set()
process_md5 = proc.get('process_md5', None)
if process_md5 and process_md5 != '0'*32:
md5s.add(proc.get('process_md5'))
for modload_complete in proc.get('modload_complete', []):
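            # Assumed modload_complete entry layout (illustrative): a '|'-delimited
            # string whose second field is the module MD5, e.g.
            #   '<timestamp>|<md5>|<module path>'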
fields = modload_complete.split('|')
md5s.add(fields[1])
retval = md5s - self.input_md5set
self.input_md5set |= md5s
return retval
def traverse_up(self, guid):
# TODO: this prompts a larger issue of - how do we handle process segments?
total = []
for proc in self.input.get_process_docs('unique_id:%s' % (guid,)):
process_id = get_process_id(proc)
if process_id not in self.input_proc_guids:
self.input_proc_guids.add(process_id)
total.append(proc)
parent_process_id = get_parent_process_id(proc)
if parent_process_id and parent_process_id not in self.input_proc_guids:
total.extend(self.traverse_up(parent_process_id))
return total
def traverse_down(self, guid):
total = []
for proc in self.input.get_process_docs('parent_unique_id:%s' % (guid,)):
process_id = get_process_id(proc)
if process_id not in self.input_proc_guids:
self.input_proc_guids.add(process_id)
total.append(proc)
total.extend(self.traverse_down(process_id))
return total
def traverse_up_down(self, proc):
# TODO: infinite recursion prevention
parent_process_id = get_parent_process_id(proc)
process_id = get_process_id(proc)
total = []
# get parents
if parent_process_id:
total.extend(self.traverse_up(parent_process_id))
total.extend(self.traverse_down(process_id))
for proc in total:
yield proc
def get_process_docs(self):
for proc in self.input.get_process_docs():
process_id = get_process_id(proc)
if process_id not in self.input_proc_guids:
self.input_proc_guids.add(get_process_id(proc))
yield proc
if self.traverse_tree:
for tree_proc in self.traverse_up_down(proc):
yield tree_proc
def update_feeds(self, doc):
feed_keys = [k for k in doc.keys() if k.startswith('alliance_data_')]
feed_lookup = set()
for key in feed_keys:
feed_name = key[14:]
for doc_name in doc[key]:
feed_lookup.add("%s:%s" % (feed_name, doc_name))
retval = feed_lookup - self.seen_feeds
self.seen_feeds |= feed_lookup
return retval
@staticmethod
def generate_fake_sensor(sensor_id):
sensor = {'build_info': {'architecture': 32,
'build_version': 50106,
'id': 9,
'installer_avail': True,
'major_version': 5,
'minor_version': 0,
'patch_version': 0,
'upgrader_avail': True,
'version_string': '005.000.000.50106'},
'os_info': {'architecture': 32,
'display_string': 'Windows 7 Ultimate Edition Service Pack 1, 32-bit',
'id': 1,
'major_version': 6,
'minor_version': 1,
'os_type': 1,
'product_type': 1,
'service_pack': 'Service Pack 1',
'suite_mask': 256},
'sensor_info': {'boot_id': 17,
'build_id': 9,
'clock_delta': 2654783,
'computer_dns_name': 'sensor%d' % sensor_id,
'computer_name': 'sensor%d' % sensor_id,
'computer_sid': 'S-1-5-21-2002419555-2189168078-3210101973',
'cookie': 1962833602,
'display': True,
'emet_dump_flags': None,
'emet_exploit_action': None,
'emet_is_gpo': False,
'emet_process_count': 0,
'emet_report_setting': None,
'emet_telemetry_path': None,
'emet_version': None,
'event_log_flush_time': None,
'group_id': 1,
'id': sensor_id,
'last_checkin_time': datetime.datetime(2015, 6, 30, 6, 9, 15, 570570),
'last_update': datetime.datetime(2015, 6, 30, 6, 9, 18, 170552),
'license_expiration': datetime.datetime(1990, 1, 1, 0, 0),
'network_adapters': '192.168.10.241,000c19e962f6|192.168.10.5,000c23b742dc|',
'network_isolation_enabled': False,
'next_checkin_time': datetime.datetime(2015, 6, 30, 6, 9, 45, 564598),
'node_id': 0,
'notes': None,
'num_eventlog_bytes': 400,
'num_storefiles_bytes': 10304408,
'os_environment_id': 1,
'parity_host_id': 2,
'physical_memory_size': 1073209344,
'power_state': 0,
'registration_time': datetime.datetime(2015, 1, 23, 15, 39, 54, 911720),
'restart_queued': False,
'sensor_health_message': 'Healthy',
'sensor_health_status': 100,
'sensor_uptime': 2976455,
'session_token': 0,
'supports_2nd_gen_modloads': False,
'supports_cblr': True,
'supports_isolation': True,
'systemvolume_free_size': 49276923904,
'systemvolume_total_size': 64422408192,
'uninstall': False,
'uninstalled': None,
'uptime': 340776}}
return sensor
def transport(self, debug=False):
# TODO: multithread this so we have some parallelization
log.info("Starting transport from %s to %s" % (self.input.connection_name(), self.output.connection_name()))
input_version = self.input.get_version()
if not self.output.set_data_version(input_version):
raise Exception("Input and Output versions are incompatible")
# get process list
for i, proc in enumerate(self.get_process_docs()):
new_md5sums = self.update_md5sums(proc)
new_sensor_ids = self.update_sensors(proc)
new_feed_ids = self.update_feeds(proc)
# output docs, sending binaries & sensors first
for md5sum in new_md5sums:
doc = self.input.get_binary_doc(md5sum)
if doc:
new_feed_ids |= self.update_feeds(doc)
self.output_binary_doc(doc)
else:
log.warning("Could not retrieve the binary MD5 %s referenced in the process with ID: %s"
% (md5sum, proc['unique_id']))
# TODO: right now we don't munge sensor or feed documents
for sensor in new_sensor_ids:
doc = self.input.get_sensor_doc(sensor)
if not doc:
log.warning("Could not retrieve sensor info for sensor id %s referenced in the process with ID: %s"
                            % (sensor, proc['unique_id']))
doc = self.generate_fake_sensor(sensor)
self.output_sensor_info(doc)
for feed in new_feed_ids:
doc = self.input.get_feed_doc(feed)
if doc:
self.output_feed_doc(doc)
else:
log.warning("Could not retrieve feed document for id %s referenced in the process with ID: %s"
                                % (feed, proc['unique_id']))
self.output_process_doc(proc)
# clean up
self.input.cleanup()
self.output.cleanup()
sys.stdout.write('%-70s\r' % "")
sys.stdout.flush()
log.info("Transport complete from %s to %s" % (self.input.connection_name(), self.output.connection_name()))
def get_report(self):
return self.output.report()
class CleanseSolrData(object):
def __init__(self):
pass
@staticmethod
def munge_document(doc_type, doc_content):
doc_content.pop('_version_', None)
for key in list(doc_content):
if key.endswith('_facet'):
doc_content.pop(key, None)
return doc_content
class DataAnonymizer(object):
def __init__(self):
pass
@staticmethod
def translate(s):
"""
Super dumb translation for anonymizing strings.
:param s: input string
"""
s_new = ''
for c in s:
if c == '\\':
s_new += c
else:
c = chr((ord(c)-65 + 13) % 26 + 65)
s_new += c
return s_new
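    # Illustrative example (not in the original source): translate('HOST') == 'UBFG';
    # uppercase A-Z get a ROT13-style shift, backslashes are kept so that
    # DOMAIN\user separators survive, and other characters follow the same formula.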
@staticmethod
def anonymize(doc):
hostname = doc.get('hostname', '')
hostname_new = DataAnonymizer.translate(hostname)
username = doc.get('username', '')
translation_usernames = {}
if len(username) > 0:
if username.lower() != 'system' and username.lower() != 'local service' and username.lower() != \
'network service':
pieces = username.split('\\')
for piece in pieces:
translation_usernames[piece] = DataAnonymizer.translate(piece)
for field in doc:
values = doc[field]
try:
if not values:
continue
was_list = True
targets = values
if not hasattr(values, '__iter__'):
was_list = False
targets = [values]
values = []
for target in targets:
target = target.replace(hostname, hostname_new)
for key in translation_usernames:
target = target.replace(key, translation_usernames.get(key))
values.append(target)
if not was_list:
values = values[0]
doc[field] = values
except AttributeError:
pass
return doc
def munge_document(self, doc_type, doc_content):
return self.anonymize(doc_content)
|
mit
| 3,804,607,781,867,005,400
| 37.835165
| 119
| 0.491865
| false
| 4.239952
| false
| false
| false
|
GrognardsFromHell/TemplePlus
|
tpdatasrc/co8infra/scr/Spell740 - Ray of Clumsiness.py
|
1
|
1719
|
from toee import *
import tpdp
def OnBeginSpellCast( spell ):
print "Ray of Cluminess OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
def OnSpellEffect( spell ):
print "Ray of Clumsiness OnSpellEffect"
def OnBeginRound( spell ):
print "Ray of Clumsiness OnBeginRound"
def OnBeginProjectile( spell, projectile, index_of_target ):
print "Ray of Clumsiness OnBeginProjectile"
#spell.proj_partsys_id = game.particles( 'sp-Ray of Enfeeblement', projectile )
projectile.obj_set_int( obj_f_projectile_part_sys_id, game.particles( 'sp-Ray of Enfeeblement', projectile ) )
def OnEndProjectile( spell, projectile, index_of_target ):
print "Ray of Clumsiness OnEndProjectile"
target_item = spell.target_list[0]
dam_bonus = min( 5, spell.caster_level / 2 )
dam_amount = spell.roll_dice_with_metamagic(1, 6, dam_bonus)
dam_amount = -dam_amount
print "amount=", dam_amount
spell.duration = 10 * spell.caster_level
game.particles_end( projectile.obj_get_int( obj_f_projectile_part_sys_id ) )
if spell.caster.perform_touch_attack( target_item.obj ) & D20CAF_HIT:
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 20022, tf_red )
target_item.obj.condition_add_with_args( 'sp-Cats Grace', spell.id, spell.duration, dam_amount )
target_item.partsys_id = game.particles( 'sp-Ray of Enfeeblement-Hit', target_item.obj )
else:
# missed
target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30007 )
game.particles( 'Fizzle', target_item.obj )
spell.target_list.remove_target( target_item.obj )
spell.spell_end( spell.id )
def OnEndSpellCast( spell ):
print "Ray of Clumsiness OnEndSpellCast"
|
mit
| -6,098,167,636,621,271,000
| 31.45283
| 111
| 0.733566
| false
| 2.612462
| false
| false
| false
|
arkon/cdf-scrapers
|
labs/labs.py
|
1
|
2979
|
from collections import OrderedDict
from html.parser import HTMLParser
import argparse
import datetime
import json
import os
import sys
import time
import urllib.request
class PageParser(HTMLParser):
"""Parser for CDF Lab Machine Usage page."""
def __init__(self):
HTMLParser.__init__(self)
# Flag for whether an element should be parsed
self.inCell = False
# A data row contains 6 cells
self.rowCell = 0
# List of lab rooms/data
self.data = []
# Timestamp
self.timestamp = ''
def handle_starttag(self, tag, attrs):
# Only read <td> tags
if tag == 'td':
self.inCell = True
def handle_data(self, data):
if not self.inCell:
return
if self.rowCell == 0:
if (data != 'NX'):
data = 'BA ' + data
self.data.append(OrderedDict([
('name', data)
]))
elif self.rowCell == 1:
self.data[-1]['available'] = int(data)
elif self.rowCell == 2:
self.data[-1]['busy'] = int(data)
elif self.rowCell == 3:
self.data[-1]['total'] = int(data)
elif self.rowCell == 4:
self.data[-1]['percent'] = float(data)
elif self.rowCell == 5:
if (self.timestamp == ''):
# Attempt to compensate for changing timezones,
# possibly due to daylight savings
rawTime = data.strip('\u00a0\\n')
timestamp = time.strptime(rawTime, '%a %b %d %H:%M:%S %Z %Y')
if timestamp:
self.timestamp = time.strftime(
'%Y-%m-%d %H:%M:%S %Z', timestamp)
self.rowCell = -1
self.rowCell += 1
self.inCell = False
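# Column order assumed from the usage page (illustrative): lab name, machines
# available, machines busy, total machines, percent in use, then a timestamp cell.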
if __name__ == '__main__':
argparser = argparse.ArgumentParser(
description='Scraper for CDF lab data.')
argparser.add_argument(
'-o', '--output',
help='The output path. Defaults to current directory.',
required=False)
argparser.add_argument(
'-f', '--filename',
help='The output filename. Defaults to "cdflabs.json".',
required=False)
args = argparser.parse_args()
output = '.'
filename = 'cdflabs.json'
# Get data
html = str(urllib.request.urlopen(
'http://www.teach.cs.toronto.edu/usage/usage.html').read())
parser = PageParser()
parser.feed(html)
data = OrderedDict([
('timestamp', parser.timestamp),
('labs', parser.data)
])
# Output
if args.output:
if not os.path.exists(args.output):
os.makedirs(args.output)
output = args.output
if args.filename:
filename = args.filename
if args.output or args.filename:
with open('%s/%s' % (output, filename), 'w+') as outfile:
json.dump(data, outfile)
else:
print(json.dumps(data))
|
mit
| -283,823,200,020,091,780
| 24.681034
| 77
| 0.536757
| false
| 3.998658
| false
| false
| false
|
Leibniz137/testinfra
|
testinfra/modules/supervisor.py
|
1
|
3502
|
# coding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from testinfra.modules.base import Module
STATUS = [
"STOPPED", "STARTING", "RUNNING", "BACKOFF", "STOPPING", "EXITED",
"FATAL", "UNKNOWN",
]
class Supervisor(Module):
"""Test supervisor managed services
>>> gunicorn = Supervisor("gunicorn")
>>> gunicorn.status
'RUNNING'
>>> gunicorn.is_running
True
>>> gunicorn.pid
4242
"""
def __init__(self, name, _attrs_cache=None):
self.name = name
self._attrs_cache = _attrs_cache
super(Supervisor, self).__init__()
@staticmethod
def _parse_status(line):
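        # Expected supervisorctl status line shape (illustrative):
        #   gunicorn                         RUNNING   pid 4242, uptime 0:01:02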
splitted = line.split()
name = splitted[0]
status = splitted[1]
# supervisorctl exit status is 0 even if it cannot connect to
# supervisord socket and output the error to stdout.
# So we check that parsed status is a known status.
if status not in STATUS:
raise RuntimeError(
"Cannot get supervisor status. Is supervisor running ?")
if status == "RUNNING":
pid = splitted[3]
if pid[-1] == ",":
pid = int(pid[:-1])
else:
pid = int(pid)
else:
pid = None
return {"name": name, "status": status, "pid": pid}
@property
def _attrs(self):
if self._attrs_cache is None:
line = self.check_output("supervisorctl status %s", self.name)
attrs = self._parse_status(line)
assert attrs["name"] == self.name
self._attrs_cache = attrs
return self._attrs_cache
@property
def is_running(self):
"""Return True if managed service is in status RUNNING"""
return self.status == "RUNNING"
@property
def status(self):
"""Return the status of the managed service
Status can be STOPPED, STARTING, RUNNING, BACKOFF, STOPPING,
EXITED, FATAL, UNKNOWN.
See http://supervisord.org/subprocess.html#process-states
"""
return self._attrs["status"]
@property
def pid(self):
"""Return the pid (as int) of the managed service"""
return self._attrs["pid"]
@classmethod
def get_services(cls):
"""Get a list of services running under supervisor
>>> Supervisor.get_services()
[<Supervisor(name="gunicorn", status="RUNNING", pid=4232)>
<Supervisor(name="celery", status="FATAL", pid=None)>]
"""
services = []
for line in cls(None).check_output(
"supervisorctl status",
).splitlines():
attrs = cls._parse_status(line)
service = cls(attrs["name"], attrs)
services.append(service)
return services
def __repr__(self):
return "<Supervisor(name=%s, status=%s, pid=%s)>" % (
self.name,
self.status,
self.pid,
)
|
apache-2.0
| 7,123,089,475,247,661,000
| 29.99115
| 74
| 0.590805
| false
| 4.154211
| false
| false
| false
|
JessWalters/VinnyBot
|
Core/Stats.py
|
1
|
2045
|
import glob
import json
import os
import threading
import discord
from urllib.request import urlopen
from urllib.request import Request
from Config import getToken
commandsCalled = 0
members = {}
VINNY_COLOR = int('008cba', 16)
async def getStats(message, client):
serverCount = 0
channelCount = 0
for server in client.guilds:
serverCount += 1
for channel in server.channels:
channelCount += 1
for member in server.members:
members[member.id] = 1
if message.channel.permissions_for(message.guild.me).embed_links:
embed = discord.Embed(title='', colour=VINNY_COLOR)
embed.add_field(name='Servers',
value='{}'.format(serverCount),
inline=True)
embed.add_field(name='Channels', value=channelCount, inline=True)
embed.add_field(name='Users', value=len(members), inline=True)
try:
embed.add_field(name='Shards', value=str(len(client.shard_ids)), inline=False)
except TypeError:
embed.add_field(name='Shards', value=5, inline=False)
embed.set_author(name=client.user.name, icon_url=client.user.avatar_url)
return await message.channel.send("Find more detailed stats at: https://goo.gl/Jct6uL", embed=embed)
else:
        await message.channel.send("Vinny Stats:\n`Servers: " + str(serverCount) + "\nChannels: " + str(channelCount)
                                   + "\n`")
def sendStatistics(client):
url = "https://bots.discord.pw/api/bots/" + getToken('Bot ID') + "/stats"
serverCount = len(client.guilds)
data = {
"server_count": serverCount
}
req = Request(url)
req.add_header('Content-Type', 'application/json')
req.add_header('Authorization', getToken('Bot API'))
response = urlopen(req, json.dumps(data).encode('utf8'))
print('Stats Posted Successfully')
t = threading.Timer(3600.0, sendStatistics, args=(client,))
t.setDaemon(True)
t.start()
|
mit
| -4,362,727,674,846,634,000
| 33.661017
| 134
| 0.632274
| false
| 3.724954
| false
| false
| false
|
dchaplinsky/pep.org.ua
|
pepdb/core/migrations/0160_auto_20190801_1806.py
|
1
|
1125
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-08-01 15:06
from __future__ import unicode_literals
from django.db import migrations, models
def publish_everything(apps, schema_editor):
Company = apps.get_model("core", "Company")
Person = apps.get_model("core", "Person")
Person.objects.all().update(publish=True)
Company.objects.all().update(publish=True)
class Migration(migrations.Migration):
dependencies = [
('core', '0159_auto_20190529_1550'),
]
operations = [
migrations.AlterField(
model_name='company',
name='publish',
field=models.BooleanField(default=True, verbose_name='\u041e\u043f\u0443\u0431\u043b\u0456\u043a\u0443\u0432\u0430\u0442\u0438'),
),
migrations.AlterField(
model_name='person',
name='publish',
field=models.BooleanField(default=True, verbose_name='\u041e\u043f\u0443\u0431\u043b\u0456\u043a\u0443\u0432\u0430\u0442\u0438'),
),
migrations.RunPython(
publish_everything, reverse_code=migrations.RunPython.noop),
]
|
mit
| 1,302,245,182,735,781,600
| 31.142857
| 141
| 0.643556
| false
| 3.45092
| false
| false
| false
|
delitamakanda/socialite
|
app/main/forms.py
|
1
|
2488
|
from flask_wtf import Form
from flask_pagedown.fields import PageDownField
from wtforms import TextField, StringField, SubmitField, TextAreaField, BooleanField, SelectField, ValidationError
from wtforms.validators import Required, Length, Regexp, EqualTo, Email
from ..models import User, Role, Comment
class CommentForm(Form):
body = StringField("", validators=[Required()])
submit = SubmitField('Submit')
class PostForm(Form):
body = PageDownField("What's on your mind ?", validators=[Required()])
submit = SubmitField('Submit')
class EditProfileForm(Form):
name = StringField('Real name', validators=[Length(0,64)])
location = StringField('Location', validators=[Length(0,64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
class EditProfileAdminForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
username = StringField('Username', validators=[Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, 'Usernames must have only letters, numbers, dots or underscore.')])
confirmed = BooleanField('Confirmed')
role = SelectField('Role', coerce=int)
name = StringField('Real name', validators=[Length(0,64)])
location = StringField('Location', validators=[Length(0,64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name) for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if field.data != self.user.email and User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if field.data != self.user.username and User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class ContactForm(Form):
name = StringField('Username', validators=[Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0, 'Usernames must have only letters, numbers, dots or underscore.')])
email = StringField('Email', validators=[Required(), Length(1, 64), Email()])
subject = TextField("Subject", validators=[Required()])
message = TextAreaField("Message", validators=[Required()])
submit = SubmitField("Send")
|
mit
| -8,109,677,442,507,174,000
| 43.428571
| 183
| 0.679662
| false
| 3.961783
| false
| false
| false
|
fred806/Pype
|
pypeNuke.py
|
1
|
52150
|
import nuke
import nukescripts
import os
from xml.dom import minidom
from xml.dom.minidom import Document
from xml.etree import ElementTree as ET
import getpass
import re
import ftplib
from ftplib import FTP
import uuid
import os2emxpath as path
import subprocess as sp
import logging
pypePath = os.environ['PYPE_PATH']
def pypeMenuNk():
#Building the menu
menubar=nuke.menu("Nuke")
m=menubar.addMenu("&Pype")
m.addCommand( 'Set Project', "pypeSetProjectNk()" )
m.addCommand( 'Project Info', "getPypeProjectInfoNk()" )
m.addSeparator()
m.addCommand( 'Open', "pypeShotOpenNk()" )
m.addCommand( 'Save+', "incrementalSaveNk()" )
m.addSeparator()
m.addCommand( 'Create Write Node', "createWriteNodeNk()" )
m.addCommand( 'Send Render to Pype', "renderToPypeNk()" )
m.addCommand( 'Apply Shot In-Out Frames', "applyShotInOutFramesNk()" )
m.addCommand( 'Apply Shot Handles', "applyShotHandlesNk()" )
m.addCommand( 'Apply Shot Framerate', "applyShotFramerateNk()" )
m.addCommand( 'Apply Shot Resolution', "applyShotResolutionNk()" )
m.addSeparator()
m.addCommand( 'Save WIP image', "savePypeWIPNk()" )
m.addCommand( 'Update Thumbnail', "saveThumbnailNk()" )
m.addCommand( 'Save Shot Cover', "saveCoverNk()" )
#m.addSeparator()
#m.addCommand( pypeMenu+'/Load Files', "print('Yeah!')" )
#m.addCommand( pypeMenu+'/Create Write Node', "print('Yeah!')" )
############################################################
class PypeShotOpenPanel( nukescripts.PythonPanel ):
def __init__( self ):
'''Lists all shots and their respective versions'''
nukescripts.PythonPanel.__init__( self, 'Pype Shot Open' )
# CREATE KNOBS
self.setMinimumSize(450, 100)
pypeInit()
getShotList()
shotList = getShotList()
seqList = getSeqList()
self.seqKnob = nuke.Enumeration_Knob( 'seq', 'Sequence ', seqList )
self.seqKnob.clearFlag( nuke.STARTLINE )
self.shotKnob = nuke.Enumeration_Knob( 'shot', ' Shot ', shotList )
self.shotKnob.clearFlag( nuke.STARTLINE )
self.versionKnob = nuke.Enumeration_Knob( 'version', ' Version ', [] )
self.versionKnob.clearFlag( nuke.STARTLINE )
self.descriptionKnob = nuke.Text_Knob('', '', ' ')
self.descriptionKnob2 = nuke.String_Knob('', '', ' ')
# ADD KNOBS
for k in ( self.seqKnob, self.shotKnob, self.versionKnob ):
self.addKnob( k )
        # STORE DICTIONARY OF SHOTS PER TYPE
self.seqShotDict = {}
self.versionDict = {}
# FILL DICTIONARY
self.getSeqData( seqList )
self.getShotData( shotList )
def getSeqData( self, seqList ):
seqList = getSeqList()
dict = {}
for seq in seqList:
shots = getSeqShotList( seq )
dict[seq] = shots
print ( str(seq) + ":" + str(shots) )
self.seqShotDict = dict
def getShotData( self, shotList ):
shotList = getShotList()
dict = {}
for shot in shotList:
versions = getVersionsList( shot )
dict[shot] = versions
print ( str(shot) + ":" + str(versions) )
self.versionDict = dict
def knobChanged( self, knob ):
if knob is self.seqKnob or knob.name()=='showPanel':
self.shotKnob.setValues( self.seqShotDict[ self.seqKnob.value() ] )
self.shotKnob.setValue(0)
self.versionKnob.setValues( self.versionDict[ self.shotKnob.value() ] )
self.versionKnob.setValue(0)
if knob is self.shotKnob:
self.shotKnob.setValues( self.seqShotDict[ self.seqKnob.value() ] )
self.versionKnob.setValues( self.versionDict[ self.shotKnob.value() ] )
############################################################
def pypeShotOpenNk():
p = PypeShotOpenPanel()
if p.showModalDialog():
#print ( p.shotKnob.value(), p.versionKnob.value() )
file = ( projectPath + '/CG/Shots/' + p.shotKnob.value() + '/Comp/Nuke/' + p.shotKnob.value() + '_Comp_' + p.versionKnob.value() + '.nk' )
nuke.scriptOpen( file )
############################################################
def getShotNukePath( shotName ):
shotNukePath = ( projectPath + '/CG/Shots/' + shotName + '/Comp/Nuke')#For testing
return shotNukePath
############################################################
def getShotList():
#Listing folders
fileFolder =( projectPath + '/CG/Shots/' )
shotList = []
if os.path.exists( fileFolder ):
for file in os.listdir( fileFolder ):
#Nuke File Validation
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}$") )
if rex1.match(file):
shotList.extend([str(file)])
elif rex2.match(file):
shotList.extend([str(file)])
else:
pass
#print(file)
#shotList.extend([str(file)])
return shotList
############################################################
def getSeqShotList( seqName ):
#Listing folders
fileFolder =( projectPath + '/CG/Shots/' )
shotList = []
if os.path.exists( fileFolder ):
for file in os.listdir( fileFolder ):
if ('Shot_' + seqName ) in file:
#Nuke File Validation
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}$") )
if rex1.match(file):
shotList.extend([str(file)])
elif rex2.match(file):
shotList.extend([str(file)])
else:
pass
#print(file)
#shotList.extend([str(file)])
else:
pass
return shotList
############################################################
def getSeqList():
#Listing folders
fileFolder =( projectPath + '/CG/Shots/' )
seqList = []
shotList = getShotList()
if os.path.exists( fileFolder ):
for item in shotList:
fileName = item
shotNumber = fileName[-3::]
seqNumber = fileName[-7:-4]
if seqNumber in seqList:
pass
else:
seqList.append(seqNumber)
return seqList
############################################################
def getVersionsList( shotName ):
fileFolder = ( projectPath + '/CG/Shots/' + shotName + '/Comp/Nuke' )
shotVersionsList = []
if os.path.exists( fileFolder ):
for file in os.listdir( fileFolder ):
if file.endswith(".nk"):
fileName = file.split('/')[-1].partition(".")[0]
shotVersion = fileName[-4::]
#print(shotVersion)
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}.nk$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}.nk$") )
if rex1.match(file):
shotVersionsList.extend([str(shotVersion)])
elif rex2.match(file):
shotVersionsList.extend([str(shotVersion)])
else:
pass
shotVersionsList.reverse()
return shotVersionsList
############################################################
def pypeInit():
#Vars
global projectPath
global projectName
pypePath = os.environ['PYPE_PATH']
#Vars
fileXML = (pypePath + '/pypeSettings.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('item')
#Reading current project
projectPath = ''
for s in itemlist:
user = getpass.getuser()
#print ('User: ' + user)
if s.attributes['user'].value == user:
projectPath = s.attributes["project"].value
else:
pass
#print projectPath
projectName = projectPath.split('/')[-1].partition(".")[0]
projectName = os.path.basename(os.path.normpath( projectPath ))
print ('Project Name: ' + projectName)
############################################################
def getPypeProjectInfoNk():
global projectPath
pypeProject = projectPath
fileXML = (pypeProject + '/_pype/pypeProject.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
projectName = s.attributes['name'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
type = s.attributes['type'].value
user = s.attributes['user'].value
nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
############################################################
# DEFINE SHOT'S NUKE DIR
def nukeDir( projectPath, shotName ):
nkDir = ( projectPath + '/CG/Shots/' + shotName + '/Comp/Nuke/' )
if not os.path.isdir( nkDir ):
raise ValueError, 'NUKE directory does not exist'
return nkDir
############################################################
def incrementalSaveNk():
fileSaved = False
version = 1
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
while not fileSaved:
path = nuke.root().knob('name').value()
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version = fileName[-3::]
newVersion = "%03d" % (int(version) + 1)
newFileName = (fileNameShort + '_Comp_v' + newVersion + ext)
newFileNameLong = (fileFolder + "\\" + fileNameShort + '_Comp_v' + newVersion + ext)
newVersionExists = os.path.exists(newFileNameLong)
if os.path.isfile( newFileNameLong ):
print('File already exists!')
#versionTmp = newVersion
#newVersion = ( int(versionTmp) + 1 )
#newVersion += 1
#newFileNameLong = (fileFolder + "\\" + fileNameShort + '_v' + newVersion + ext)
#continue
break
comment = nuke.getInput( 'Comment', '' )
#XML-LOCAL
fileXMLFolder = ( fileFolder + '/_pype')
if not os.path.isdir(fileXMLFolder):
os.makedirs(fileXMLFolder)
fileXML = ( fileXMLFolder + '/pypeHistory.xml' )
user = getpass.getuser()
doc = Document()
root_node = doc.createElement("history")
doc.appendChild(root_node)
object_node = doc.createElement("data")
root_node.appendChild(object_node)
# set attributes
object_node.setAttribute("version", "001")
object_node.setAttribute("user", user)
object_node.setAttribute("comment", "Initial save")
xml_file = open( fileXML, "w")
xml_file.write(doc.toprettyxml())
xml_file.close()
if not os.path.isfile( ( fileXMLFolder + '/pypeHistory.xml' ) ):
fileXML = ( fileXMLFolder + '/pypeHistory.xml' )
user = getpass.getuser()
doc = Document()
root_node = doc.createElement("history")
doc.appendChild(root_node)
object_node = doc.createElement("data")
root_node.appendChild(object_node)
# set attributes
object_node.setAttribute("version", "001")
object_node.setAttribute("user", user)
object_node.setAttribute("comment", "Initial save")
xml_file = open( fileXML, "w")
xml_file.write(doc.toprettyxml())
xml_file.close()
fileXML = ( fileXMLFolder + '/pypeHistory.xml' )
user = getpass.getuser()
doc = ET.parse( fileXML )
root = doc.getroot()
#Data to add
data1 = ET.Element("data", {"version": newVersion, "user": user, "comment": comment})
root.append(data1)
out = ET.tostring(root)
dom = minidom.parseString(out)
xml_file = open( fileXML, "w")
xml_file.write(dom.toprettyxml())
xml_file.close()
#
#FTP PYPE
ftp_send(fileXML, ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp"), ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp/pypeHistory.xml") )
#Saving
nuke.scriptSaveAs( newFileNameLong )
fileSaved = True
saveThumbnailNk()
break
else:
nuke.message("File not recognized by Pype.")
else:
nuke.message("File not recognized by Pype.")
else:
nuke.message("File not recognized by Pype.")
############################################################
def pypeFileCheckNk():
pypeFileCheck = False
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
pypeFileCheck = True
return pypeFileCheck
############################################################
def applyShotInOutFramesNk():
global projectPath
pypeFileCheck = pypeFileCheckNk()
if pypeFileCheck == True:
pypeProject = projectPath
projectFolder = projectPath
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
#fileXML = (pypeProject + '/_pype/pypeProject.xml' )
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
Handles = s.attributes['Handles'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
nuke.knob("root.first_frame", InFrame )
nuke.knob("root.last_frame", OutFrame )
else:
nuke.message("File not recognized by Pype.")
############################################################
def applyShotHandlesNk():
global projectPath
pypeFileCheck = pypeFileCheckNk()
if pypeFileCheck == True:
pypeProject = projectPath
projectFolder = projectPath
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
#fileXML = (pypeProject + '/_pype/pypeProject.xml' )
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
Handles = s.attributes['Handles'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
InFrameHandle = ( float(InFrame) - float(Handles) )
OutFrameHandle = ( float(OutFrame) + float(Handles) )
nuke.knob("root.first_frame", str(InFrameHandle) )
nuke.knob("root.last_frame", str(OutFrameHandle) )
else:
nuke.message("File not recognized by Pype.")
############################################################
def applyShotFramerateNk():
global projectPath
pypeFileCheck = pypeFileCheckNk()
if pypeFileCheck == True:
pypeProject = projectPath
projectFolder = projectPath
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
#fileXML = (pypeProject + '/_pype/pypeProject.xml' )
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
Handles = s.attributes['Handles'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
InFrameHandle = ( float(InFrame) - float(Handles) )
OutFrameHandle = ( float(OutFrame) + float(Handles) )
nuke.knob("root.fps", Framerate )
else:
nuke.message("File not recognized by Pype.")
############################################################
def applyShotResolutionNk():
global projectPath
pypeFileCheck = pypeFileCheckNk()
if pypeFileCheck == True:
pypeProject = projectPath
projectFolder = projectPath
path = nuke.root().knob('name').value()
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
#fileXML = (pypeProject + '/_pype/pypeProject.xml' )
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
Handles = s.attributes['Handles'].value
resHeight = s.attributes['resHeight'].value
resWidth = s.attributes['resWidth'].value
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#nuke.message(('Project is ' + str(projectName) + ', a ' + str(type) + ' at ' + str(Framerate) + 'fps\nResolution is ' + str(resWidth) + ' by ' + str(resHeight) + '\nIt was created by ' + str(user) ))
InFrameHandle = ( float(InFrame) - float(Handles) )
OutFrameHandle = ( float(OutFrame) + float(Handles) )
#
pypeRez = ( str(resWidth) + ' ' + str(resHeight) + " PypeRez")
nuke.addFormat( pypeRez )
root = nuke.root()
root['format'].setValue( 'PypeRez' )
else:
nuke.message("File not recognized by Pype.")
############################################################
def easySave():
    global shotName
    nkDir = nukeDir( projectPath, shotName )
    # GET DESCRIPTION FROM USER BUT STRIP ALL WHITE SPACES
    #description = nuke.getInput( 'script description', 'bashComp' ).replace( ' ', '' )
    fileSaved = False
    version = 1
    while not fileSaved:
        # CONSTRUCT FILE NAME
        nkName = ( '%s_v%03d.nk' % ( shotName, version ) )
# JOIN DIRECTORY AND NAME TO FORM FULL FILE PATH
nkPath = os.path.join( nkDir, nkName )
# IF FILE EXISTS VERSION UP
if os.path.isfile( nkPath ):
version += 1
continue
# SAVE NUKE SCRIPT
nuke.scriptSaveAs( nkPath )
fileSaved = True
return nkPath
############################################################
def pypeSetProjectNk():
pypePath = os.environ['PYPE_PATH']
projectPathPicker = nuke.getFilename('Select Project Source Folder', '')
print ( projectPathPicker )
global projectPath
global projectName
if projectPathPicker == None:
#print ('Cancelled' )
print('Cancelled' )
#projectPath = ''
#projectName = ''
else:
projectPath = projectPathPicker
projectName = os.path.basename(os.path.normpath(projectPathPicker))
#Vars
pypePath = os.environ['PYPE_PATH']
fileXML = (pypePath + '/pypeSettings.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('item')
user = getpass.getuser()
project = projectPath
users=[]
for s in itemlist:
users.append(s.attributes['user'].value)
if user in users:
for s in itemlist:
if s.attributes['user'].value == user:
#print 'Exists'
#We update
s.attributes['project'].value = project
#Writing file
f = open(fileXML, 'w')
parsedXML.writexml(f)
f.close()
break
else:
pass
else:
#print "Doesn't exist"
print('Need to add that guy')
fileXML = (pypePath + '/pypeSettings.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('item')
#Applying new project
doc = ET.parse( fileXML )
root = doc.getroot()
#Data to add
data1 = ET.Element('item', {'project': project, 'user': user})
root.append(data1)
out = ET.tostring(root)
dom = minidom.parseString(out)
xml_file = open( fileXML, 'w')
xml_file.write(dom.toprettyxml())
xml_file.close()
#FTP
#Ftp disabled for pypeSettings because it has no influence on the web interface
#ftp_send( fileXML , "www/pype/", "www/pype/pypeSettings.xml")
############################################################
def saveThumbnailNk():
global projectName
global projectPath
try:
sel = nuke.selectedNode()
except:
return
#image size multiplier
    #this will be the thumbnail size (in percentage, 1 is 100%) compared to the original image
imageSize = 0.15
#thumbnail will be saved with this image format
fileType = "jpg"
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
path = nuke.root().knob('name').value()
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version= fileName[-3::]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
if sel.Class() != "Viewer":
#Disabling Proxy
proxy = nuke.root().knob('proxy').value()
nuke.root().knob('proxy').setValue(False)
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
thumbnailDir = scriptsPath + "/_pype"
if not os.path.isdir(thumbnailDir):
os.makedirs(thumbnailDir)
#full thumbnail path
#thumbnailName = ( scriptName + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
thumbnailName = ( fileNameShort + "_Comp_v" + version + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullThumbnailPath = ( thumbnailDir + '/' + fileNameShort + "_Comp_v" + version + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
#reformat node
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,sel)
r.setXYpos(sel.xpos(), sel.ypos()+50)
#r["type"].setValue("scale")
#r["scale"].setValue(imageSize)
r["type"].setValue("to box")
r["box_width"].setValue( 170 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(100)
r["black_outside"].setValue(True)
r["resize"].setValue("height")
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullThumbnailPath)
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(r)
nuke.delete(w)
nuke.root().knob('proxy').setValue(proxy)
#FTP
ftp_send(fullThumbnailPath, ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp"), ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp/" + thumbnailName) )
else:
nuke.message("Viewer not supported. Please choose another node.")
else:
nuke.message("You havent't saved your nuke script, yet. Please save your nuke script first.")
############################################################
def saveCoverNk():
global projectName
global projectPath
try:
sel = nuke.selectedNode()
except:
return
#image size multiplier
    #this will be the thumbnail size (in percentage, 1 is 100%) compared to the original image
imageSize = 1
#thumbnail will be saved with this image format
fileType = "jpg"
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
path = nuke.root().knob('name').value()
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version= fileName[-3::]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
if sel.Class() != "Viewer":
#Disabling Proxy
proxy = nuke.root().knob('proxy').value()
nuke.root().knob('proxy').setValue(False)
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
#thumbnailDir = scriptsPath + "/_pype"
thumbnailDir = (projectPath + "/_pype/Shots/" + fileNameShort)
if not os.path.isdir(thumbnailDir):
os.makedirs(thumbnailDir)
#full thumbnail path
coverName = ( fileNameShort + ".{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
thumbnailName = ( fileNameShort + "_thumbnail.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullCoverPath = ( thumbnailDir + '/' + fileNameShort + ".{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullThumbnailPath = ( thumbnailDir + '/' + fileNameShort + "_thumbnail.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
#reformat node
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,sel)
r.setXYpos(sel.xpos(), sel.ypos()+50)
r["type"].setValue("to box")
r["box_width"].setValue( 320 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(180)
r["black_outside"].setValue(True)
r["resize"].setValue("height")
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullCoverPath)
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(r)
nuke.delete(w)
#thumbnail
#Reformat for small thumbnail
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,sel)
r.setXYpos(sel.xpos(), sel.ypos()+50)
r["type"].setValue("to box")
r["box_width"].setValue( 170 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(100)
r["black_outside"].setValue(True)
r["resize"].setValue("height")
#Write Node for small thumbnail
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullThumbnailPath)
nuke.execute(w,nuke.frame(),nuke.frame())
nuke.delete(r)
nuke.delete(w)
nuke.root().knob('proxy').setValue(proxy)
#FTP
ftp_send(fullCoverPath, ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort), ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/" + coverName) )
else:
nuke.message("Viewer not supported. Please choose another node.")
else:
nuke.message("You havent't saved your nuke script, yet. Please save your nuke script first.")
############################################################
def savePypeWIPNk():
global projectName
global projectPath
try:
sel = nuke.selectedNode()
except:
return
#image size multiplier
    #this will be the thumbnail size (in percentage, 1 is 100%) compared to the original image
imageSize = 0.15
#thumbnail will be saved with this image format
fileType = "jpg"
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
path = nuke.root().knob('name').value()
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version= fileName[-3::]
imageID = uuid.uuid4()
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
if sel.Class() != "Viewer":
#Disabling Proxy
proxy = nuke.root().knob('proxy').value()
nuke.root().knob('proxy').setValue(False)
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
imageFolder = (projectPath + '/_pype/Shots/' + fileNameShort + '/wip/')
imageFolderThumbnail = (projectPath + '/_pype/Shots/' + fileNameShort + '/wip/thumbnail/')
if not os.path.isdir(imageFolder):
os.makedirs(imageFolder)
if not os.path.isdir(imageFolderThumbnail):
os.makedirs(imageFolderThumbnail)
#full thumbnail path
#thumbnailName = ( scriptName + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
imageName = ( fileNameShort + "_Comp_v" + version + "_wip_" + str(imageID) + ".{ext}").format(imageFolder=imageFolder, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullImagePath = ( imageFolder + '/' + fileNameShort + "_Comp_v" + version + "_wip_" + str(imageID) + ".{ext}").format(imageFolder=imageFolder, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullThumbnailPath = ( imageFolder + '/thumbnail/' + fileNameShort + "_Comp_v" + version + "_wip_" + str(imageID) + ".{ext}").format(imageFolder=imageFolder, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,sel)
w.setXYpos(sel.xpos(), sel.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullImagePath)
w.knob('_jpeg_quality').setValue("1")
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(w)
#Thumbnail
#reformat node
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,sel)
r.setXYpos(sel.xpos(), sel.ypos()+50)
#r["type"].setValue("scale")
#r["scale"].setValue(imageSize)
r["type"].setValue("to box")
r["box_width"].setValue( 300 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(300)
r["resize"].setValue("fill")
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue(fileType)
w.knob('_jpeg_quality').setValue(1)
w.knob("file").setValue(fullThumbnailPath)
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(r)
nuke.delete(w)
nuke.root().knob('proxy').setValue(proxy)
#FTP
ftp_send(fullImagePath, ( "www/pype/projects/" + projectName + "/uploads/Shots/" + fileNameShort ), ( "www/pype/projects/" + projectName + "/uploads/Shots/" + fileNameShort + "/" + imageName) )
ftp_send(fullThumbnailPath, ( "www/pype/projects/" + projectName + "/uploads/Shots/" + fileNameShort + "/thumbnail"), ( "www/pype/projects/" + projectName + "/uploads/Shots/" + fileNameShort + "/thumbnail/" + imageName) )
nuke.message("WIP saved!")
else:
nuke.message("Viewer not supported. Please choose another node.")
else:
nuke.message("You havent't saved your nuke script, yet. Please save your nuke script first.")
############################################################
def createWriteNodeNk():
global projectName
global projectPath
if nuke.exists( "PypeWrite" ):
w = nuke.toNode("PypeWrite")
nuke.delete(w)
try:
sel = nuke.selectedNode()
except:
return
#thumbnail will be saved with this image format
fileType = "exr"
scriptsRoot = nuke.root().name()
if scriptsRoot != "" and scriptsRoot != "Root":
path = nuke.root().knob('name').value()
projectFolder = projectPath
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
version= fileName[-3::]
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
#
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
InFrame = s.attributes['InFrame'].value
OutFrame = s.attributes['OutFrame'].value
#
if rex1.match(fileName) or rex2.match(fileName):
if sel.Class() != "Viewer":
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
renderDir = (projectPath + "/CG/Shots/" + fileNameShort + "/Renders/Comp_Renders/" + fileName)
if not os.path.isdir(renderDir):
os.makedirs(renderDir)
#full thumbnail path
#thumbnailName = ( scriptName + "_snapshot.{ext}").format(thumbnailDir=thumbnailDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
fullRenderPath = ( renderDir + '/' + fileNameShort + "_Comp_v" + version + ".####.{ext}").format(thumbnailDir=renderDir, nodeName = sel["name"].getValue(), frame = nuke.frame(), ext = fileType)
#write node
#w = nuke.createNode("Write", inpanel = False)
w = nuke.nodes.Write (name="PypeWrite")
w.setInput(0,sel)
w.setXYpos(sel.xpos(), sel.ypos()+50)
w.knob("use_limit").setValue(True)
w.knob("first").setValue(float(InFrame))
w.knob("last").setValue(float(OutFrame))
w.knob("file_type").setValue(fileType)
w.knob("file").setValue(fullRenderPath)
#FTP
#ftp_send(fullThumbnailPath, ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp"), ( "www/pype/projects/" + projectName + "/_pype/Shots/" + fileNameShort + "/Nuke/Comp/" + thumbnailName) )
else:
nuke.message("Viewer not supported. Please choose another node.")
else:
nuke.message("You havent't saved your nuke script, yet. Please save your nuke script first.")
############################################################
def renderToPypeNk():
global projectName
global projectPath
scriptsRoot = nuke.root().name()
path = nuke.root().knob('name').value()
projectFolder = projectPath
fileFolder = os.path.abspath(os.path.join(path, os.pardir))
ext = path[-3::]
fileName = path.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
shotName = fileNameShort
version= fileName[-3::]
imageID = uuid.uuid4()
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
ext = "exr"
#
fileXML = ( projectFolder + '/_pype/Shots/' + shotName + '/pypeShotInfo.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('data')
#project = projectPath
for s in itemlist:
Framerate = s.attributes['Framerate'].value
InFrame = s.attributes['InFrame'].value
InFrame = InFrame.zfill(4)
OutFrame = s.attributes['OutFrame'].value
OutFrame = OutFrame.zfill(4)
#
if rex1.match(fileName) or rex2.match(fileName):
#LOCAL
#scriptsPath and name
scriptsPath = os.path.dirname(scriptsRoot)
scriptName = os.path.basename(scriptsRoot).split(".nk")[0]
#create thumbnail dir if not exist
renderDir = (projectPath + "/CG/Shots/" + fileNameShort + "/Renders/Comp_Renders/" + fileName)
fullRenderPath = ( renderDir + '/' + fileNameShort + "_Comp_v" + version + ".%4d." + ext)
fullRenderPath = fullRenderPath.replace("//", "/" )
firstRenderFrame = ( renderDir + '/' + fileNameShort + "_Comp_v" + version + "."+InFrame+"." + ext)
firstRenderFrame = firstRenderFrame.replace("//", "/" )
print firstRenderFrame
#import os.path
if os.path.isfile(firstRenderFrame):
print "File exists! Sending to Pype"
sendRenderToPype(fullRenderPath,InFrame,Framerate)
nuke.message("Sent to Pype!")
else:
print "File doesn't exist..."
############################################################
def sendRenderToPype(in_file,start_number,framerate):
global projectName
global projectPath
pypePath = os.environ['PYPE_PATH']
ffmpegBin = (pypePath + '/bin/ffmpeg-3.3.2-win64-static/bin/ffmpeg.exe')
path = nuke.root().knob('name').value()
filePath = in_file
#filePath = path.normpath(in_file)
fileFolder = os.path.abspath(os.path.join(filePath, os.pardir))
fileFolder = '/'.join(fileFolder.split('\\'))
#fileFolder = path.normpath(fileFolder)
ext = filePath[-3::]
fileName = filePath.split('/')[-1].partition(".")[0]
fileNameShort = fileName[0:-10]
version= fileName[-3::]
imageID = uuid.uuid4()
rex1 = re.compile( ("^Shot_[0-9A-Za-z]{3}-[0-9]{3}_Comp_v[0-9]{3}$") )
rex2 = re.compile( ("^Shot_[0-9]{3}_Comp_v[0-9]{3}$") )
if rex1.match(fileName) or rex2.match(fileName):
imageFolder = (projectPath + '/_pype/Shots/' + fileNameShort + '/wip/')
if not os.path.exists(imageFolder):
os.makedirs(imageFolder)
imageFolderThumbnail = (projectPath + '/_pype/Shots/' + fileNameShort + '/wip/thumbnail/')
if not os.path.exists(imageFolderThumbnail):
os.makedirs(imageFolderThumbnail)
print ("Saving " + fileName + ".mp4" + " in " + fileFolder)
out_file = (imageFolder + "/" + fileName + str(imageID) + ".mp4")
out_thumb = (imageFolder + "/" + fileName + str(imageID) + ".jpg")
out_thumb_temp = (imageFolderThumbnail + "/" + fileName + str(imageID) + "_temp.jpg")
#out_thumb_temp = os.path.join(fileFolder, (fileName + "_temp.jpg"))
print out_file
if os.path.isfile(out_file) :
print "Exists!"
os.remove(out_file)
pass
else:
pass
if in_file.endswith(".mov"):
ffmpeg = sp.Popen([ffmpegBin, '-r', framerate, '-f', 'mp4', '-i', in_file, '-vcodec', 'libx264', '-f', 'mp4', '-r', framerate, '-pix_fmt', 'yuv420p', out_file], stdout = sp.PIPE, stderr = sp.STDOUT)
out, err = ffmpeg.communicate()
print out, err, ffmpeg.returncode
else:
ffmpeg = sp.Popen([ffmpegBin, '-r', framerate, '-apply_trc', 'iec61966_2_1', '-start_number', start_number, '-i', in_file, '-f', 'mp4', '-r', framerate, '-pix_fmt', 'yuv420p', out_file], stdout = sp.PIPE, stderr = sp.STDOUT)
out, err = ffmpeg.communicate()
print out, err, ffmpeg.returncode
#process_output = ffmpeg.communicate()
#for output in process_output:
# print output
#Creating Thumbnail
ffmpeg = sp.Popen([ffmpegBin, '-ss', '00:00:01', '-t', '00:00:00.04', '-i', out_file, '-r', framerate, out_thumb_temp], stdout = sp.PIPE, stderr = sp.STDOUT)
out, err = ffmpeg.communicate()
print out, err, ffmpeg.returncode
#process_output = ffmpeg.communicate()
#for output in process_output:
# print output
#Cropping thumbnail
#Getting size
thumbnailPath = out_thumb
#
proxy = nuke.root().knob('proxy').value()
nuke.root().knob('proxy').setValue(False)
#write node
rd = nuke.createNode("Read", inpanel = False)
rd.knob("name").setValue("capture1")
rd.knob("file").setValue(out_thumb_temp)
#delete nodes
#nuke.delete(w)
#Thumbnail
#reformat node
r = nuke.createNode("Reformat", inpanel = False)
r.setInput(0,rd)
r.setXYpos(rd.xpos(), rd.ypos()+50)
#r["type"].setValue("scale")
#r["scale"].setValue(imageSize)
r["type"].setValue("to box")
r["box_width"].setValue( 300 )
r["box_fixed"].setValue(1)
r["box_height"].setValue(300)
r["resize"].setValue("fill")
#write node
w = nuke.createNode("Write", inpanel = False)
w.setInput(0,r)
w.setXYpos(r.xpos(), r.ypos()+50)
w.knob("name").setValue("capture")
w.knob("use_limit").setValue(True)
w.knob("first").setValue(nuke.frame())
w.knob("last").setValue(nuke.frame())
w.knob("file_type").setValue("jpg")
w.knob("file").setValue(out_thumb)
nuke.execute(w,nuke.frame(),nuke.frame())
#delete nodes
nuke.delete(rd)
nuke.delete(r)
nuke.delete(w)
nuke.root().knob('proxy').setValue(proxy)
#
#PYPE FTP
imageName = ((out_file.split('/')[-1].partition(".")[0]) + ".mp4")
thumbName = ((out_thumb.split('/')[-1].partition(".")[0]) + ".jpg")
imagePype = ( '/projects/' + projectName + '/uploads/Shots/' + fileNameShort + '/' + imageName )
imagePypePath = ( '/projects/' + projectName + '/uploads/Shots/' + fileNameShort )
thumbnailPypePath = ( '/projects/' + projectName + '/uploads/Shots/' + fileNameShort + '/thumbnail' )
#
ftp_send( out_file, ( "www/pype" + imagePypePath), ("www/pype" + imagePypePath + '/' + imageName) )
ftp_send( out_thumb, ( "www/pype" + thumbnailPypePath), ("www/pype" + thumbnailPypePath + '/' + thumbName) )
############################################################
def chdir(ftp, directory):
ch_dir_rec(ftp,directory.split('/'))
def directory_exists(ftp, directory):
filelist = []
ftp.retrlines('LIST',filelist.append)
for f in filelist:
if f.split()[-1] == directory and f.upper().startswith('D'):
return True
return False
def ch_dir_rec(ftp, descending_path_split):
if len(descending_path_split) == 0:
return
next_level_directory = descending_path_split.pop(0)
if not directory_exists(ftp,next_level_directory):
ftp.mkd(next_level_directory)
ftp.cwd(next_level_directory)
ch_dir_rec(ftp,descending_path_split)
def ftp_transfer(session, sourcefile, targetfile):
file = open(sourcefile,'rb') # file to send
session.storbinary(('STOR ' + targetfile), file) # send the file
file.close() # close file and FTP
session.quit()
def ftp_send(sourcefile, targetfolder, targetfile):
pypePath = os.environ['PYPE_PATH']
fileXML = (pypePath + '/pypeFTP.xml' )
parsedXML = minidom.parse(fileXML)
itemlist = parsedXML.getElementsByTagName('ftp')
#project = projectPath
for s in itemlist:
ftphome = s.attributes['ftphome'].value
log = s.attributes['log'].value
pw = s.attributes['pw'].value
try:
session = ftplib.FTP(ftphome,log, pw)
ftp = FTP(ftphome)
ftp.login(log, pw)
directory = targetfolder
chdir(ftp, directory)
#ftp_transfer(session, sourcefile, targetfile)
file = open(sourcefile,'rb') # file to send
session.storbinary(('STOR ' + targetfile), file) # send the file
file.close() # close file and FTP
session.quit()
except ftplib.all_errors:
print "Error during the FTP transfer!"
############################################################
pypeInit()
pypeMenuNk()
|
mit
| -3,904,076,662,467,990,500
| 42.763948
| 245
| 0.527651
| false
| 3.810463
| false
| false
| false
|
bradkav/ATLASfits
|
DiphotonFits.py
|
1
|
1961
|
#--DiphotonFits.py - Version 1 - 04/02/2016
#--Author: Bradley J Kavanagh
#--Summary: Code for fitting the ATLAS diphoton data
#--and calculating the significance of the 750 GeV excess
#--Note: Requires emcee (http://dan.iel.fm/emcee/current/)
#--Please report any problems to: bradkav@gmail.com
print "----Likelihood fits to ATLAS diphoton data---"
import numpy as np
from DiphotonFits_utils import getBestFit
#----Options----
#Print best-fit points to file (in fits folder)
saveResults = 1
#----Main procedure-----
BG_ID = ['k = 0 (fixed norm.)',
'k = 1 (fixed norm.)',
'k = 2 (fixed norm.)',
'k = 0 (free norm.)',
'k = 1 (free norm.)',
'k = 2 (free norm.)',]
SIGNAL_ID = ['Background-only',
'Signal+BG (NWA)',
'Signal+BG (Free-width)']
#Loop over possible background parametrisations
for i in range(6):
print "---------------------"
print "Background function:" ,BG_ID[i]
#Background-only fit
like_BG, bf_BG = getBestFit(i, 0)
print " Background-only"
print " lnL:", '{:.2f}'.format(like_BG)
if (saveResults):
np.savetxt('fits/Fits_BG=' + str(i) + '_BG-only.txt', bf_BG)
#Narrow width fit
like_NWA, bf_NWA = getBestFit(i, 1)
print " Signal+BG (NWA)"
print " lnL:", '{:.2f}'.format(like_NWA)
if (saveResults):
np.savetxt('fits/Fits_BG=' + str(i)+ '_NWA.txt', bf_NWA)
#Free width fit
like_wide, bf_wide = getBestFit(i, 2)
print " Signal+BG (Free width)"
print " lnL:", '{:.2f}'.format(like_wide)
if (saveResults):
np.savetxt('fits/Fits_BG=' + str(i) + '_wide.txt', bf_wide)
#Calculate significance
sig_NWA = np.sqrt(2*(like_NWA - like_BG))
sig_wide = np.sqrt(2*(like_wide - like_BG))
print " "
print " Significance (NWA):", '{:.2f}'.format(sig_NWA)
print " Significance (wide):", '{:.2f}'.format(sig_wide)
|
mit
| -6,611,880,352,664,472,000
| 29.169231
| 68
| 0.570117
| false
| 2.913819
| false
| false
| false
|
yangautumn/turing_pattern
|
amorphous_pattern/grid_graph.py
|
1
|
1941
|
"""
Simulate how Droplets will perform with Young's Model on Grid Graphs with different density
Author: Yang Li
Date: July 14, 2017
# randomly generate N droplets with diameter = 4.5cm within a square
of length 60cm
"""
from AmorphousGraph import *
# 'factor' roughly determines how strongly each activator pushes against the others
def grid_Graph(pattern_def, path_to, dens):
(pattern_idx, hight_a, width_a, hight_i, width_i, factor) = pattern_def
lims = [20, 20]
step_size = 1.0/np.sqrt(dens)
xs = []
ys = []
# generate the set of grid coordinates
# make sure there are no duplicated ones
for x in np.arange(0, lims[0]+1, step_size):
for y in np.arange(0, lims[1]+1, step_size):
xs.append(x)
ys.append(y)
print ("--> Coordination ready to go!")
# start of creating the instance of amorphous graph for pattern formation
num = len(xs)
ag = AmorphousGraph(num, hight_a, width_a, hight_i, width_i, pattern_idx, factor)
ag.initDroplets(xs, ys)
ag.initCircles()
print ("--> Amorphous Graph ready to go!")
for fi in np.arange(3, 10):
ag.resetColors()
ag.factor = (float)('%.2f' % (-0.1*fi))
counter = 0
while ag.oneLoop() > 0:
counter += 1
ag.drawPattern_Grid('dens{}'.format(dens), lims, path_to)
print ("--> Done with drawing with factor", ag.factor)
if __name__ == "__main__":
path_to = "D:/Github/Data/turing_pattern/Aug8_gridGraph_square"
path_to = "/home/yang/Dropbox/Data/turing_pattern/Aug8_gridGraph_square"
pattern_def_0 = (0, 2, 5, 6, 5, -0.50)
pattern_def_1 = (1, 5, 2, 5, 6, -0.50)
pattern_def_2 = (2, 3, 3, 5, 5, -0.55)
pattern_def_set = [pattern_def_0, pattern_def_1, pattern_def_2]
for pd in pattern_def_set:
for i in range(3, 10):
dens = i+1
print ("\n Start! -- [Pattern]:", pd, "[dens]:", dens)
t0 = time.time()
grid_Graph(pd, path_to, dens)
print ("\n Done! -- [Pattern]:", pd, "[dens]:", dens, "[Time used]:", time.time()-t0)
|
gpl-3.0
| 1,260,177,515,249,739,000
| 25.958333
| 91
| 0.646574
| false
| 2.615903
| false
| false
| false
|
DeflatedPickle/Colony
|
colony/time.py
|
1
|
1986
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
""""""
from tkinter import IntVar
__title__ = "Time"
__author__ = "DeflatedPickle"
__version__ = "1.3.1"
class Time(object):
def __init__(self, hours: int = 0, minutes: int = 0, seconds: int = 0):
self._hours = IntVar(value=hours)
self._minutes = IntVar(value=minutes)
self._seconds = IntVar(value=seconds)
self.check_time()
def get_time(self):
"""Returns the current time."""
return int("".join(map(str, [self._hours.get(), self._minutes.get(), self._seconds.get()])))
def get_time_formatted(self):
"""Returns the time formatted for readability."""
return "{}:{}:{}".format(self._hours.get(), self._minutes.get(), self._seconds.get())
def get_hours(self):
"""Returns the hours."""
return self._hours
def get_minutes(self):
"""Returns the minutes."""
return self._minutes
def get_seconds(self):
"""Returns the seconds."""
return self._seconds
def set_time(self, hours, minutes, seconds):
"""Sets the time to an amount."""
if hours > 0:
self._hours.set(hours)
if minutes > 0:
self._minutes.set(minutes)
if seconds > 0:
self._seconds.set(seconds)
self.check_time()
def increase_time(self, hours, minutes, seconds):
"""Increases the time by an amount."""
self.set_time(self._hours.get() + hours, self._minutes.get() + minutes, self._seconds.get() + seconds)
self.check_time()
def check_time(self):
"""Checks the time and increments it if it's over."""
if self._seconds.get() >= 60:
self._seconds.set(0)
self._minutes.set(self._minutes.get() + 1)
if self._minutes.get() >= 60:
self._minutes.set(0)
self._hours.set(self._hours.get() + 1)
if self._hours.get() >= 24:
self._hours.set(0)
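# Minimal usage sketch for the Time class above (illustrative; tkinter's IntVar
# needs an existing Tk root window, so one is created first):
#   import tkinter
#   _root = tkinter.Tk()
#   t = Time(hours=23, minutes=59, seconds=59)
#   t.increase_time(0, 0, 1)          # check_time() rolls this over to 0:0:0
#   print(t.get_time_formatted())     # -> "0:0:0"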
|
mit
| -9,133,802,816,021,608,000
| 27.371429
| 110
| 0.552367
| false
| 3.69145
| false
| false
| false
|
Sinar/telus
|
lib/pymg3.py
|
1
|
7566
|
#!/usr/bin/python
"""
This is a module for dealing with MongoDB via PyMongo.
Use this module to manage databases and collections in MongoDB using
the Python driver, PyMongo. The API operation commands have slight
differences between `mongo` shell and `pymongo` in Python scripts.
MongoDB manual (https://docs.mongodb.com/manual/) has notable links
to Getting Started Guide. For writing codes in Python scripts, look
into "Python Edition" instead of "mongo Shell Edition".
For full reference, see MongoDB Ecosystem - Python MongoDB drivers
(https://docs.mongodb.com/ecosystem/drivers/python/) that provides
links to API documentation and other resources.
This module was written with API operation commands that are valid
for PyMongo 3.0 and newer. Avoid deprecated API mentioned by docs.
"""
from __future__ import print_function
import json
import pymongo
from bson.objectid import ObjectId
from bson import json_util
def test_conn(host, port):
"""Test connection to MongoDB server."""
try:
client = pymongo.MongoClient(
host,
port,
connectTimeoutMS=2000,
serverSelectionTimeoutMS=3000)
client.admin.command("ismaster")
except pymongo.errors.ConnectionFailure:
print('Failed to connect')
raise RuntimeError('Server not available', host, port)
else:
print('Connected to server')
return client
def get_conn(host, port):
"""Return versions of MongoDB and PyMongo when available."""
client = test_conn(host, port)
server_version = client.server_info()['version']
driver_version = pymongo.version
print('Using MongoDB {0} with PyMongo {1}'.format(
server_version, driver_version))
return server_version, driver_version
def use_conn(host, port):
"""Return client for a MongoDB instance."""
client = test_conn(host, port)
return client
def set_database(client, dbname):
"""Return database with specified name on MongoDB."""
database = client[dbname]
print('Setup database: {}'.format(database.name))
return database
def set_collection(client, dbname, ccname):
"""Return collection with specified name on MongoDB."""
collection = client[dbname][ccname]
print('Setup collection: {}'.format(collection.name))
return collection
def use_setup(client, dbname, ccname):
"""Return database and collection that were setup on MongoDB."""
database = set_database(client, dbname)
collection = set_collection(client, dbname, ccname)
return database, collection
def list_objects(fpath):
"""Return list of objects from specified file."""
obj_ls = []
for each in open(fpath, 'r'):
obj_ls.append(each)
return obj_ls
def drop_objects(collection):
"""Remove all objects from specified collection if not empty."""
if collection.count() != 0:
print('{} was not empty, drop first'.format(collection.name))
collection.drop()
def find_object(collection):
"""
Return one JSON object from specified collection.
"""
obj = collection.find_one()
parse = json.dumps(obj, default=json_util.default, sort_keys=True)
return parse
def find_objects(collection, args):
"""Return JSON objects from specified collection if any."""
print('Query argument: {}'.format(args))
obj_ls = []
if type(args) is type({}):
obj = collection.find(args)
obj = list(obj)
count = 0
for each in obj:
count = count + 1
parse = json.dumps(each, default=json_util.default,
sort_keys=True)
obj_ls.append(parse)
if count == 0:
print('Not found')
return None
elif count == 1:
print('Found one object')
return obj_ls[0]
else:
print('Found {} objects in a list'.format(count))
return obj_ls
else:
print('Did not find')
raise TypeError('Unexpected type of argument', type(args))
def show_object(collection):
"""
Show one JSON object from specified collection in MongoDB. This
depends on the find_object function, which returns an object.
"""
obj = find_object(collection)
print('Show first object: {}'.format(obj))
def show_objects(collection, args):
"""Show JSON objects from specified collection in MongoDB."""
obj = find_objects(collection, args)
if type(obj) is type(''):
print('Show target object: {}'.format(obj))
elif type(obj) is type([]):
print('Show only first 3 objects:')
num = 0
for each in obj:
print(each)
num = num + 1
if num == 3:
break
else:
raise TypeError('Unexpected type of object', type(obj))
def scan_field(obj, string):
"""Match non-empty value for specified string in JSON object."""
value = obj[string]
ismatch = False
if value != "":
ismatch = True
return ismatch
def copy_field(obj, string):
"""Return standalone object of specified string in JSON object."""
value = obj[string]
new_obj = {string: value}
return new_obj
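# Quick illustration of the two helpers above (the value is made up; the keys are
# the ones used later in store_nested/show_nested):
#   obj = {"contractor": "ACME Sdn Bhd", "offering_office": ""}
#   scan_field(obj, "contractor")       # -> True (non-empty value)
#   scan_field(obj, "offering_office")  # -> False
#   copy_field(obj, "contractor")       # -> {"contractor": "ACME Sdn Bhd"}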
def store_objects(collection, fpath):
"""Store objects from JSONL into MongoDB."""
print('Store objects into {}'.format(collection.name))
obj_ls = list_objects(fpath)
for each in obj_ls:
obj = json.loads(each)
collection.insert_one(obj)
print('Inserted objects: {}'.format(collection.count()))
def store_nested(client, collection, fpath):
"""
Store objects and the contained nested objects from JSONL into
MongoDB. The nested objects are expected to be found in the objects
from the JSONL file and have been predefined (buyer, seller).
"""
print('Store source objects and nested objects')
buyers = set_collection(client, 'telus', 'buyers')
drop_objects(buyers)
sellers = set_collection(client, 'telus', 'sellers')
drop_objects(sellers)
obj_ls = list_objects(fpath)
for each in obj_ls:
obj = json.loads(each)
buyer_string = 'offering_office' # non-OCDS
if scan_field(obj, buyer_string):
buyers.insert_one(copy_field(obj, buyer_string))
seller_string = 'contractor' # non-OCDS
if scan_field(obj, seller_string):
sellers.insert_one(copy_field(obj, seller_string))
collection.insert_one(obj)
print('Inserted buyers: {}'.format(buyers.count()))
print('Inserted sellers: {}'.format(sellers.count()))
print('Inserted source objects: {}'.format(collection.count()))
def show_nested(client, collection):
"""
Show object and the contained nested objects that have been stored
in respective collections in MongoDB. The nested objects have been
predefined (buyer, seller).
"""
print('Show source object and nested objects')
target = find_object(collection)
print('Source object: {}'.format(target))
buyers = set_collection(client, 'telus', 'buyers')
sellers = set_collection(client, 'telus', 'sellers')
print('Nested objects:')
target = json.loads(target)
buyer_string = 'offering_office' # non-OCDS
if scan_field(target, buyer_string):
show_objects(buyers, copy_field(target, buyer_string))
else:
print('No available buyer')
seller_string = 'contractor' # non-OCDS
if scan_field(target, seller_string):
show_objects(sellers, copy_field(target, seller_string))
else:
print('No available seller')
|
gpl-3.0
| -5,252,623,188,029,648,000
| 33.866359
| 70
| 0.649749
| false
| 4.061192
| false
| false
| false
|
aerospace-notebook/aerospace-notebook
|
nbimport.py
|
1
|
2719
|
"""
Module.
"""
import io, os, sys, types
from IPython import nbformat
from IPython.core.interactiveshell import InteractiveShell
def find_notebook(fullname, path=None):
"""find a notebook, given its fully qualified name and an optional path
This turns "foo.bar" into "foo/bar.ipynb"
and tries turning "Foo_Bar" into "Foo Bar" if Foo_Bar
does not exist.
"""
name = fullname.rsplit('.', 1)[-1]
if not path:
path = ['']
for d in path:
nb_path = os.path.join(d, name + ".ipynb")
if os.path.isfile(nb_path):
return nb_path
# let import Notebook_Name find "Notebook Name.ipynb"
nb_path = nb_path.replace("_", " ")
if os.path.isfile(nb_path):
return nb_path
class NotebookLoader(object):
"""Module Loader for IPython Notebooks"""
def __init__(self, path=None):
self.shell = InteractiveShell.instance()
self.path = path
def load_module(self, fullname):
"""import a notebook as a module"""
path = find_notebook(fullname, self.path)
print ("importing IPython notebook from %s" % path)
# load the notebook object
with io.open(path, 'r', encoding='utf-8') as f:
nb = nbformat.read(f, 4)
# create the module and add it to sys.modules
# if name in sys.modules:
# return sys.modules[name]
mod = types.ModuleType(fullname)
mod.__file__ = path
mod.__loader__ = self
sys.modules[fullname] = mod
# extra work to ensure that magics that would affect the user_ns
# actually affect the notebook module's ns
save_user_ns = self.shell.user_ns
self.shell.user_ns = mod.__dict__
try:
for cell in nb['cells']:
if cell['cell_type'] == 'code':
# transform the input to executable Python
code = self.shell.input_transformer_manager.transform_cell(cell['source'])
# run the code in the module
exec(code, mod.__dict__)
finally:
self.shell.user_ns = save_user_ns
return mod
class NotebookFinder(object):
"""Module finder that locates IPython Notebooks"""
def __init__(self):
self.loaders = {}
def find_module(self, fullname, path=None):
nb_path = find_notebook(fullname, path)
if not nb_path:
return
key = path
if path:
# lists aren't hashable
key = os.path.sep.join(path)
if key not in self.loaders:
self.loaders[key] = NotebookLoader(path)
return self.loaders[key]
sys.meta_path.append(NotebookFinder())
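# With the finder registered above, notebooks import like regular modules
# (the notebook name is illustrative):
#   import My_Notebook            # resolves "My_Notebook.ipynb" or "My Notebook.ipynb"
#   My_Notebook.some_function()   # code cells were exec'd into the module namespace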
|
bsd-3-clause
| -4,277,971,977,055,841,000
| 30.988235
| 90
| 0.579257
| false
| 3.91223
| false
| false
| false
|
vallemrv/tpvB3
|
tpv_for_eetop/tpv/controllers/arqueo.py
|
1
|
6717
|
# -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 10-May-2017
# @Email: valle.mrv@gmail.com
# @Last modified by: valle
# @Last modified time: 17-Mar-2018
# @License: Apache license vesion 2.0
from kivy.uix.anchorlayout import AnchorLayout
from kivy.properties import ObjectProperty, StringProperty
from kivy.lang import Builder
from kivy.storage.jsonstore import JsonStore
from kivy.clock import Clock
from kivy.core import Logger
from kivy.network.urlrequest import UrlRequest
from controllers.lineaarqueo import LineaArqueo
from valle_libs.tpv.impresora import DocPrint
from valle_libs.utils import parse_float
from models.db import QSon, VentasSender
from config import config
from modals import Aceptar
from glob import glob
from os import rename
from datetime import datetime
from time import strftime
import urllib
import threading
import json
Builder.load_file("view/arqueo.kv")
class Arqueo(AnchorLayout):
tpv = ObjectProperty(None)
text_cambio = StringProperty("300")
url = config.URL_SERVER+"/ventas/arquear/"
def __on_success__(self, req, result):
self.tpv.hide_spin()
if result["success"] == True:
desglose = result["desglose"]
self.tpv.mostrar_inicio()
printDoc = DocPrint()
printDoc.printDesglose("caja", self.fecha, desglose)
def __got_error__(self, req, *args):
req._resp_status = "Error"
Logger.debug("got error {0}".format(req.url))
self.tpv.hide_spin()
def __got_fail__(self, req, *args):
req._resp_status = "Fail"
Logger.debug("got fail {0}".format(req.url))
self.tpv.hide_spin()
def __got_redirect__(self, req, *args):
req._resp_status = "Redirect"
Logger.debug("got redirect {0}".format(req.url))
self.tpv.hide_spin()
def send(self, data):
SEND_DATA = {'data':json.dumps(data)}
data = urllib.urlencode(SEND_DATA)
headers = {'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/json'}
r = UrlRequest(self.url, on_success=self.__on_success__, req_body=data,
req_headers=headers, method="POST",
on_failure=self.__got_fail__,
on_error=self.__got_error__,
on_redirect=self.__got_redirect__)
def nuevo_arqueo(self):
self.lista_conteo = []
self.lista_gastos = []
self.lista_ticket = []
self.lista_ingresos = []
self.fecha = ""
self.caja_dia = 0.0
self.efectivo = 0.0
self.tarjeta = 0.0
self.total_gastos = 0.0
self.conteo.rm_all_widgets()
self.gastos.rm_all_widgets()
self.ingresos.rm_all_widgets()
sender = VentasSender()
sender.filter(QSon("Pedidos", estado__contains="NPG"))
sender.send(self.comprobar_npg, wait=False)
def comprobar_npg(self, req, r):
if r["success"] == True:
if len(r["get"]['pedidos']) > 0:
self.aceptar = Aceptar(onExit=self.salir_arqueo)
self.aceptar.open()
def salir_arqueo(self):
if self.aceptar != None:
self.aceptar.dismiss()
self.tpv.mostrar_inicio()
def arquear(self):
self.fecha = str(datetime.now())
if self.cambio == "":
self.cambio = 300.00
self.lista_conteo = sorted(self.lista_conteo, key=lambda k: k["tipo"],
reverse=True)
self.run_arqueo()
def run_arqueo(self):
arqueo = {'caja_dia': self.caja_dia,
'efectivo':self.efectivo,
'cambio':self.cambio,
'total_gastos':self.total_gastos,
'tarjeta':self.tarjeta,
'descuadre':0,
'conteo':[],
'gastos':[],
'extras': []}
for conteo in self.lista_conteo:
arqueo['conteo'].append(conteo)
for gasto in self.lista_gastos:
arqueo['gastos'].append(gasto)
for ing in self.lista_ingresos:
arqueo['extras'].append(ing)
self.send(arqueo)
self.tpv.show_spin()
def add_conteo(self, _can, _tipo):
can = _can.text
tipo = parse_float(_tipo.text)
_can.text = _tipo.text = ""
linea = LineaArqueo(borrar=self.borrar_conteo)
texto_tipo = "Monedas" if tipo < 5 else "Billetes"
linea.text = u"{0: >5} {1} de {2}".format(can, texto_tipo, tipo)
linea.total = parse_float(can) * tipo
linea.tag = {"can": can, "tipo": tipo,
"texto_tipo": texto_tipo,
"total": linea.total}
self.efectivo += linea.total
self.lista_conteo.append(linea.tag)
self.conteo.add_linea(linea)
def borrar_conteo(self, linea):
self.efectivo -= linea.total
self.lista_conteo.remove(linea.tag)
self.conteo.rm_linea(linea)
def add_gasto(self, _des, _gasto):
des = _des.text
gasto = _gasto.text
_des.text = _gasto.text = ""
linea = LineaArqueo(borrar=self.borrar_gasto)
linea.text = u"{0} ".format(des)
linea.total = parse_float(gasto)
linea.tag = {"des": des, "gasto": gasto}
self.total_gastos += linea.total
self.lista_gastos.append(linea.tag)
self.gastos.add_linea(linea)
def borrar_gasto(self, linea):
self.total_gastos -= linea.total
self.lista_gastos.remove(linea.tag)
self.gastos.rm_linea(linea)
def add_ingreso(self, num_pd, importe, modo_pago):
_num_pd = num_pd.text
_importe = importe.text
linea = LineaArqueo(borrar=self.borrar_ingreso)
_modo_pago = "Efectivo" if not modo_pago.active else "Tarjeta"
linea.text = u"Peddos {0} modo pago {1} ".format(_num_pd, _modo_pago)
linea.total = parse_float(_importe)
linea.tag = {"numero_pedido": _num_pd, "importe": _importe,
"modo_pago": _modo_pago, "estado": "arqueado"}
if _modo_pago == "Tarjeta":
self.tarjeta += linea.total
else:
self.caja_dia += linea.total
num_pd.text = importe.text = ""
modo_pago.active = False
self.lista_ingresos.append(linea.tag)
self.ingresos.add_linea(linea)
def borrar_ingreso(self, linea):
modo_pago = linea.tag.get("modo_pago")
if modo_pago == "Tarjeta":
self.tarjeta -= linea.total
else:
self.caja_dia -= linea.total
self.lista_ingresos.remove(linea.tag)
self.ingresos.rm_linea(linea)
|
apache-2.0
| -6,670,678,691,411,380,000
| 31.765854
| 79
| 0.57749
| false
| 3.053182
| false
| false
| false
|
kingname/Bi_BiBi
|
jikexueyuan/webControlWithDataBase.py
|
1
|
1508
|
#--coding:utf8--
from flask.ext.bootstrap import Bootstrap
from flask import Flask, render_template, redirect
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField, TextAreaField
from util.DataBaseManager import DataBaseManager
app = Flask(__name__)
bootstrap = Bootstrap(app)
app.config['SECRET_KEY'] = 'youcouldneverknowhis-name'
app.config.from_object(__name__)
class contentForm(Form):
commandInConfig = StringField(u'')
commandInWrite = TextAreaField(u'', default="")
sendCommand = SubmitField(u'发送命令')  # label: "Send command"
clearCommand = SubmitField(u'清空命令')  # label: "Clear command"
@app.route('/', methods=['GET', 'POST'])
def index():
form = contentForm()
dataBaseManager = DataBaseManager()
if form.validate_on_submit():
innerCommand = form.commandInConfig.data
writeCommand = form.commandInWrite.data
if not (innerCommand or writeCommand):
errorinfo = u'内置命令和自定义代码至少要写一个!'  # "Fill in at least one of the built-in command or the custom code!"
form.commandInWrite.data = ''
form.commandInConfig.data = ''
return render_template('index.html', form=form, errorinfo=errorinfo)
else:
info = {'innerCommand': innerCommand, 'writeCommand': writeCommand, 'run': False}
dataBaseManager.insert(info)
return redirect('/')
return render_template('index.html', form=form, errorinfo='')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=80, threaded=True, debug=True)
app.run(processes=10)
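# For reference, each submitted form is persisted via DataBaseManager.insert() as a
# dict shaped like the following (the values here are illustrative):
#   {'innerCommand': 'ls -la', 'writeCommand': '', 'run': False}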
|
gpl-3.0
| -7,558,096,615,025,651,000
| 34.560976
| 93
| 0.674897
| false
| 3.446809
| false
| false
| false
|
Arello-Mobile/sphinx-confluence
|
sphinx_confluence/__init__.py
|
1
|
20132
|
# -*- coding: utf-8 -*-
"""
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format
"""
from distutils.version import LooseVersion
import os
from docutils import nodes
from docutils.parsers.rst import directives, Directive, roles
from docutils.parsers.rst.directives import images
from docutils.parsers.rst.roles import set_classes
import sphinx
try:
from sphinx.builders.html import JSONHTMLBuilder
except ImportError:
from sphinxcontrib.serializinghtml import JSONHTMLBuilder
from sphinx.directives.code import CodeBlock
from sphinx.locale import _
from sphinx.writers.html import HTMLTranslator
def true_false(argument):
return directives.choice(argument, ('true', 'false'))
def static_dynamic(argument):
return directives.choice(argument, ('static', 'dynamic'))
class TitlesCache(object):
titles = {}
@staticmethod
def _document_key(document):
return hash(document)
@classmethod
def set_title(cls, document, title):
cls.titles[cls._document_key(document)] = title
@classmethod
def get_title(cls, document):
return cls.titles.get(cls._document_key(document), None)
@classmethod
def has_title(cls, document):
return cls._document_key(document) in cls.titles
class JSONConfluenceBuilder(JSONHTMLBuilder):
"""For backward compatibility"""
name = 'json_conf'
def __init__(self, app):
super(JSONConfluenceBuilder, self).__init__(app)
if LooseVersion(sphinx.__version__) >= LooseVersion("1.4"):
self.translator_class = HTMLConfluenceTranslator
self.warn('json_conf builder is deprecated and will be removed in future releases')
class HTMLConfluenceTranslator(HTMLTranslator):
def unimplemented_visit(self, node):
self.builder.warn('Unimplemented visit is not implemented for node: {}'.format(node))
def unknown_visit(self, node):
self.builder.warn('Unknown visit is not implemented for node: {}'.format(node))
def visit_admonition(self, node, name=''):
"""
Info, Tip, Note, and Warning Macros
https://confluence.atlassian.com/conf58/info-tip-note-and-warning-macros-771892344.html
<ac:structured-macro ac:name="info">
<ac:parameter ac:name="icon">false</ac:parameter>
<ac:parameter ac:name="title">This is my title</ac:parameter>
<ac:rich-text-body>
<p>
This is important information.
</p>
</ac:rich-text-body>
</ac:structured-macro>
"""
confluence_admonition_map = {
'note': 'info',
'warning': 'note',
'attention': 'note',
'hint': 'tip',
'tip': 'tip',
'important': 'warning',
'error': 'warning',
'danger': 'warning',
}
admonition_type = confluence_admonition_map.get(name, 'info')
macro = """\
<ac:structured-macro ac:name="{admonition_type}">
<ac:parameter ac:name="icon">true</ac:parameter>
<ac:parameter ac:name="title"></ac:parameter>
<ac:rich-text-body>
"""
self.body.append(macro.format(admonition_type=admonition_type))
def depart_admonition(self, node=None):
macro = """
</ac:rich-text-body>
</ac:structured-macro>\n
"""
self.body.append(macro)
def imgtag(self, filename, suffix='\n', **attributes):
"""
Attached image
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format#ConfluenceStorageFormat-Images
<ac:image>
<ri:attachment ri:filename="atlassian_logo.gif" />
</ac:image>
Supported image attributes (some of these attributes mirror the equivalent HTML 4 IMG element):
Name Description
---- -----------
ac:align image alignment
ac:border Set to "true" to set a border
ac:class css class attribute.
ac:title image tool tip.
ac:style css style
ac:thumbnail Set to "true" to designate this image as a thumbnail.
ac:alt alt text
ac:height image height
ac:width image width
"""
prefix = []
atts = {}
for (name, value) in attributes.items():
atts[name.lower()] = value
attlist = atts.items()
attlist = sorted(attlist)
parts = []
src_part = '<ri:attachment ri:filename="%s" />' % filename
for name, value in attlist:
# value=None was used for boolean attributes without
# value, but this isn't supported by XHTML.
assert value is not None
if isinstance(value, list):
value = u' '.join(map(unicode, value))
else:
# First assume Python 2
try:
value = unicode(value)
# Otherwise, do it the Python 3 way
except NameError:
value = str(value)
parts.append('ac:%s="%s"' % (name.lower(), self.attval(value)))
infix = '</ac:image>'
return ''.join(prefix) + '<ac:image %s>%s%s' % (' '.join(parts), src_part, infix) + suffix
def visit_image(self, node):
atts = {}
uri = node['uri']
filename = os.path.basename(uri)
atts['alt'] = node.get('alt', uri)
atts['thumbnail'] = 'true'
if 'width' in node:
atts['width'] = node['width']
if 'name' in node:
atts['title'] = node['name']
if (isinstance(node.parent, nodes.TextElement) or
(isinstance(node.parent, nodes.reference) and
not isinstance(node.parent.parent, nodes.TextElement))):
# Inline context or surrounded by <a>...</a>.
suffix = ''
else:
suffix = '\n'
self.context.append('')
self.body.append(self.imgtag(filename, suffix, **atts))
def visit_title(self, node):
if isinstance(node.parent, nodes.section) and not TitlesCache.has_title(self.document):
h_level = self.section_level + self.initial_header_level - 1
if h_level == 1:
# Confluence takes the first title from the rst as the page title
# It is used for making internal links
TitlesCache.set_title(self.document, node.children[0])
# ignore first header; document must have title header
raise nodes.SkipNode
HTMLTranslator.visit_title(self, node)
def visit_target(self, node):
"""
Anchor Macro
https://confluence.atlassian.com/display/DOC/Anchor+Macro
<ac:structured-macro ac:name="anchor">
<ac:parameter ac:name="">here</ac:parameter>
</ac:structured-macro>
"""
# Anchor confluence macros
anchor_macros = """
<ac:structured-macro ac:name="anchor">
<ac:parameter ac:name="">%s</ac:parameter>
</ac:structured-macro>
"""
if 'refid' in node or 'refname' in node:
if 'refuri' in node:
link = node['refuri']
elif 'refid' in node:
link = node['refid']
else:
link = node['refname']
self.body.append(anchor_macros % link)
def depart_target(self, node):
pass
def visit_literal_block(self, node):
"""
Code Block Macro
https://confluence.atlassian.com/display/DOC/Code+Block+Macro
<ac:structured-macro ac:name="code">
<ac:parameter ac:name="title">This is my title</ac:parameter>
<ac:parameter ac:name="theme">FadeToGrey</ac:parameter>
<ac:parameter ac:name="linenumbers">true</ac:parameter>
<ac:parameter ac:name="language">xml</ac:parameter>
<ac:parameter ac:name="firstline">0001</ac:parameter>
<ac:parameter ac:name="collapse">true</ac:parameter>
<ac:plain-text-body><![CDATA[<b>This is my code</b>]]></ac:plain-text-body>
</ac:structured-macro>
"""
parts = ['<ac:structured-macro ac:name="code">']
if 'language' in node:
# Collapsible argument
if node['language'] == 'collapse':
parts.append('<ac:parameter ac:name="collapse">true</ac:parameter>')
valid = ['actionscript3', 'bash', 'csharp', 'coldfusion', 'cpp', 'css', 'delphi', 'diff', 'erlang',
'groovy', 'html/xml', 'java', 'javafx', 'javascript', 'none', 'perl', 'php', 'powershell',
'python', 'ruby', 'scala', 'sql', 'vb']
if node['language'] not in valid:
node['language'] = 'none'
parts.append('<ac:parameter ac:name="language">%s</ac:parameter>' % node['language'])
if 'linenos' in node and node['linenos']:
parts.append('<ac:parameter ac:name="linenumbers">true</ac:parameter>')
if 'caption' in node and node['caption']:
parts.append('<ac:parameter ac:name="title">%s</ac:parameter>' % node['caption'])
parts.append('<ac:plain-text-body><![CDATA[%s]]></ac:plain-text-body>' % node.rawsource)
parts.append('</ac:structured-macro>')
self.body.append(''.join(parts))
raise nodes.SkipNode
def visit_download_reference(self, node):
"""
Link to an attachment
https://confluence.atlassian.com/display/DOC/Confluence+Storage+Format#ConfluenceStorageFormat-Links
<ac:link>
<ri:attachment ri:filename="atlassian_logo.gif" />
<ac:plain-text-link-body><![CDATA[Link to a Confluence Attachment]]></ac:plain-text-link-body>
</ac:link>
"""
if 'filename' not in node:
self.context.append('')
return
text = None
if len(node.children) > 0 and len(node.children[0].children) > 0:
text = node.children[0].children[0]
parts = [
'<ac:link>',
'<ri:attachment ri:filename="%s" />' % node['filename'],
'<ac:plain-text-link-body>',
'<![CDATA[%s]]>' % text if text else '',
'</ac:plain-text-link-body>',
'</ac:link>',
]
self.body.append(''.join(parts))
raise nodes.SkipNode
def visit_section(self, node):
# removed section open tag
self.section_level += 1
def depart_section(self, node):
# removed section close tag
self.section_level -= 1
def visit_reference(self, node):
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
else:
atts['class'] += ' external'
if 'refuri' in node:
atts['href'] = ''
# Confluence makes internal links with prefix from page title
if node.get('internal') and TitlesCache.has_title(self.document):
atts['href'] += '#%s-' % TitlesCache.get_title(self.document).replace(' ', '')
atts['href'] += node['refuri']
if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
atts['href'] = self.cloak_mailto(atts['href'])
self.in_mailto = 1
else:
assert 'refid' in node, 'References must have "refuri" or "refid" attribute.'
atts['href'] = ''
# Confluence makes internal links with prefix from page title
if node.get('internal') and TitlesCache.has_title(self.document):
atts['href'] += '#%s-' % TitlesCache.get_title(self.document).replace(' ', '')
atts['href'] += node['refid']
if not isinstance(node.parent, nodes.TextElement):
assert len(node) == 1 and isinstance(node[0], nodes.image)
atts['class'] += ' image-reference'
if 'reftitle' in node:
atts['title'] = node['reftitle']
self.body.append(self.starttag(node, 'a', '', **atts))
if node.get('secnumber'):
self.body.append(('%s' + self.secnumber_suffix) % '.'.join(map(str, node['secnumber'])))
def visit_desc(self, node):
""" Replace <dl> """
self.body.append(self.starttag(node, 'div', style="margin-top: 10px"))
def depart_desc(self, node):
self.body.append('</div>\n\n')
def visit_desc_signature(self, node):
""" Replace <dt> """
# the id is set automatically
self.body.append(self.starttag(
node, 'div', style='margin-left: 20px; font-weight: bold;'))
# anchor for per-desc interactive data
if node.parent['objtype'] != 'describe' and node['ids'] and node['first']:
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
""" Copy-paste from original method """
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</div>')
def visit_desc_content(self, node):
""" Replace <dd> """
self.body.append(self.starttag(
node, 'div', '', style='margin-left: 40px;'))
def depart_desc_content(self, node):
self.body.append('</div>')
def visit_table(self, node):
""" Fix ugly table border
"""
self.context.append(self.compact_p)
self.compact_p = True
classes = ' '.join(['docutils', self.settings.table_style]).strip()
self.body.append(
self.starttag(node, 'table', CLASS=classes, border="0"))
def write_colspecs(self):
""" Fix ugly column width
"""
pass
class ImageConf(images.Image):
"""
Image confluence directive
"""
def run(self):
# remove 'align' processing
# remove 'target' processing
self.options.pop('align', None)
reference = directives.uri(self.arguments[0])
self.options['uri'] = reference
set_classes(self.options)
image_node = nodes.image(self.block_text, **self.options)
self.add_name(image_node)
return [image_node]
class TocTree(Directive):
"""
Replace sphinx "toctree" directive to confluence macro
Table of Contents Macro
https://confluence.atlassian.com/display/DOC/Table+of+Contents+Macro
<ac:structured-macro ac:name="toc">
<ac:parameter ac:name="style">square</ac:parameter>
<ac:parameter ac:name="minLevel">1</ac:parameter>
<ac:parameter ac:name="maxLevel">3</ac:parameter>
<ac:parameter ac:name="type">list</ac:parameter>
</ac:structured-macro>
"""
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'maxdepth': int,
'name': directives.unchanged,
'caption': directives.unchanged_required,
'glob': directives.flag,
'hidden': directives.flag,
'includehidden': directives.flag,
'titlesonly': directives.flag,
}
def run(self):
macro = """
<ac:structured-macro ac:name="toc">
<ac:parameter ac:name="style">square</ac:parameter>
<ac:parameter ac:name="minLevel">1</ac:parameter>
<ac:parameter ac:name="maxLevel">3</ac:parameter>
<ac:parameter ac:name="type">list</ac:parameter>
</ac:structured-macro>\n
"""
attributes = {'format': 'html'}
raw_node = nodes.raw('', macro, **attributes)
return [raw_node]
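# In reStructuredText the standard toctree syntax is kept, e.g.
#   .. toctree::
#      :maxdepth: 2
# but with this directive registered the output is the Confluence TOC macro emitted
# above rather than a Sphinx-generated table of contents.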
class JiraIssuesDirective(Directive):
"""
JIRA Issues Macro
https://confluence.atlassian.com/doc/jira-issues-macro-139380.html
<ac:structured-macro ac:name="jira" ac:schema-version="1" ac:macro-id="da6b6413-0b93-4052-af90-dbb252175860">
<ac:parameter ac:name="server">Atlassian JIRA (JAC)</ac:parameter>
<ac:parameter ac:name="columns">key,summary,created</ac:parameter>
<ac:parameter ac:name="maximumIssues">20</ac:parameter>
<ac:parameter ac:name="jqlQuery">project = CONF AND FixVersion=5.8 </ac:parameter>
<ac:parameter ac:name="serverId">146780e9-1234-312f-1243-ed0555666fa</ac:parameter>
</ac:structured-macro>
"""
required_arguments = 1
has_content = False
final_argument_whitespace = True
option_spec = {
"anonymous": true_false,
"server_id": directives.unchanged,
"baseurl": directives.unchanged,
"columns": directives.unchanged,
"count": true_false,
"height": directives.positive_int,
"title": directives.unchanged,
"render_mode": static_dynamic,
"url": directives.unchanged,
"width": directives.unchanged,
"maximum_issues": directives.positive_int
}
def run(self):
result = ['<ac:structured-macro ac:name="jira" ac:schema-version="1">']
param_macro = '<ac:parameter ac:name="{name}">{value}</ac:parameter>'
for name, value in self.options.items():
result.append(param_macro.format(name=underscore_to_camelcase(name), value=value))
jql_query = self.arguments[0]
result.append(param_macro.format(name='jqlQuery', value=jql_query))
result.append('</ac:structured-macro>')
attributes = {'format': 'html'}
raw_node = nodes.raw('', '\n'.join(result), **attributes)
return [raw_node]
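# Example rst usage of the directive above (the JQL query and option values are
# illustrative; option names come from option_spec and are converted to the
# camelCase parameter names Confluence expects, e.g. maximum_issues -> maximumIssues):
#   .. jira_issues:: project = CONF AND FixVersion = 5.8
#      :columns: key,summary,created
#      :maximum_issues: 20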
class JiraIssueRole(roles.GenericRole):
def __call__(self, role, rawtext, text, *args, **kwargs):
macro = """\
<ac:structured-macro ac:name="jira" ac:schema-version="1">
<ac:parameter ac:name="key">{key}</ac:parameter>
<ac:parameter ac:name="showSummary">false</ac:parameter>
</ac:structured-macro>
"""
attributes = {'format': 'html'}
return [nodes.raw('', macro.format(key=text), **attributes)], []
class JiraUserRole(roles.GenericRole):
def __call__(self, role, rawtext, text, *args, **kwargs):
macro = """\
<ac:link>
<ri:user ri:username="{username}"/>
</ac:link>
"""
attributes = {'format': 'html'}
return [nodes.raw('', macro.format(username=text), **attributes)], []
class CaptionedCodeBlock(CodeBlock):
def run(self):
ret = super(CaptionedCodeBlock, self).run()
caption = self.options.get('caption')
if caption and isinstance(ret[0], nodes.container):
container_node = ret[0]
if isinstance(container_node[0], nodes.caption):
container_node[1]['caption'] = caption
return [container_node[1]]
return ret
def underscore_to_camelcase(text):
return ''.join(word.title() if i else word for i, word in enumerate(text.split('_')))
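# e.g. underscore_to_camelcase('maximum_issues') -> 'maximumIssues'
#      underscore_to_camelcase('render_mode')    -> 'renderMode'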
def get_path():
from os import path
package_dir = path.abspath(path.dirname(__file__))
template_path = path.join(package_dir, 'themes')
return template_path
def setup(app):
"""
:type app: sphinx.application.Sphinx
"""
app.config.html_theme_path = [get_path()]
app.config.html_theme = 'confluence'
app.config.html_scaled_image_link = False
if LooseVersion(sphinx.__version__) >= LooseVersion("1.4"):
app.set_translator("html", HTMLConfluenceTranslator)
app.set_translator("json", HTMLConfluenceTranslator)
else:
app.config.html_translator_class = 'sphinx_confluence.HTMLConfluenceTranslator'
app.config.html_add_permalinks = ''
jira_issue = JiraIssueRole('jira_issue', nodes.Inline)
app.add_role(jira_issue.name, jira_issue)
jira_user = JiraUserRole('jira_user', nodes.Inline)
app.add_role(jira_user.name, jira_user)
app.add_directive('image', ImageConf)
app.add_directive('toctree', TocTree)
app.add_directive('jira_issues', JiraIssuesDirective)
app.add_directive('code-block', CaptionedCodeBlock)
app.add_builder(JSONConfluenceBuilder)
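# To enable the extension, a Sphinx project would normally list the package in its
# conf.py (a minimal sketch; project-specific settings omitted):
#   extensions = ['sphinx_confluence']
# setup() then swaps in the Confluence theme, translator, directives and roles
# registered above.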
|
mit
| 2,380,515,453,393,359,000
| 33.179966
| 113
| 0.582853
| false
| 3.828832
| false
| false
| false
|
gsantovena/marathon
|
tests/system/test_marathon_root.py
|
1
|
28234
|
""" Test using root marathon.
This test suite imports all common tests found in marathon_common.py which are
to be tested on root marathon and MoM.
In addition it contains tests which are specific to root marathon, specifically
tests around dcos services registration, control, and security.
"""
import apps
import common
import json
import os
import pytest
import requests
import retrying
import uuid
from datetime import timedelta
import dcos_service_marathon_tests
import marathon_auth_common_tests
import marathon_common_tests
import marathon_pods_tests
from shakedown.clients import marathon
from shakedown.dcos import marathon_leader_ip
from shakedown.dcos.agent import get_private_agents, get_public_agents, public_agents, required_public_agents # NOQA F401
from shakedown.dcos.cluster import dcos_1_9, dcos_version_less_than, ee_version, is_strict # NOQA F401
from shakedown.dcos.command import run_command, run_command_on_agent, run_command_on_master
from shakedown.dcos.marathon import deployment_wait, marathon_version_less_than # NOQA F401
from shakedown.dcos.master import get_all_master_ips, masters, is_multi_master, required_masters # NOQA F401
from shakedown.dcos.service import wait_for_service_endpoint
from fixtures import sse_events, wait_for_marathon_and_cleanup, user_billy, docker_ipv6_network_fixture, archive_sandboxes, install_enterprise_cli # NOQA F401
# the following lines essentially do:
# from dcos_service_marathon_tests import test_*
for attribute in dir(dcos_service_marathon_tests):
if attribute.startswith('test_'):
exec("from dcos_service_marathon_tests import {}".format(attribute))
# the following lines essentially do:
# from marathon_auth_common_tests import test_*
for attribute in dir(marathon_auth_common_tests):
if attribute.startswith('test_'):
exec("from marathon_auth_common_tests import {}".format(attribute))
# the following lines essentially do:
# from marathon_common_tests import test_*
for attribute in dir(marathon_common_tests):
if attribute.startswith('test_'):
exec("from marathon_common_tests import {}".format(attribute))
# the following lines essentially do:
# from marathon_pods_tests import test_*
for attribute in dir(marathon_pods_tests):
if attribute.startswith('test_'):
exec("from marathon_pods_tests import {}".format(attribute))
pytestmark = [pytest.mark.usefixtures('wait_for_marathon_and_cleanup')]
@pytest.fixture(scope="function")
def marathon_service_name():
return "marathon"
def setup_module(module):
# When the cluster is starting, it might happen that there is some delay in:
# - marathon leader registration with mesos
# - admin router refreshing cache (every 30s)
# We should not start our tests before marathon is accessible through service endpoint.
wait_for_service_endpoint('marathon', timedelta(minutes=5).total_seconds(), path="ping")
common.cluster_info()
common.clean_up_marathon()
def teardown_module(module):
common.clean_up_marathon()
#################################################
# Root Marathon specific tests
#################################################
@masters(3)
def test_marathon_delete_leader(marathon_service_name):
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
common.abdicate_marathon_leader()
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
common.assert_marathon_leadership_changed(original_leader)
@masters(3)
def test_marathon_delete_leader_and_check_apps(marathon_service_name):
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
app_def = apps.sleep_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, "The number of running tasks is {}, but 1 was expected".format(app["tasksRunning"])
# abdicate leader after app was started successfully
common.abdicate_marathon_leader()
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
# wait until leader changed
common.assert_marathon_leadership_changed(original_leader)
original_leader = marathon_leader_ip()
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_app_existence(expected_instances):
app = client.get_app(app_id)
assert app['tasksRunning'] == expected_instances, \
"The number of running tasks is {}, but {} was expected".format(app["tasksRunning"], expected_instances)
# check if app definition is still there and one instance is still running after new leader was elected
check_app_existence(1)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def remove_app(app_id):
client.remove_app(app_id)
remove_app(app_id)
deployment_wait(service_id=app_id)
try:
client.get_app(app_id)
except Exception:
pass
else:
assert False, "The application resurrected"
# abdicate leader after app was started successfully
common.abdicate_marathon_leader()
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
# wait until leader changed
common.assert_marathon_leadership_changed(original_leader)
# check if app definition is still not there
try:
client.get_app(app_id)
except Exception:
pass
else:
assert False, "The application resurrected"
@masters(3)
def test_marathon_zk_partition_leader_change(marathon_service_name):
original_leader = common.get_marathon_leader_not_on_master_leader_node()
common.block_iptable_rules_for_seconds(original_leader, 2181, sleep_seconds=30)
common.assert_marathon_leadership_changed(original_leader)
@masters(3)
def test_marathon_master_partition_leader_change(marathon_service_name):
original_leader = common.get_marathon_leader_not_on_master_leader_node()
# blocking outbound connection to mesos master
# Marathon has a Mesos heartbeat interval of 15 seconds. If 5 are missed it
# disconnects. Thus we should wait more than 75 seconds.
common.block_iptable_rules_for_seconds(original_leader, 5050, sleep_seconds=100,
block_input=False, block_output=True)
common.assert_marathon_leadership_changed(original_leader)
@public_agents(1)
def test_launch_app_on_public_agent():
""" Test the successful launch of a mesos container on public agent.
MoMs by default do not have slave_public access.
"""
client = marathon.create_client()
app_def = common.add_role_constraint_to_app_def(apps.mesos_app(), ['slave_public'])
app_id = app_def["id"]
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
task_ip = tasks[0]['host']
assert task_ip in get_public_agents(), "The application task got started on a private agent"
@pytest.mark.skipif("is_strict()") # NOQA F811
@pytest.mark.skipif('marathon_version_less_than("1.3.9")')
@pytest.mark.usefixtures("wait_for_marathon_and_cleanup")
@pytest.mark.asyncio
async def test_event_channel(sse_events):
""" Tests the event channel. The way events are verified is by converting
the parsed events to an iterator and asserting the right order of certain
events. Unknown events are skipped.
"""
await common.assert_event('event_stream_attached', sse_events)
app_def = apps.mesos_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
await common.assert_event('deployment_info', sse_events)
await common.assert_event('deployment_step_success', sse_events)
client.remove_app(app_id, True)
deployment_wait(service_id=app_id)
await common.assert_event('app_terminated_event', sse_events)
@dcos_1_9
@pytest.mark.skipif("is_strict()")
def test_external_volume():
volume_name = "marathon-si-test-vol-{}".format(uuid.uuid4().hex)
app_def = apps.external_volume_mesos_app()
app_def["container"]["volumes"][0]["external"]["name"] = volume_name
app_id = app_def['id']
# Tested with root marathon since MoM doesn't have
# --enable_features external_volumes option activated.
# First deployment should create the volume since it has a unique name
try:
print('INFO: Deploying {} with external volume {}'.format(app_id, volume_name))
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
# Create the app: the volume should be successfully created
common.assert_app_tasks_running(client, app_def)
common.assert_app_tasks_healthy(client, app_def)
# Scale down to 0
print('INFO: Scaling {} to 0 instances'.format(app_id))
client.stop_app(app_id)
deployment_wait(service_id=app_id)
# Scale up again: the volume should be successfully reused
print('INFO: Scaling {} back to 1 instance'.format(app_id))
client.scale_app(app_id, 1)
deployment_wait(service_id=app_id)
common.assert_app_tasks_running(client, app_def)
common.assert_app_tasks_healthy(client, app_def)
# Remove the app to be able to remove the volume
print('INFO: Finally removing {}'.format(app_id))
client.remove_app(app_id)
deployment_wait(service_id=app_id)
except Exception as e:
print('Fail to test external volumes: {}'.format(e))
raise e
finally:
# Clean up after the test: external volumes are not destroyed by marathon or dcos
# and have to be cleaned manually.
cmd = 'sudo /opt/mesosphere/bin/dvdcli remove --volumedriver=rexray --volumename={}'.format(volume_name)
removed = False
for agent in get_private_agents():
status, output = run_command_on_agent(agent, cmd) # NOQA
print('DEBUG: Failed to remove external volume with name={} on agent={}: {}'.format(
volume_name, agent, output))
if status:
removed = True
# Note: Removing the volume might fail sometimes because EC2 takes some time (~10min) to recognize that
# the volume is not in use anymore hence preventing it's removal. This is a known pitfall: we log the error
# and the volume should be cleaned up manually later.
if not removed:
print('WARNING: Failed to remove external volume with name={}'.format(volume_name))
else:
print('DEBUG: External volume with name={} successfully removed'.format(volume_name))
@pytest.mark.skipif('is_multi_master() or marathon_version_less_than("1.5")')
def test_marathon_backup_and_restore_leader(marathon_service_name):
"""Backup and restore meeting is done with only one master since new master has to be able
to read the backup file that was created by the previous master and the easiest way to
test it is when there is 1 master
"""
backup_file = 'backup.tar'
backup_dir = '/tmp'
backup_url = 'file://{}/{}'.format(backup_dir, backup_file)
# Deploy a simple test app. It is expected to be there after leader reelection
app_def = apps.sleep_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, "The number of running tasks is {}, but 1 was expected".format(app["tasksRunning"])
task_id = app['tasks'][0]['id']
# Abdicate the leader with backup and restore
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
params = '?backup={}&restore={}'.format(backup_url, backup_url)
print('DELETE /v2/leader{}'.format(params))
common.abdicate_marathon_leader(params)
# Wait for new leader (but same master server) to be up and ready
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, "The number of running tasks is {}, but 1 was expected".format(app["tasksRunning"])
assert task_id == app['tasks'][0]['id'], "Task has a different ID after restore"
# Check if the backup file exits and is valid
cmd = 'tar -tf {}/{} | wc -l'.format(backup_dir, backup_file)
status, data = run_command_on_master(cmd)
assert status, 'Failed to validate backup file {}'.format(backup_url)
assert int(data.rstrip()) > 0, "Backup file is empty"
# Regression for MARATHON-7525, introduced in MARATHON-7538
@masters(3)
@pytest.mark.skipif('marathon_version_less_than("1.5")')
def test_marathon_backup_and_check_apps(marathon_service_name):
backup_file1 = 'backup1.tar'
backup_file2 = 'backup2.tar'
backup_dir = '/tmp'
for master_ip in get_all_master_ips():
run_command(master_ip, "rm {}/{}".format(backup_dir, backup_file1))
run_command(master_ip, "rm {}/{}".format(backup_dir, backup_file2))
backup_url1 = 'file://{}/{}'.format(backup_dir, backup_file1)
backup_url2 = 'file://{}/{}'.format(backup_dir, backup_file2)
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
app_def = apps.sleep_app()
app_id = app_def['id']
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
app = client.get_app(app_id)
assert app['tasksRunning'] == 1, "The number of running tasks is {}, but 1 was expected".format(app["tasksRunning"])
# Abdicate the leader with backup
original_leader = marathon_leader_ip()
params = '?backup={}'.format(backup_url1)
common.abdicate_marathon_leader(params)
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
# wait until leader changed
common.assert_marathon_leadership_changed(original_leader)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def check_app_existence(expected_instances):
try:
app = client.get_app(app_id)
except Exception as e:
if expected_instances != 0:
raise e
else:
if expected_instances == 0:
assert False, "The application resurrected"
else:
assert app['tasksRunning'] == expected_instances, \
"The number of running tasks is {}, but {} was expected".format(
app["tasksRunning"], expected_instances)
# check if app definition is still there and one instance is still running after new leader was elected
check_app_existence(1)
# then remove
client.remove_app(app_id)
deployment_wait(service_id=app_id)
check_app_existence(0)
# Do a second backup. Before MARATHON-7525 we had the problem that doing a backup after an app was deleted
# led to a state where Marathon was not able to restart, because the second backup failed constantly.
# Abdicate the leader with backup
original_leader = marathon_leader_ip()
print('leader: {}'.format(original_leader))
params = '?backup={}'.format(backup_url2)
print('DELETE /v2/leader{}'.format(params))
common.abdicate_marathon_leader(params)
wait_for_service_endpoint(marathon_service_name, timedelta(minutes=5).total_seconds(), path="ping")
# wait until leader changed
# if leader changed, this means that marathon was able to start again, which is great :-).
common.assert_marathon_leadership_changed(original_leader)
# check if app definition is still not there and no instance is running after new leader was elected
check_app_existence(0)
@common.marathon_1_5
@pytest.mark.skipif("ee_version() is None")
@pytest.mark.skipif("common.docker_env_not_set()")
def test_private_repository_mesos_app():
"""Deploys an app with a private Docker image, using Mesos containerizer.
It relies on the global `install_enterprise_cli` fixture to install the
enterprise-cli-package.
"""
username = os.environ['DOCKER_HUB_USERNAME']
password = os.environ['DOCKER_HUB_PASSWORD']
secret_name = "pullconfig"
secret_value_json = common.create_docker_pull_config_json(username, password)
secret_value = json.dumps(secret_value_json)
app_def = apps.private_ucr_docker_app()
app_id = app_def["id"]
# In strict mode all tasks are started as user `nobody` by default and `nobody`
# doesn't have permissions to write to /var/log within the container.
if is_strict():
app_def['user'] = 'root'
common.add_dcos_marathon_user_acls()
common.create_secret(secret_name, secret_value)
client = marathon.create_client()
try:
client.add_app(app_def)
deployment_wait(service_id=app_id)
common.assert_app_tasks_running(client, app_def)
finally:
common.delete_secret(secret_name)
@pytest.mark.skipif('marathon_version_less_than("1.5")')
@pytest.mark.skipif("ee_version() is None")
def test_app_file_based_secret(secret_fixture):
secret_name, secret_value = secret_fixture
secret_container_path = 'mysecretpath'
app_id = '/app-fbs-{}'.format(uuid.uuid4().hex)
# In case you're wondering about the `cmd`: secrets are mounted via tmpfs inside
# the container and are not visible outside, hence the intermediate file
app_def = {
"id": app_id,
"instances": 1,
"cpus": 0.5,
"mem": 64,
"cmd": "cat {} >> {}_file && /opt/mesosphere/bin/python -m http.server $PORT_API".format(
secret_container_path, secret_container_path),
"container": {
"type": "MESOS",
"volumes": [{
"containerPath": secret_container_path,
"secret": "secret1"
}]
},
"portDefinitions": [{
"port": 0,
"protocol": "tcp",
"name": "api",
"labels": {}
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, 'Failed to start the file based secret app'
port = tasks[0]['ports'][0]
host = tasks[0]['host']
# The secret by default is saved in $MESOS_SANDBOX/.secrets/path/to/secret
cmd = "curl {}:{}/{}_file".format(host, port, secret_container_path)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def value_check():
status, data = run_command_on_master(cmd)
assert status, "{} did not succeed. status = {}, data = {}".format(cmd, status, data)
assert data.rstrip() == secret_value, "Got an unexpected secret data"
value_check()
@dcos_1_9
@pytest.mark.skipif("ee_version() is None")
def test_app_secret_env_var(secret_fixture):
secret_name, secret_value = secret_fixture
app_id = '/app-secret-env-var-{}'.format(uuid.uuid4().hex)
app_def = {
"id": app_id,
"instances": 1,
"cpus": 0.5,
"mem": 64,
"cmd": "echo $SECRET_ENV >> $MESOS_SANDBOX/secret-env && /opt/mesosphere/bin/python -m http.server $PORT_API",
"env": {
"SECRET_ENV": {
"secret": "secret1"
}
},
"portDefinitions": [{
"port": 0,
"protocol": "tcp",
"name": "api",
"labels": {}
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
client.add_app(app_def)
deployment_wait(service_id=app_id)
tasks = client.get_tasks(app_id)
assert len(tasks) == 1, 'Failed to start the secret environment variable app'
port = tasks[0]['ports'][0]
host = tasks[0]['host']
cmd = "curl {}:{}/secret-env".format(host, port)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def value_check():
status, data = run_command_on_master(cmd)
assert status, "{} did not succeed".format(cmd)
assert data.rstrip() == secret_value
value_check()
@dcos_1_9
@pytest.mark.skipif("ee_version() is None")
def test_app_inaccessible_secret_env_var():
secret_name = '/some/secret' # Secret in an inaccessible namespace
app_id = '/app-inaccessible-secret-env-var-{}'.format(uuid.uuid4().hex)
app_def = {
"id": app_id,
"instances": 1,
"cpus": 0.1,
"mem": 64,
"cmd": "echo \"shouldn't be called anyway\"",
"env": {
"SECRET_ENV": {
"secret": "secret1"
}
},
"portDefinitions": [{
"port": 0,
"protocol": "tcp",
"name": "api",
"labels": {}
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
with pytest.raises(requests.HTTPError) as excinfo:
client.add_app(app_def)
print('An app with an inaccessible secret could not be deployed because: {}'.format(excinfo.value))
assert excinfo.value.response.status_code == 422
assert 'Secret {} is not accessible'.format(secret_name) in excinfo.value.response.text
@dcos_1_9
@pytest.mark.skipif("ee_version() is None")
def test_pod_inaccessible_secret_env_var():
secret_name = '/some/secret' # Secret in an inaccessible namespace
pod_id = '/pod-inaccessible-secret-env-var-{}'.format(uuid.uuid4().hex)
pod_def = {
"id": pod_id,
"containers": [{
"name": "container-1",
"resources": {
"cpus": 0.1,
"mem": 64
},
"exec": {
"command": {
"shell": "echo \"shouldn't be called anyway\""
}
}
}],
"environment": {
"SECRET_ENV": {
"secret": "secret1"
}
},
"networks": [{
"mode": "host"
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
with pytest.raises(requests.HTTPError) as excinfo:
client.add_pod(pod_def)
print('A pod with an inaccessible secret could not be deployed because: {}'.format(excinfo.value))
assert excinfo.value.response.status_code == 422
assert 'Secret {} is not accessible'.format(secret_name) in excinfo.value.response.text
@dcos_1_9
@pytest.mark.skipif("ee_version() is None")
def test_pod_secret_env_var(secret_fixture):
secret_name, secret_value = secret_fixture
pod_id = '/pod-secret-env-var-{}'.format(uuid.uuid4().hex)
pod_def = {
"id": pod_id,
"containers": [{
"name": "container-1",
"resources": {
"cpus": 0.5,
"mem": 64
},
"endpoints": [{
"name": "http",
"hostPort": 0,
"protocol": [
"tcp"
]}
],
"exec": {
"command": {
"shell": "echo $SECRET_ENV && "
"echo $SECRET_ENV >> $MESOS_SANDBOX/secret-env && "
"/opt/mesosphere/bin/python -m http.server $ENDPOINT_HTTP"
}
}
}],
"environment": {
"SECRET_ENV": {
"secret": "secret1"
}
},
"networks": [{
"mode": "host"
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
client.add_pod(pod_def)
deployment_wait(service_id=pod_id)
instances = client.show_pod(pod_id)['instances']
assert len(instances) == 1, 'Failed to start the secret environment variable pod'
port = instances[0]['containers'][0]['endpoints'][0]['allocatedHostPort']
host = instances[0]['networks'][0]['addresses'][0]
cmd = "curl {}:{}/secret-env".format(host, port)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def value_check():
status, data = run_command_on_master(cmd)
assert status, "{} did not succeed. status = {}, data = {}".format(cmd, status, data)
assert data.rstrip() == secret_value, "Got an unexpected secret data"
value_check()
@pytest.mark.skipif('marathon_version_less_than("1.5")')
@pytest.mark.skipif("ee_version() is None")
def test_pod_file_based_secret(secret_fixture):
secret_name, secret_value = secret_fixture
secret_normalized_name = secret_name.replace('/', '')
pod_id = '/pod-fbs-{}'.format(uuid.uuid4().hex)
pod_def = {
"id": pod_id,
"containers": [{
"name": "container-1",
"resources": {
"cpus": 0.5,
"mem": 64
},
"endpoints": [{
"name": "http",
"hostPort": 0,
"protocol": [
"tcp"
]}
],
"exec": {
"command": {
"shell": "cat {} >> {}_file && /opt/mesosphere/bin/python -m http.server $ENDPOINT_HTTP".format(
secret_normalized_name, secret_normalized_name),
}
},
"volumeMounts": [{
"name": "vol",
"mountPath": './{}'.format(secret_name)
}],
}],
"networks": [{
"mode": "host"
}],
"volumes": [{
"name": "vol",
"secret": "secret1"
}],
"secrets": {
"secret1": {
"source": secret_name
}
}
}
client = marathon.create_client()
client.add_pod(pod_def)
deployment_wait(service_id=pod_id)
instances = client.show_pod(pod_id)['instances']
assert len(instances) == 1, 'Failed to start the file based secret pod'
port = instances[0]['containers'][0]['endpoints'][0]['allocatedHostPort']
host = instances[0]['networks'][0]['addresses'][0]
cmd = "curl {}:{}/{}_file".format(host, port, secret_normalized_name)
@retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
def value_check():
status, data = run_command_on_master(cmd)
assert status, "{} did not succeed. status = {}, data = {}".format(cmd, status, data)
assert data.rstrip() == secret_value, "Got an unexpected secret data"
value_check()
# Uncomment to run a quick and sure-to-pass SI test on any cluster. Useful for running SI tests locally
# from fixtures import parent_group
# def test_foo(parent_group):
# client = marathon.create_client()
# app_def = apps.sleep_app(parent_group=parent_group)
# app_id = app_def['id']
# client.add_app(app_def)
# deployment_wait(service_id=app_id)
#
# tasks = client.get_tasks(app_id)
# assert len(tasks) == 1, 'Failed to start a simple sleep app'
@pytest.fixture(scope="function")
def secret_fixture():
secret_name = '/mysecret'
secret_value = 'super_secret_password'
common.create_secret(secret_name, secret_value)
yield secret_name, secret_value
common.delete_secret(secret_name)
|
apache-2.0
| -217,581,934,169,658,050
| 34.073292
| 158
| 0.624495
| false
| 3.681575
| true
| false
| false
|
kubevirt/client-python
|
kubevirt/models/k8s_io_apimachinery_pkg_apis_meta_v1_time.py
|
1
|
2430
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class K8sIoApimachineryPkgApisMetaV1Time(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self):
"""
K8sIoApimachineryPkgApisMetaV1Time - a model defined in Swagger
"""
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, K8sIoApimachineryPkgApisMetaV1Time):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
apache-2.0
| 7,947,710,263,294,770,000
| 23.545455
| 77
| 0.518519
| false
| 4.300885
| false
| false
| false
|
chrisvans/roasttron
|
docs/conf.py
|
1
|
7821
|
# -*- coding: utf-8 -*-
#
# roasttron documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'roasttron'
copyright = u"2015, Chris VanSchyndel"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'roasttrondoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'roasttron.tex',
u'roasttron Documentation',
u"Chris VanSchyndel", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'roasttron', u'roasttron Documentation',
[u"Chris VanSchyndel"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'roasttron', u'roasttron Documentation',
u"Chris VanSchyndel", 'roasttron',
'Roast Profiling Application', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
mit
| 6,534,412,259,365,037,000
| 30.922449
| 80
| 0.697737
| false
| 3.769157
| true
| false
| false
|
untitaker/python-webuntis
|
webuntis/objects.py
|
1
|
29012
|
"""
This file is part of python-webuntis
:copyright: (c) 2013 by Markus Unterwaditzer.
:license: BSD, see LICENSE for more details.
"""
import datetime
from webuntis.utils import datetime_utils, lazyproperty, \
timetable_utils
class Result(object):
"""Base class used to represent most API objects.
:param data: Usually JSON data that should be represented.
In the case of :py:class:`ListResult`, however, it might also
be a list of JSON mixed with :py:class:`ListItem` objects.
:param parent: (optional) A result object this result should be the child
of. If given, the session will be inherited.
:param session: Mandatory if ``parent`` is not supplied. Overrides the
parent's inherited session.
"""
def __init__(self, data, parent=None, session=None):
if bool(parent is None) == bool(session is None):
raise TypeError('Either parent or session has to be provided.')
if parent is not None and not hasattr(parent, '_session'):
raise TypeError('Parent must have a _session attribute.')
self._session = session or parent._session
self._parent = parent
self._data = data
@lazyproperty
def id(self):
"""The ID of this element.
An ID is needed for the object to be hashable. Therefore a result
may bring its own implementation of this method even though the
original API response didn't contain any ID."""
return self._data[u'id'] if 'id' in self._data else None
def __int__(self):
"""This is useful if the users pass a ListItem when a numerical ID
is expected, so we can just put the thing through int(), regardless of
what type it is."""
assert self.id is not None
return self.id
def __hash__(self):
assert self.id is not None
return hash(self.__class__.__name__) * 101 + self.id
def __eq__(self, other):
return type(self) is type(other) and hash(self) == hash(other)
def __getstate__(self):
return self._data
def __setstate__(self, data):
self._data = data
def __str__(self):
"""a simple to string function: just the name or the full info -- debug only"""
try:
return self._data[u'name']
except KeyError:
try:
return self.name
except AttributeError:
return str(self._data)
except TypeError:
return str(self._data)
def __repr__(self):
try:
return self.__class__.__qualname__ + "(" + str(self._data) + ")"
except AttributeError:
return self.__class__.__name__ + "(" + str(self._data) + ")"
class ListItem(Result):
"""ListItems represent an item in a
:py:class:`Result`. They don't contain methods to
retrieve data."""
class ListResult(Result):
"""A list-like version of :py:class:`Result` that takes a list and returns
a list of objects, containing a list value each.
"""
# When the Result returns an array, this is very useful. Every item of that
# array will be fed to an instance of self._itemclass, with the session and
# the array item as initialization arguments.
#: the class which should be used to instantiate an array item.
_itemclass = ListItem
def filter(self, **criterions):
"""
Return a list of all objects, filtered by attributes::
foo = s.klassen().filter(id=1) # is kind-of the same as
foo = [kl for kl in s.klassen() if kl.id == 1]
# We can also use sets to match multiple values.
bar = s.klassen().filter(name={'1A', '2A', '3A', '4A'})
# is kind-of the same as
bar = [kl for kl in s.klassen()
if kl.id in {'1A', '2A', '3A', '4A'}]
# Or you can use a list: this keeps the order: the first element
# of the result corresponds to the first element in the filter
# Important after using combine()
bar = s.klassen().filter(name=['1A', '2A', '3A', '4A'])
# --> bar[0].name == '1A'
# Since ``filter`` returns a ListResult itself too, we can chain
# multiple calls together:
bar = s.klassen().filter(id=4, name='7A') # is the same as
bar = s.klassen().filter(id=4).filter(name='7A')
:py:meth:`filter` is also used when using the ``in`` operator on a
:py:class:`ListResult`::
we_have_it = {'name': '6A'} in s.klassen() # same as
we_have_it = bool(s.klassen().filter(name='6A'))
.. note::
This is only available because it looks nicer than list
comprehensions or generator expressions. Depending on your usecase
alternatives to this method may be faster.
"""
criterions = list(criterions.items())
def meets_criterions(item):
"""Returns true if the item meets the criterions"""
for key, value in criterions:
# if the attribute value isn't one we're looking for
attribute = getattr(item, key)
if attribute == value:
continue
elif isinstance(value, set) and attribute in value:
continue
else:
return False
return True
if isinstance(criterions[0][1], list):
return type(self)(
parent=self,
data=[self.filter(**{key: v})[0]
for key, values in criterions
for v in values
])
return type(self)(
parent=self,
data=[item for item in self if meets_criterions(item)]
)
def __contains__(self, criterion):
if isinstance(criterion, self._itemclass):
return any(item is criterion for item in self)
return bool(self.filter(**criterion))
def __getitem__(self, i):
"""Makes the object iterable and behave like a list"""
data = self._data[i] # fails if there is no such item
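# Lazily wrap the raw JSON item in self._itemclass below and cache the wrapped
# object back into self._data, so repeated accesses reuse the same instance.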
if type(data) is not self._itemclass:
data = self._data[i] = self._itemclass(
parent=self,
data=data
)
return data
def __len__(self):
"""Return the length of the items"""
return len(self._data)
def __hash__(self):
raise NotImplementedError()
def __eq__(self, other):
return type(other) is type(self) and other._data == self._data
def __str__(self):
"""a simple to string function: a list of results -- debug only"""
return "[" + ", ".join(str(d) for d in self._data) + "]"
def __repr__(self):
"""a simple to string function: a list of results -- debug only"""
try:
return self.__class__.__qualname__ + "[" + ", ".join(repr(d) for d in self._data) + "]"
except AttributeError:
return self.__class__.__name__ + "[" + ", ".join(repr(d) for d in self._data) + "]"
class DepartmentObject(ListItem):
"""Represents a department"""
@lazyproperty
def name(self):
"""short name such as *R1A*"""
return self._data[u'name']
@lazyproperty
def long_name(self):
"""Long name, such as *Raum Erste A*. Not predictable."""
return self._data[u'longName']
class DepartmentList(ListResult):
"""A list of departments, in form of :py:class:`DepartmentObject`
instances."""
_itemclass = DepartmentObject
class HolidayObject(ListItem):
"""Represents a single holiday."""
@lazyproperty
def start(self):
"""The start date of the holiday, as a datetime object."""
return datetime_utils.parse_date(self._data[u'startDate'])
@lazyproperty
def end(self):
"""The end of the holiday"""
return datetime_utils.parse_date(self._data[u'endDate'])
@lazyproperty
def name(self):
"""Name, such as *Nationalfeiertag*."""
return self._data[u'longName']
@lazyproperty
def short_name(self):
"""Abbreviated form of the name"""
return self._data[u'name']
class HolidayList(ListResult):
"""A list of holidays, in form of :py:class:`HolidayObject`
instances."""
_itemclass = HolidayObject
class ColorMixin:
"""Interface support fore/back color"""
@lazyproperty
def forecolor(self):
"""The foreground color used in the web interface and elsewhere"""
return self._data[self.name][u'foreColor']
@lazyproperty
def backcolor(self):
"""The background color used in the web interface and elsewhere"""
return self._data[self.name][u'backColor']
class KlassenObject(ListItem, ColorMixin):
"""Represents a school class."""
@lazyproperty
def name(self):
"""Name of class"""
return self._data[u'name']
@lazyproperty
def long_name(self):
"""Long name of class"""
return self._data[u'longName']
class KlassenList(ListResult):
"""A list of school classes, in form of :py:class:`KlassenObject`
instances."""
_itemclass = KlassenObject
class PeriodObject(ListItem):
"""Represents a time range, where lessons/subjects may be held."""
@lazyproperty
def start(self):
"""The start date/time of the period, as datetime object."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'startTime']
)
@lazyproperty
def end(self):
"""The end date/time of the period."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'endTime']
)
@lazyproperty
def klassen(self):
"""A :py:class:`KlassenList` containing the classes which are attending
this period."""
return self._session.klassen(from_cache=True).filter(
id=[kl[u'id'] for kl in self._data[u'kl']]
)
@lazyproperty
def teachers(self):
"""A list of :py:class:`TeacherObject` instances,
which are attending this period."""
return self._session.teachers(from_cache=True).filter(
id=[te[u'id'] for te in self._data[u'te']]
)
@lazyproperty
def subjects(self):
"""A :py:class:`SubjectList` containing the subjects which are topic of
this period. This is not used for things like multiple language lessons
(*e.g.* Latin, Spanish, French) -- each of those will get placed in
their own period."""
return self._session.subjects(from_cache=True).filter(
id=[su[u'id'] for su in self._data[u'su']]
)
@lazyproperty
def rooms(self):
"""The rooms (:py:class:`RoomList`) where this period is taking place
at. This also is not used for multiple lessons, but rather for a single
lesson that is actually occurring at multiple locations (?)."""
return self._session.rooms(from_cache=True).filter(
id=[ro[u'id'] for ro in self._data[u'ro']]
)
@lazyproperty
def code(self):
"""May be:
- ``None`` -- There's nothing special about this period.
- ``"cancelled"`` -- Cancelled
- ``"irregular"`` -- Substitution/"Supplierung"/Not planned event
"""
code = self._data.get(u'code', None)
if code in (None, u'cancelled', u'irregular'):
return code
return None
@lazyproperty
def original_teachers(self):
""" Support for original teachers """
try:
return self._session.teachers(from_cache=True).filter(id=[te[u'orgid'] for te in self._data[u'te']])
except KeyError:
pass
return []
@lazyproperty
def original_rooms(self):
""" Support for original rooms """
try:
return self._session.rooms(from_cache=True).filter(id=[ro[u'orgid'] for ro in self._data[u'ro']])
except KeyError:
pass
return []
@lazyproperty
def type(self):
"""May be:
- ``"ls"`` -- Normal lesson
- ``"oh"`` -- Office hour
- ``"sb"`` -- Standby
- ``"bs"`` -- Break Supervision
- ``"ex"`` -- Examination
"""
return self._data.get(u'lstype', u'ls')
class PeriodList(ListResult):
"""Aka timetable, a list of periods, in form of :py:class:`PeriodObject`
instances."""
_itemclass = PeriodObject
def to_table(self, dates=None, times=None):
"""
Creates a table-like structure out of the periods. Useful for rendering
timetables in HTML and other markup languages.
Check out the example from the repository for clarification.
:param dates: An iterable of :py:class:`datetime.date` objects that
definitely should be included in the table. If this parameter is
``None``, the timetable is just as wide as it has to be, leaving
out days without periods.
:param times: An iterable of :py:class:`datetime.time` objects that
definitely should be included in the table. If this parameter is
``None``, the timetable is just as tall as it has to be, leaving
out hours without periods.
:returns: A list containing "rows", which in turn contain "hours",
which contain :py:class:`webuntis.objects.PeriodObject` instances
which are happening at the same time.
"""
return timetable_utils.table(self, dates=dates, times=times)
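# Illustrative usage sketch (assumptions: `s` is a logged-in webuntis.Session and
# `klasse`, `start`, `end` are valid arguments for it; none of these are defined here):
#
#     for row in s.timetable(klasse=klasse, start=start, end=end).to_table():
#         ...  # each row groups the periods happening at the same time, one
#              # cell per date, as described in the :returns: section above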
def combine(self, combine_breaks=True):
"""
Combine consecutive entries
:param combine_breaks: combine of breaks
:return:
"""
return timetable_utils.combine(self, {'date', 'activityType', 'su', 'kl'}, combine_breaks)
class RoomObject(ListItem, ColorMixin):
"""Represents a physical room. Such as a classroom, but also the physics
lab or whatever.
"""
@lazyproperty
def name(self):
"""The short name of the room. Such as PHY."""
return self._data[u'name']
@lazyproperty
def long_name(self):
"""The long name of the room. Such as "Physics lab"."""
return self._data[u'longName']
class RoomList(ListResult):
"""A list of rooms, in form of :py:class:`RoomObject` instances."""
_itemclass = RoomObject
class SchoolyearObject(ListItem):
"""Represents a schoolyear."""
@lazyproperty
def name(self):
""""2010/2011\""""
return self._data[u'name']
@lazyproperty
def start(self):
"""The start date of the schoolyear, as datetime object"""
return datetime_utils.parse_date(self._data[u'startDate'])
@lazyproperty
def end(self):
"""The end date"""
return datetime_utils.parse_date(self._data[u'endDate'])
@lazyproperty
def is_current(self):
"""
Boolean, check if this is the current schoolyear::
>>> import webuntis
>>> s = webuntis.Session(...).login()
>>> y = s.schoolyears()
>>> y.current.id
7
>>> y.current.is_current
True
>>> y.filter(id=y.current.id).is_current
True
"""
return self == self._parent.current
class SchoolyearList(ListResult):
"""A list of schoolyears, in form of :py:class:`SchoolyearObject`
instances."""
_itemclass = SchoolyearObject
@lazyproperty
def current(self):
"""Returns the current schoolyear in form of a
:py:class:`SchoolyearObject`"""
current_data = self._session._request(u'getCurrentSchoolyear')
current = self.filter(id=current_data[u'id'])[0]
return current
class SubjectObject(ListItem, ColorMixin):
"""Represents a subject."""
@lazyproperty
def name(self):
"""Short name of subject, such as *PHY*"""
return self._data[u'name']
@lazyproperty
def long_name(self):
"""Long name of subject, such as *Physics*"""
return self._data[u'longName']
class SubjectList(ListResult):
"""A list of subjects, in form of :py:class:`SubjectObject` instances."""
_itemclass = SubjectObject
class PersonObject(ListItem):
"""Represents a person (teacher or student)."""
@lazyproperty
def fore_name(self):
"""fore name of the person"""
return self._data[u'foreName']
@lazyproperty
def long_name(self):
"""surname of person"""
return self._data[u'longName']
surname = long_name
@lazyproperty
def name(self):
"""full name of the person"""
return self._data[u'name']
class TeacherObject(PersonObject):
"""Represents a teacher."""
@lazyproperty
def title(self):
"""title of the teacher"""
return self._data[u'title']
@lazyproperty
def full_name(self):
"""full name of teacher (title, forname, longname"""
return " ".join((self.title, self.fore_name, self.long_name)).strip()
class TeacherList(ListResult):
"""A list of teachers, in form of :py:class:`TeacherObject` instances."""
_itemclass = TeacherObject
class ColorInfo(Result, ColorMixin):
"""
An object containing information about a lesson type or a period code::
>>> import webuntis
>>> s = webuntis.Session(...).login()
>>> lstype = s.statusdata().lesson_types[0]
>>> lstype.name
'ls'
>>> lstype.forecolor
'000000'
>>> lstype.backcolor
'ee7f00'
::
>>> pcode = s.statusdata().period_codes[0]
>>> pcode.name
'cancelled'
>>> pcode.forecolor
'FFFFFF'
>>> pcode.backcolor
'FF0000'
"""
@lazyproperty
def id(self):
return hash(self.__class__.__name__ + self.name)
@lazyproperty
def name(self):
"""The name of the LessonType or PeriodCode"""
return list(self._data.items())[0][0]
class StatusData(Result):
"""Information about lesson types and period codes and their colors."""
@lazyproperty
def lesson_types(self):
"""A list of :py:class:`ColorInfo` objects, containing
information about all lesson types defined
:rtype: `list` [ColorInfo]
"""
return [
ColorInfo(parent=self, data=data)
for data in self._data[u'lstypes']
]
@lazyproperty
def period_codes(self):
"""A list of :py:class:`ColorInfo` objects, containing
information about all period codes defined
:rtype: `list` [ColorInfo]
"""
return [
ColorInfo(parent=self, data=data)
for data in self._data[u'codes']
]
class TimeStampObject(Result):
"""Information about last change of data -- timestamp (given in milliseconds)"""
@lazyproperty
def date(self):
"""
get timestamp as python datetime object
:return: datetime.datetime
"""
return datetime.datetime.fromtimestamp(self._data / 1000)
class SubstitutionObject(PeriodObject):
"""Information about substitution."""
@lazyproperty
def type(self):
"""type of substitution
cancel cancellation
subst teacher substitution
add additional period
shift shifted period
rmchg room change
:rtype: str
"""
return self._data[u'type']
@lazyproperty
def reschedule_start(self):
"""The start of the rescheduled substitution (or None)
:return: datetime.datetime
"""
try:
return datetime_utils.parse_datetime(self._data[u'reschedule'][u'date'],
self._data[u'reschedule'][u'startTime'])
except KeyError:
return None
@lazyproperty
def reschedule_end(self):
"""The end of the rescheduled substitution (or None)
:return: datetime.datetime
"""
try:
return datetime_utils.parse_datetime(self._data[u'reschedule'][u'date'],
self._data[u'reschedule'][u'endTime'])
except KeyError:
return None
class SubstitutionList(ListResult):
"""A list of substitutions in form of :py:class:`SubstitutionObject` instances."""
_itemclass = SubstitutionObject
def combine(self, combine_breaks=True):
"""
Combine consecutive entries
:param combine_breaks: combine of breaks
:return:
"""
return timetable_utils.combine(self,
{'date', 'type', 'kl', 'su'},
combine_breaks,
lambda p: (p['type'],
str(p[u'te'] and p[u'te'][0][u'name']),
p[u'date'],
p[u'startTime']))
class TimeUnitObject(Result):
"""Information about the time grid"""
@lazyproperty
def name(self):
"""Name of Timeunit"""
return self._data[u'name']
@lazyproperty
def start(self):
return datetime_utils.parse_time(
self._data[u'startTime']
).time()
@lazyproperty
def end(self):
return datetime_utils.parse_time(
self._data[u'endTime']
).time()
class TimegridDayObject(Result):
"""Information about one day in the time grid"""
@lazyproperty
def day(self):
return self._data[u'day']
@lazyproperty
def dayname(self):
names = {1: "sunday", 2: "monday", 3: "tuesday", 4: "wednesday", 5: "thursday", 6: "friday", 7: "saturday"}
return names[self._data[u'day']]
@lazyproperty
def time_units(self):
return [
TimeUnitObject(parent=self, data=data)
for data in self._data[u'timeUnits']
]
class TimegridObject(ListResult):
"""A list of TimegridDayObjects"""
_itemclass = TimegridDayObject
class StudentObject(PersonObject):
"""Represents a student."""
@lazyproperty
def full_name(self):
"""full name of student (forname, longname)"""
return " ".join((self.fore_name, self.long_name)).strip()
@lazyproperty
def gender(self):
return self._data[u'gender']
@lazyproperty
def key(self):
return self._data[u'key']
class StudentsList(ListResult):
"""A list of students"""
_itemclass = StudentObject
class ExamTypeObject(Result):
"""Represents an Exam Type."""
@lazyproperty
def long_name(self):
"""Long name"""
return self._data[u'longName']
@lazyproperty
def name(self):
"""name"""
return self._data[u'name']
@lazyproperty
def show_in_timetable(self):
"""show this exam type in the timetable"""
return self._data[u'showInTimetable']
class ExamTypeList(ListResult):
"""A list of exam types"""
_itemclass = ExamTypeObject
class ExamObject(Result):
"""Represents an Exam."""
# classes list of classes
# teachers list of teachers
# students list of students
# subject
@lazyproperty
def start(self):
"""The start date/time of the period, as datetime object."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'startTime']
)
@lazyproperty
def end(self):
"""The end date/time of the period."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'endTime']
)
@lazyproperty
def klassen(self):
"""A :py:class:`KlassenList` containing the classes which are attending
this period."""
return self._session.klassen(from_cache=True).filter(
id=set(self._data[u'classes'])
)
@lazyproperty
def teachers(self):
"""A list of :py:class:`TeacherObject` instances,
which are attending this period."""
return self._session.teachers(from_cache=True).filter(
id=set(self._data[u'teachers'])
)
@lazyproperty
def subject(self):
"""A :py:class:`SubjectObject` with the subject which are topic of
this period."""
return self._session.subjects(from_cache=True).filter(id=self._data[u'subject'])[0]
@lazyproperty
def students(self):
"""A list of :py:class:`StudentObject` instances,
which are attending this period."""
return self._session.students(from_cache=True).filter(
id=set(self._data[u'students'])
)
class ExamsList(ListResult):
"""A list of exams."""
_itemclass = ExamObject
class AbsenceObject(Result):
"""Represents an absence.
Attention: if there are multiple teachers/groups at the same time, there will be multiple entries for
the same student, but absentTime is only set for one (the first?) entry.
"""
@lazyproperty
def student(self):
"""
The docs say: student ID, but it is actually the student's KEY
:return:
"""
return self._session.students(from_cache=True).filter(key=self._data[u'studentId'])[0]
@lazyproperty
def subject(self):
"""@TODO: untested - always empty"""
try:
sid = int(self._data[u'subjectId'])
except ValueError:
return ""
return self._session.subjects(from_cache=True).filter(id=sid)[0]
@lazyproperty
def teachers(self):
"""@TODO: untested - always empty"""
try:
tes = list(int(te) for te in self._data[u'teacherIds'] if te)
except ValueError:
return []
return self._session.teachers(from_cache=True).filter(id=tes)
@lazyproperty
def student_group(self):
try:
return self._data[u'studentGroup']
except KeyError:
return ''
@lazyproperty
def checked(self):
return self._data[u'checked']
@lazyproperty
def name(self):
"""Name of absent student"""
return self.student.full_name
@lazyproperty
def start(self):
"""The start date/time of the period, as datetime object."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'startTime']
)
@lazyproperty
def end(self):
"""The end date/time of the period."""
return datetime_utils.parse_datetime(
self._data[u'date'],
self._data[u'endTime']
)
@lazyproperty
def reason(self):
try:
return self._data[u'absenceReason']
except KeyError:
return ''
@lazyproperty
def time(self):
try:
return int(self._data[u'absentTime'])
except KeyError:
return 0
@lazyproperty
def status(self):
try:
return self._data[u'excuseStatus']
except KeyError:
return ''
class AbsencesList(ListResult):
"""A list of absences."""
_itemclass = AbsenceObject
def __init__(self, data, parent=None, session=None):
# the data is a dict() with just one key
data = data[u'periodsWithAbsences']
Result.__init__(self, data, parent, session)
class ClassRegEvent(Result):
"""Represents an ClassRegEvent."""
@lazyproperty
def student(self):
"""
The docs say: student ID, but it is actually the student's KEY
:return:
"""
return self._session.students(from_cache=True).filter(key=self._data[u'studentid'])[0]
@lazyproperty
def sur_name(self):
"""sur name of the person"""
return self._data[u'surname']
@lazyproperty
def fore_name(self):
"""fore name of the person"""
return self._data[u'forname']
@lazyproperty
def name(self):
"""fore name of the person"""
return " ".join((self.sur_name, self.fore_name))
@lazyproperty
def reason(self):
"""reason of the classregevent"""
return self._data[u'reason']
@lazyproperty
def text(self):
"""text of the classregevent"""
return self._data[u'text']
@lazyproperty
def date(self):
"""the date of the classregevent."""
return datetime_utils.parse_date(self._data[u'date'])
@lazyproperty
def subject(self):
"""the subject of the classregevent."""
return self._data[u'subject']
class ClassRegEventList(ListResult):
"""A list of ClassRegEvents."""
_itemclass = ClassRegEvent
|
bsd-3-clause
| 4,595,428,751,574,091,300
| 27.555118
| 115
| 0.576106
| false
| 4.085622
| false
| false
| false
|
dims/cinder
|
cinder/volume/drivers/dell/dell_storagecenter_common.py
|
1
|
55203
|
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from cinder import exception
from cinder import objects
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume import driver
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.san.san import san_opts
from cinder.volume import volume_types
common_opts = [
cfg.IntOpt('dell_sc_ssn',
default=64702,
help='Storage Center System Serial Number'),
cfg.PortOpt('dell_sc_api_port',
default=3033,
help='Dell API port'),
cfg.StrOpt('dell_sc_server_folder',
default='openstack',
help='Name of the server folder to use on the Storage Center'),
cfg.StrOpt('dell_sc_volume_folder',
default='openstack',
help='Name of the volume folder to use on the Storage Center'),
cfg.BoolOpt('dell_sc_verify_cert',
default=False,
help='Enable HTTPS SC certificate verification.')
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(common_opts)
class DellCommonDriver(driver.ConsistencyGroupVD, driver.ManageableVD,
driver.ExtendVD, driver.ReplicaV2VD,
driver.SnapshotVD, driver.BaseVD):
def __init__(self, *args, **kwargs):
super(DellCommonDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(common_opts)
self.configuration.append_config_values(san_opts)
self.backend_name =\
self.configuration.safe_get('volume_backend_name') or 'Dell'
self.backends = self.configuration.safe_get('replication_device')
self.replication_enabled = True if self.backends else False
self.is_direct_connect = False
def _bytes_to_gb(self, spacestring):
"""Space is returned in a string like ...
7.38197504E8 Bytes
Need to split that apart and convert to GB.
:returns: gbs in int form
"""
try:
n = spacestring.split(' ', 1)
fgbs = float(n[0]) / 1073741824.0
igbs = int(fgbs)
return igbs
except Exception:
# If any of that blew up it isn't in the format we
# thought so eat our error and return None
return None
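# Worked example (illustrative only): for '7.38197504E8 Bytes' the split yields
# '7.38197504E8'; 7.38197504e8 / 1073741824.0 == 0.6875, which int() truncates
# to 0 GB. A value of '1.073741824E11 Bytes' comes out as exactly 100 GB.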
def do_setup(self, context):
"""One time driver setup.
Called once by the manager after the driver is loaded.
Sets up clients, check licenses, sets up protocol
specific helpers.
"""
self._client = dell_storagecenter_api.StorageCenterApiHelper(
self.configuration)
def check_for_setup_error(self):
"""Validates the configuration information."""
with self._client.open_connection() as api:
api.find_sc()
self.is_direct_connect = api.is_direct_connect
if self.is_direct_connect and self.replication_enabled:
msg = _('Dell Cinder driver configuration error replication '
'not supported with direct connect.')
raise exception.InvalidHost(reason=msg)
if self.replication_enabled:
# Check that our replication destinations are available.
# TODO(tswanson): Check if we need a diskfolder. (Or not.)
# TODO(tswanson): Can we check that the backend specifies
# TODO(tswanson): the same ssn as target_device_id.
for backend in self.backends:
replssn = backend['target_device_id']
try:
# Just do a find_sc on it. If it raises we catch
# that and raise with a correct exception.
api.find_sc(int(replssn))
except exception.VolumeBackendAPIException:
msg = _('Dell Cinder driver configuration error '
'replication_device %s not found') % replssn
raise exception.InvalidHost(reason=msg)
def _get_volume_extra_specs(self, volume):
"""Gets extra specs for the given volume."""
type_id = volume.get('volume_type_id')
if type_id:
return volume_types.get_volume_type_extra_specs(type_id)
return {}
def _add_volume_to_consistency_group(self, api, scvolume, volume):
"""Just a helper to add a volume to a consistency group.
:param api: Dell SC API object.
:param scvolume: Dell SC Volume object.
:param volume: Cinder Volume object.
:returns: Nothing.
"""
if scvolume and volume.get('consistencygroup_id'):
profile = api.find_replay_profile(
volume.get('consistencygroup_id'))
if profile:
api.update_cg_volumes(profile, [volume])
def _do_repl(self, api, volume):
"""Checks if we can do replication.
Need the extra spec set and we have to be talking to EM.
:param api: Dell REST API object.
:param volume: Cinder Volume object.
:return: Boolean (True if replication enabled), Boolean (True if
replication type is sync).
"""
do_repl = False
sync = False
if not self.is_direct_connect:
specs = self._get_volume_extra_specs(volume)
do_repl = specs.get('replication_enabled') == '<is> True'
sync = specs.get('replication_type') == '<in> sync'
return do_repl, sync
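# Illustrative note: the volume-type extra specs checked above look roughly like
# {'replication_enabled': '<is> True', 'replication_type': '<in> sync'}; both
# values are compared as literal strings, so any other value leaves the
# corresponding flag False.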
def _create_replications(self, api, volume, scvolume):
"""Creates any appropriate replications for a given volume.
:param api: Dell REST API object.
:param volume: Cinder volume object.
:param scvolume: Dell Storage Center Volume object.
:return: model_update
"""
# Replication V2
# for now we assume we have an array named backends.
replication_driver_data = None
# Replicate if we are supposed to.
do_repl, sync = self._do_repl(api, volume)
if do_repl:
for backend in self.backends:
# Check if we are to replicate the active replay or not.
specs = self._get_volume_extra_specs(volume)
replact = specs.get('replication:activereplay') == '<is> True'
if not api.create_replication(scvolume,
backend['target_device_id'],
backend.get('qosnode',
'cinderqos'),
sync,
backend.get('diskfolder', None),
replact):
# Create replication will have printed a better error.
msg = _('Replication %(name)s to %(ssn)s failed.') % {
'name': volume['id'],
'ssn': backend['target_device_id']}
raise exception.VolumeBackendAPIException(data=msg)
if not replication_driver_data:
replication_driver_data = backend['target_device_id']
else:
replication_driver_data += ','
replication_driver_data += backend['target_device_id']
# If we did something return model update.
model_update = {}
if replication_driver_data:
model_update = {'replication_status': 'enabled',
'replication_driver_data': replication_driver_data}
return model_update
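# Illustrative note: with two backends (hypothetical SSNs 64702 and 64703) the
# returned model_update would look like
# {'replication_status': 'enabled', 'replication_driver_data': '64702,64703'};
# an empty dict is returned when replication is not enabled for this volume.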
@staticmethod
def _cleanup_failed_create_volume(api, volumename):
try:
api.delete_volume(volumename)
except exception.VolumeBackendAPIException as ex:
LOG.info(_LI('Non fatal cleanup error: %s.'), ex.msg)
def create_volume(self, volume):
"""Create a volume."""
model_update = {}
# We use id as our name as it is unique.
volume_name = volume.get('id')
# Look for our volume
volume_size = volume.get('size')
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
storage_profile = specs.get('storagetype:storageprofile')
replay_profile_string = specs.get('storagetype:replayprofiles')
LOG.debug('Creating volume %(name)s of size %(size)s',
{'name': volume_name,
'size': volume_size})
scvolume = None
with self._client.open_connection() as api:
try:
if api.find_sc():
scvolume = api.create_volume(volume_name,
volume_size,
storage_profile,
replay_profile_string)
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume %s') %
volume_name)
# Update Consistency Group
self._add_volume_to_consistency_group(api, scvolume, volume)
# Create replications. (Or not. It checks.)
model_update = self._create_replications(api, volume, scvolume)
except Exception:
# if we actually created a volume but failed elsewhere
# clean up the volume now.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is None:
raise exception.VolumeBackendAPIException(
data=_('Unable to create volume. Backend down.'))
return model_update
def _split(self, replication_driver_data):
ssnstrings = []
if replication_driver_data:
for str in replication_driver_data.split(','):
ssnstring = str.strip()
if ssnstring:
ssnstrings.append(ssnstring)
return ssnstrings
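# Illustrative example: self._split('64702, 64703,') returns ['64702', '64703'];
# empty fragments and surrounding whitespace are dropped.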
def _delete_replications(self, api, volume):
"""Delete replications associated with a given volume.
We should be able to roll through the replication_driver_data list
of SSNs and delete replication objects between them and the source
volume.
:param api: Dell REST API object.
:param volume: Cinder Volume object
:return:
"""
do_repl, sync = self._do_repl(api, volume)
if do_repl:
volume_name = volume.get('id')
scvol = api.find_volume(volume_name)
replication_driver_data = volume.get('replication_driver_data')
# This is just a string of ssns separated by commas.
ssnstrings = self._split(replication_driver_data)
# Trundle through these and delete them all.
for ssnstring in ssnstrings:
ssn = int(ssnstring)
if not api.delete_replication(scvol, ssn):
LOG.warning(_LW('Unable to delete replication of '
'Volume %(vname)s to Storage Center '
'%(sc)s.'),
{'vname': volume_name,
'sc': ssnstring})
# If none of that worked, or there was nothing to do, it doesn't matter.
# Just move on.
def delete_volume(self, volume):
deleted = False
# We use id as our name as it is unique.
volume_name = volume.get('id')
LOG.debug('Deleting volume %s', volume_name)
with self._client.open_connection() as api:
try:
if api.find_sc():
self._delete_replications(api, volume)
deleted = api.delete_volume(volume_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to delete volume %s'),
volume_name)
# if there was an error we will have raised an
# exception. If it failed to delete it is because
# the conditions to delete a volume were not met.
if deleted is False:
raise exception.VolumeIsBusy(volume_name=volume_name)
def create_snapshot(self, snapshot):
"""Create snapshot"""
# our volume name is the volume id
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
LOG.debug('Creating snapshot %(snap)s on volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
if scvolume is not None:
if api.create_replay(scvolume,
snapshot_id,
0) is not None:
snapshot['status'] = 'available'
return
else:
LOG.warning(_LW('Unable to locate volume:%s'),
volume_name)
snapshot['status'] = 'error_creating'
msg = _('Failed to create snapshot %s') % snapshot_id
raise exception.VolumeBackendAPIException(data=msg)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create new volume from other volume's snapshot on appliance."""
model_update = {}
scvolume = None
src_volume_name = snapshot.get('volume_id')
# This snapshot could have been created on its own or as part of a
# cgsnapshot. If it was a cgsnapshot it will be identified on the Dell
# backend under cgsnapshot_id. Given the volume ID and the
# cgsnapshot_id we can find the appropriate snapshot.
# So first we look for cgsnapshot_id. If that is blank then it must
# have been a normal snapshot which will be found under snapshot_id.
snapshot_id = snapshot.get('cgsnapshot_id')
if not snapshot_id:
snapshot_id = snapshot.get('id')
volume_name = volume.get('id')
LOG.debug(
'Creating new volume %(vol)s from snapshot %(snap)s '
'from vol %(src)s',
{'vol': volume_name,
'snap': snapshot_id,
'src': src_volume_name})
with self._client.open_connection() as api:
try:
if api.find_sc():
srcvol = api.find_volume(src_volume_name)
if srcvol is not None:
replay = api.find_replay(srcvol,
snapshot_id)
if replay is not None:
volume_name = volume.get('id')
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
replay_profile_string = specs.get(
'storagetype:replayprofiles')
scvolume = api.create_view_volume(
volume_name, replay, replay_profile_string)
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume '
'%(name)s from %(snap)s.') %
{'name': volume_name,
'snap': snapshot_id})
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
# Replicate if we are supposed to.
model_update = self._create_replications(api,
volume,
scvolume)
except Exception:
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s created from %(snap)s',
{'vol': volume_name,
'snap': snapshot_id})
else:
msg = _('Failed to create volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
model_update = {}
scvolume = None
src_volume_name = src_vref.get('id')
volume_name = volume.get('id')
LOG.debug('Creating cloned volume %(clone)s from volume %(vol)s',
{'clone': volume_name,
'vol': src_volume_name})
with self._client.open_connection() as api:
try:
if api.find_sc():
srcvol = api.find_volume(src_volume_name)
if srcvol is not None:
# See if we have any extra specs.
specs = self._get_volume_extra_specs(volume)
replay_profile_string = specs.get(
'storagetype:replayprofiles')
# Create our volume
scvolume = api.create_cloned_volume(
volume_name, srcvol, replay_profile_string)
if scvolume is None:
raise exception.VolumeBackendAPIException(
message=_('Unable to create volume '
'%(name)s from %(vol)s.') %
{'name': volume_name,
'vol': src_volume_name})
# Update Consistency Group
self._add_volume_to_consistency_group(api,
scvolume,
volume)
# Replicate if we are supposed to.
model_update = self._create_replications(api,
volume,
scvolume)
except Exception:
# Clean up after ourselves.
self._cleanup_failed_create_volume(api, volume_name)
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to create volume %s'),
volume_name)
if scvolume is not None:
LOG.debug('Volume %(vol)s cloned from %(src)s',
{'vol': volume_name,
'src': src_volume_name})
else:
msg = _('Failed to create volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
return model_update
def delete_snapshot(self, snapshot):
"""delete_snapshot"""
volume_name = snapshot.get('volume_id')
snapshot_id = snapshot.get('id')
LOG.debug('Deleting snapshot %(snap)s from volume %(vol)s',
{'snap': snapshot_id,
'vol': volume_name})
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
if scvolume is not None:
if api.delete_replay(scvolume,
snapshot_id):
return
# if we are here things went poorly.
snapshot['status'] = 'error_deleting'
msg = _('Failed to delete snapshot %s') % snapshot_id
raise exception.VolumeBackendAPIException(data=msg)
def create_export(self, context, volume, connector):
"""Create an export of a volume.
The volume exists on creation and will be visible on
initialize connection. So nothing to do here.
"""
# TODO(tswanson): Move mapping code here.
pass
def ensure_export(self, context, volume):
"""Ensure an export of a volume.
Per the eqlx driver we just make sure that the volume actually
exists where we think it does.
"""
scvolume = None
volume_name = volume.get('id')
LOG.debug('Checking existence of volume %s', volume_name)
with self._client.open_connection() as api:
try:
if api.find_sc():
scvolume = api.find_volume(volume_name)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to ensure export of volume %s'),
volume_name)
if scvolume is None:
msg = _('Unable to find volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
def remove_export(self, context, volume):
"""Remove an export of a volume.
We do nothing here to match the nothing we do in create export. Again
we do everything in initialize and terminate connection.
"""
pass
def extend_volume(self, volume, new_size):
"""Extend the size of the volume."""
volume_name = volume.get('id')
LOG.debug('Extending volume %(vol)s to %(size)s',
{'vol': volume_name,
'size': new_size})
if volume is not None:
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(volume_name)
if api.expand_volume(scvolume, new_size) is not None:
return
# If we are here nothing good happened.
msg = _('Unable to extend volume %s') % volume_name
raise exception.VolumeBackendAPIException(data=msg)
def get_volume_stats(self, refresh=False):
"""Get volume status.
        If 'refresh' is True, update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def _update_volume_stats(self):
"""Retrieve stats info from volume group."""
with self._client.open_connection() as api:
storageusage = api.get_storage_usage() if api.find_sc() else None
# all of this is basically static for now
data = {}
data['volume_backend_name'] = self.backend_name
data['vendor_name'] = 'Dell'
data['driver_version'] = self.VERSION
data['storage_protocol'] = 'iSCSI'
data['reserved_percentage'] = 0
data['free_capacity_gb'] = 'unavailable'
data['total_capacity_gb'] = 'unavailable'
data['consistencygroup_support'] = True
# In theory if storageusage is None then we should have
# blown up getting it. If not just report unavailable.
if storageusage is not None:
totalcapacity = storageusage.get('availableSpace')
totalcapacitygb = self._bytes_to_gb(totalcapacity)
data['total_capacity_gb'] = totalcapacitygb
freespace = storageusage.get('freeSpace')
freespacegb = self._bytes_to_gb(freespace)
data['free_capacity_gb'] = freespacegb
data['QoS_support'] = False
data['replication_enabled'] = self.replication_enabled
if self.replication_enabled:
data['replication_type'] = ['async', 'sync']
data['replication_count'] = len(self.backends)
self._stats = data
LOG.debug('Total cap %(total)s Free cap %(free)s',
{'total': data['total_capacity_gb'],
'free': data['free_capacity_gb']})
def update_migrated_volume(self, ctxt, volume, new_volume,
original_volume_status):
"""Return model update for migrated volume.
:param volume: The original volume that was migrated to this backend
:param new_volume: The migration volume object that was created on
this backend as part of the migration process
:param original_volume_status: The status of the original volume
:returns: model_update to update DB with any needed changes
"""
# We use id as our volume name so we need to rename the backend
# volume to the original volume name.
original_volume_name = volume.get('id')
current_name = new_volume.get('id')
LOG.debug('update_migrated_volume: %(current)s to %(original)s',
{'current': current_name,
'original': original_volume_name})
if original_volume_name:
with self._client.open_connection() as api:
if api.find_sc():
scvolume = api.find_volume(current_name)
if (scvolume and
api.rename_volume(scvolume, original_volume_name)):
# Replicate if we are supposed to.
model_update = self._create_replications(api,
new_volume,
scvolume)
model_update['_name_id'] = None
return model_update
# The world was horrible to us so we should error and leave.
LOG.error(_LE('Unable to rename the logical volume for volume: %s'),
original_volume_name)
return {'_name_id': new_volume['_name_id'] or new_volume['id']}
def create_consistencygroup(self, context, group):
"""This creates a replay profile on the storage backend.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be created.
:returns: Nothing on success.
:raises: VolumeBackendAPIException
"""
gid = group['id']
with self._client.open_connection() as api:
cgroup = api.create_replay_profile(gid)
if cgroup:
LOG.info(_LI('Created Consistency Group %s'), gid)
return
msg = _('Unable to create consistency group %s') % gid
raise exception.VolumeBackendAPIException(data=msg)
def delete_consistencygroup(self, context, group, volumes):
"""Delete the Dell SC profile associated with this consistency group.
:param context: the context of the caller.
        :param group: the dictionary of the consistency group to be deleted.
:returns: Updated model_update, volumes.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if profile:
api.delete_replay_profile(profile)
            # If we are here because we found no profile, that should be fine,
            # as we are trying to delete it anyway.
# Now whack the volumes. So get our list.
volumes = self.db.volume_get_all_by_group(context, gid)
# Trundle through the list deleting the volumes.
for volume in volumes:
self.delete_volume(volume)
volume['status'] = 'deleted'
model_update = {'status': group['status']}
return model_update, volumes
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a consistency group.
:param context: the context of the caller.
:param group: the dictionary of the consistency group to be updated.
:param add_volumes: a list of volume dictionaries to be added.
:param remove_volumes: a list of volume dictionaries to be removed.
:returns: model_update, add_volumes_update, remove_volumes_update
model_update is a dictionary that the driver wants the manager
to update upon a successful return. If None is returned, the manager
will set the status to 'available'.
add_volumes_update and remove_volumes_update are lists of dictionaries
that the driver wants the manager to update upon a successful return.
Note that each entry requires a {'id': xxx} so that the correct
volume entry can be updated. If None is returned, the volume will
remain its original status. Also note that you cannot directly
assign add_volumes to add_volumes_update as add_volumes is a list of
cinder.db.sqlalchemy.models.Volume objects and cannot be used for
db update directly. Same with remove_volumes.
If the driver throws an exception, the status of the group as well as
those of the volumes to be added/removed will be set to 'error'.
"""
gid = group['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(gid)
if not profile:
LOG.error(_LE('Cannot find Consistency Group %s'), gid)
elif api.update_cg_volumes(profile,
add_volumes,
remove_volumes):
LOG.info(_LI('Updated Consistency Group %s'), gid)
# we need nothing updated above us so just return None.
return None, None, None
# Things did not go well so throw.
msg = _('Unable to update consistency group %s') % gid
raise exception.VolumeBackendAPIException(data=msg)
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Takes a snapshot of the consistency group.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to take.
:returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.debug('profile %s replayid %s', profile, snapshotid)
if api.snap_cg_replay(profile, snapshotid, 0):
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, snapshotid)
for snapshot in snapshots:
snapshot.status = 'available'
model_update = {'status': 'available'}
return model_update, snapshots
# That didn't go well. Tell them why. Then bomb out.
LOG.error(_LE('Failed to snap Consistency Group %s'), cgid)
else:
LOG.error(_LE('Cannot find Consistency Group %s'), cgid)
msg = _('Unable to snap Consistency Group %s') % cgid
raise exception.VolumeBackendAPIException(data=msg)
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot.
If profile isn't found return success. If failed to delete the
replay (the snapshot) then raise an exception.
:param context: the context of the caller.
:param cgsnapshot: Information about the snapshot to delete.
:returns: Updated model_update, snapshots.
:raises: VolumeBackendAPIException.
"""
cgid = cgsnapshot['consistencygroup_id']
snapshotid = cgsnapshot['id']
with self._client.open_connection() as api:
profile = api.find_replay_profile(cgid)
if profile:
LOG.info(_LI('Deleting snapshot %(ss)s from %(pro)s'),
{'ss': snapshotid,
'pro': profile})
if not api.delete_cg_replay(profile, snapshotid):
msg = (_('Unable to delete Consistency Group snapshot %s')
% snapshotid)
raise exception.VolumeBackendAPIException(data=msg)
snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
context, snapshotid)
for snapshot in snapshots:
snapshot.status = 'deleted'
model_update = {'status': 'deleted'}
return model_update, snapshots
def manage_existing(self, volume, existing_ref):
"""Brings an existing backend storage object under Cinder management.
existing_ref is passed straight through from the API request's
manage_existing_ref value, and it is up to the driver how this should
be interpreted. It should be sufficient to identify a storage object
that the driver should somehow associate with the newly-created cinder
volume structure.
There are two ways to do this:
        1. Rename the backend storage object so that it matches
           volume['name'], which is how drivers traditionally map between a
           cinder volume and the associated backend storage object.
2. Place some metadata on the volume, or somewhere in the backend, that
allows other driver requests (e.g. delete, clone, attach, detach...)
to locate the backend storage object when required.
If the existing_ref doesn't make sense, or doesn't refer to an existing
backend storage object, raise a ManageExistingInvalidReference
exception.
The volume may have a volume_type, and the driver can inspect that and
compare against the properties of the referenced backend storage
object. If they are incompatible, raise a
ManageExistingVolumeTypeMismatch, specifying a reason for the failure.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
api.manage_existing(volume['id'], existing_ref)
# Replicate if we are supposed to.
scvolume = api.find_volume(volume['id'])
model_update = self._create_replications(api, volume, scvolume)
if model_update:
return model_update
else:
msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
# Only return a model_update if we have replication info to add.
return None
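    # Illustrative note (values here are hypothetical): the existing_ref checked
    # above comes straight from the manage request and needs only one of the two
    # keys, e.g. {'source-name': 'volume-on-the-backend'} or
    # {'source-id': '64702.4242'}.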
def manage_existing_get_size(self, volume, existing_ref):
"""Return size of volume to be managed by manage_existing.
When calculating the size, round up to the next GB.
:param volume: Cinder volume to manage
:param existing_ref: Driver-specific information used to identify a
volume
"""
if existing_ref.get('source-name') or existing_ref.get('source-id'):
with self._client.open_connection() as api:
return api.get_unmanaged_volume_size(existing_ref)
else:
msg = _('Must specify source-name or source-id.')
raise exception.ManageExistingInvalidReference(
existing_ref=existing_ref, reason=msg)
def unmanage(self, volume):
"""Removes the specified volume from Cinder management.
Does not delete the underlying backend storage object.
For most drivers, this will not need to do anything. However, some
drivers might use this call as an opportunity to clean up any
Cinder-specific configuration that they have associated with the
backend storage object.
:param volume: Cinder volume to unmanage
"""
with self._client.open_connection() as api:
scvolume = api.find_volume(volume['id'])
if scvolume:
api.unmanage(scvolume)
def _get_retype_spec(self, diff, volume_name, specname, spectype):
"""Helper function to get current and requested spec.
:param diff: A difference dictionary.
:param volume_name: The volume name we are working with.
:param specname: The pretty name of the parameter.
:param spectype: The actual spec string.
:return: current, requested spec.
:raises: VolumeBackendAPIException
"""
spec = (diff['extra_specs'].get(spectype))
if spec:
if len(spec) != 2:
msg = _('Unable to retype %(specname)s, expected to receive '
'current and requested %(spectype)s values. Value '
'received: %(spec)s') % {'specname': specname,
'spectype': spectype,
'spec': spec}
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
current = spec[0]
requested = spec[1]
if current != requested:
LOG.debug('Retyping volume %(vol)s to use %(specname)s '
'%(spec)s.',
{'vol': volume_name,
'specname': specname,
'spec': requested})
return current, requested
else:
LOG.info(_LI('Retype was to same Storage Profile.'))
return None, None
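    # Illustrative example (spec values are made up): retype() below hands this
    # helper a diff whose extra_specs map each spec string to a
    # [current, requested] pair, e.g.
    #   diff['extra_specs'] = {'storagetype:storageprofile':
    #                          ['HighPriority', 'LowPriority']}
    # for which _get_retype_spec(diff, volume_name, 'Storage Profile',
    # 'storagetype:storageprofile') returns ('HighPriority', 'LowPriority').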
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
        Returns a boolean indicating whether the retype occurred, or a
        model_update dict when replication settings changed along with the retype.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
model_update = None
# Any spec changes?
if diff['extra_specs']:
volume_name = volume.get('id')
with self._client.open_connection() as api:
try:
# Get our volume
scvolume = api.find_volume(volume_name)
if scvolume is None:
LOG.error(_LE('Retype unable to find volume %s.'),
volume_name)
return False
# Check our specs.
# Storage profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Storage Profile',
'storagetype:storageprofile'))
                    # If there is a change and it didn't work, fail fast.
if (current != requested and not
api.update_storage_profile(scvolume, requested)):
LOG.error(_LE('Failed to update storage profile'))
return False
# Replay profiles.
current, requested = (
self._get_retype_spec(diff, volume_name,
'Replay Profiles',
'storagetype:replayprofiles'))
                    # If there is a change and it didn't work, fail fast.
if requested and not api.update_replay_profiles(scvolume,
requested):
LOG.error(_LE('Failed to update replay profiles'))
return False
# Replication_enabled.
current, requested = (
self._get_retype_spec(diff,
volume_name,
'replication_enabled',
'replication_enabled'))
                    # If there is a change and it didn't work, fail fast.
if current != requested:
if requested:
model_update = self._create_replications(api,
volume,
scvolume)
else:
self._delete_replications(api, volume)
model_update = {'replication_status': 'disabled',
'replication_driver_data': ''}
# Active Replay
current, requested = (
self._get_retype_spec(diff, volume_name,
'Replicate Active Replay',
'replication:activereplay'))
if current != requested and not (
api.update_replicate_active_replay(
scvolume, requested == '<is> True')):
LOG.error(_LE('Failed to apply '
'replication:activereplay setting'))
return False
# TODO(tswanson): replaytype once it actually works.
except exception.VolumeBackendAPIException:
# We do nothing with this. We simply return failure.
return False
# If we have something to send down...
if model_update:
return model_update
return True
def replication_enable(self, context, vref):
"""Re-enable replication on vref.
:param context: NA
:param vref: Cinder volume reference.
:return: model_update.
"""
volumename = vref.get('id')
LOG.info(_LI('Enabling replication on %s'), volumename)
model_update = {}
with self._client.open_connection() as api:
replication_driver_data = vref.get('replication_driver_data')
destssns = self._split(replication_driver_data)
do_repl, sync = self._do_repl(api, vref)
if destssns and do_repl:
scvolume = api.find_volume(volumename)
if scvolume:
for destssn in destssns:
if not api.resume_replication(scvolume, int(destssn)):
LOG.error(_LE('Unable to resume replication on '
'volume %(vol)s to SC %(ssn)s'),
{'vol': volumename,
'ssn': destssn})
model_update['replication_status'] = 'error'
break
else:
LOG.error(_LE('Volume %s not found'), volumename)
else:
LOG.error(_LE('Replication not enabled or no replication '
'destinations found. %s'),
volumename)
return model_update
def replication_disable(self, context, vref):
"""Disable replication on vref.
:param context: NA
:param vref: Cinder volume reference.
:return: model_update.
"""
volumename = vref.get('id')
LOG.info(_LI('Disabling replication on %s'), volumename)
model_update = {}
with self._client.open_connection() as api:
replication_driver_data = vref.get('replication_driver_data')
destssns = self._split(replication_driver_data)
do_repl, sync = self._do_repl(api, vref)
if destssns and do_repl:
scvolume = api.find_volume(volumename)
if scvolume:
for destssn in destssns:
if not api.pause_replication(scvolume, int(destssn)):
LOG.error(_LE('Unable to pause replication on '
'volume %(vol)s to SC %(ssn)s'),
{'vol': volumename,
'ssn': destssn})
model_update['replication_status'] = 'error'
break
else:
LOG.error(_LE('Volume %s not found'), volumename)
else:
LOG.error(_LE('Replication not enabled or no replication '
'destinations found. %s'),
volumename)
return model_update
def _find_host(self, ssnstring):
"""Find the backend associated with this ssnstring.
:param ssnstring: The ssn of the storage center we are looking for.
:return: The managed_backend_name associated with said storage center.
"""
for backend in self.backends:
if ssnstring == backend['target_device_id']:
return backend['managed_backend_name']
return None
def _parse_secondary(self, api, vref, secondary):
"""Find the replication destination associated with secondary.
:param api: Dell StorageCenterApi
:param vref: Cinder Volume
:param secondary: String indicating the secondary to failover to.
:return: Destination SSN and the host string for the given secondary.
"""
LOG.debug('_parse_secondary. Looking for %s.', secondary)
replication_driver_data = vref['replication_driver_data']
destssn = None
host = None
ssnstrings = self._split(replication_driver_data)
# Trundle through these and delete them all.
for ssnstring in ssnstrings:
# If they list a secondary it has to match.
# If they do not list a secondary we return the first
# replication on a working system.
if not secondary or secondary == ssnstring:
# Is a string. Need an int.
ssn = int(ssnstring)
# Without the source being up we have no good
# way to pick a destination to failover to. So just
# look for one that is just up.
try:
# If the SC ssn exists check if we are configured to
# use it.
if api.find_sc(ssn):
host = self._find_host(ssnstring)
# If host then we are configured.
if host:
# Save our ssn and get out of here.
destssn = ssn
break
except exception.VolumeBackendAPIException:
LOG.warning(_LW('SSN %s appears to be down.'), ssn)
LOG.info(_LI('replication failover secondary is %(ssn)s %(host)s'),
{'ssn': destssn,
'host': host})
return destssn, host
def replication_failover(self, context, vref, secondary):
"""Failover to secondary.
The flow is as follows.
        1. The user explicitly requests a failover of a replicated volume.
        2. Driver breaks replication.
           a. Neatly by deleting the SCReplication object if the
              primary is still up.
           b. Brutally by unmapping the replication volume if it isn't.
        3. We rename the volume to "Cinder failover <Volume GUID>"
        4. Change Cinder DB entry for which backend controls the volume
           to the backend listed in the replication_device.
        5. That's it.
Completion of the failover is done on first use on the new backend.
We do this by modifying the find_volume function.
Find volume searches the following places in order:
1. "<Volume GUID>" in the backend's volume folder.
2. "<Volume GUID>" outside of the volume folder.
3. "Cinder failover <Volume GUID>" anywhere on the system.
If "Cinder failover <Volume GUID>" was found:
        1. Volume is renamed to "<Volume GUID>".
        2. Volume is moved to the new backend's volume folder.
        3. The volume is now available on the secondary backend.
        :param context: NA
:param vref: Cinder volume reference.
:param secondary: SSN of the destination Storage Center
:return: model_update on failover.
"""
LOG.info(_LI('Failing replication %(vol)s to %(sec)s'),
{'vol': vref.get('id'),
'sec': secondary})
# If we fall through this is our error.
msg = _('Unable to failover replication.')
with self._client.open_connection() as api:
# Basic check. We should never get here.
do_repl, sync = self._do_repl(api, vref)
if not do_repl:
# If we did get here then there is a disconnect. Set our
# message and raise (below).
msg = _('Unable to failover unreplicated volume.')
else:
# Look for the specified secondary.
destssn, host = self._parse_secondary(api, vref, secondary)
if destssn and host:
volumename = vref.get('id')
# This will break the replication on the SC side. At the
# conclusion of this the destination volume will be
# renamed to indicate failover is in progress. We will
# pick the volume up on the destination backend later.
if api.break_replication(volumename, destssn):
model_update = {}
model_update['host'] = host
model_update['replication_driver_data'] = None
return model_update
# We are here. Nothing went well.
LOG.error(_LE('Unable to break replication from '
'%(from)s to %(to)d.'),
{'from': volumename,
'to': destssn})
else:
LOG.error(_LE('Unable to find valid destination.'))
# We raise to indicate something bad happened.
raise exception.ReplicationError(volume_id=vref.get('id'),
reason=msg)
def list_replication_targets(self, context, vref):
"""Lists replication targets for the given vref.
        We return the targets the volume has been set up to replicate to and
        that are configured on this backend.
:param context: NA
:param vref: Cinder volume object.
:return: A dict of the form {'volume_id': id,
'targets': [ {'type': xxx,
'target_device_id': xxx,
'backend_name': xxx}]}
"""
LOG.debug('list_replication_targets for volume %s', vref.get('id'))
targets = []
with self._client.open_connection() as api:
do_repl, sync = self._do_repl(api, vref)
# If we have no replication_driver_data then we have no replication
# targets
replication_driver_data = vref.get('replication_driver_data')
ssnstrings = self._split(replication_driver_data)
# If we have data.
if ssnstrings:
# Trundle through our backends.
for backend in self.backends:
# If we find a backend then we report it.
if ssnstrings.count(backend['target_device_id']):
target = {}
target['type'] = 'managed'
target['target_device_id'] = (
backend['target_device_id'])
target['backend_name'] = (
backend['managed_backend_name'])
targets.append(target)
else:
# We note if the source is not replicated to a
# configured destination for the backend.
LOG.info(_LI('Volume %(guid)s not replicated to '
'backend %(name)s'),
{'guid': vref['id'],
'name': backend['managed_backend_name']})
# At this point we note that what we found and what we
# expected to find were two different things.
if len(ssnstrings) != len(targets):
                LOG.warning(_LW('Expected replication count %(rdd)d does '
                                'not match configured replication count '
'%(tgt)d.'),
{'rdd': len(ssnstrings),
'tgt': len(targets)})
# Format response.
replication_targets = {'volume_id': vref.get('id'), 'targets': targets}
LOG.info(_LI('list_replication_targets: %s'), replication_targets)
return replication_targets
|
apache-2.0
| 7,205,747,925,299,166,000
| 44.622314
| 79
| 0.531366
| false
| 4.868419
| false
| false
| false
|
simoncozens/GlyphsPlugins
|
Comb.glyphsFilter/Contents/Resources/plugin.py
|
1
|
3651
|
# encoding: utf-8
from GlyphsApp.plugins import *
from math import cos, sin
from glyphmonkey import *
from itertools import izip
from GlyphsApp import LINE
class Comb(FilterWithDialog):
# Definitions of IBOutlets
# The NSView object from the User Interface. Keep this here!
dialog = objc.IBOutlet()
# Text field in dialog
myTextField = objc.IBOutlet()
def settings(self):
self.menuName = Glyphs.localize({'en': u'Comb Effect', 'de': u'Comb'})
# Load dialog from .nib (without .extension)
self.loadNib('IBdialog')
# On dialog show
def start(self):
# Set default setting if not present
if not Glyphs.defaults['org.simon-cozens.comb.teeth']:
Glyphs.defaults['org.simon-cozens.comb.teeth'] = "0,0.05,0.1,0.15,0.2,0.3,0.35,0.65,0.7,0.8,0.85,0.9,0.95,1"
self.myTextField.setStringValue_(Glyphs.defaults['org.simon-cozens.comb.teeth'])
self.myTextField.becomeFirstResponder()
# Action triggered by UI
@objc.IBAction
def setValue_( self, sender ):
# Store value coming in from dialog
Glyphs.defaults['org.simon-cozens.comb.teeth'] = sender.stringValue()
# Trigger redraw
self.update()
# Actual filter
def filter(self, layer, inEditView, customParameters):
# Called on font export, get value from customParameters
if customParameters.has_key('teeth'):
value = customParameters['teeth']
# Called through UI, use stored value
else:
value = Glyphs.defaults['org.simon-cozens.comb.teeth']
# Split teeth into array of arrays
t = map(float,value.split(","))
teeth = zip(t[::2], t[1::2])
self.combIt(layer, teeth)
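    # Illustrative note: the teeth string is consumed as (start, end) pairs of
    # fractional positions along the shape, so a value of "0,0.25,0.75,1"
    # becomes [(0.0, 0.25), (0.75, 1.0)], i.e. two teeth covering the outer
    # quarters of the glyph.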
def combIt(self, layer, teeth):
pathset = []
for a in layer.paths:
# Find the two smallest "ends"
l1, s1, l2, s2 = None, None, None, None
for i in range(0,len(a.segments)):
s = a.segments[i]
if type(s) is GSLineSegment and (not l1 or s.length < l1):
s1 = i
l1 = s.length
for i in range(0,len(a.segments)):
s = a.segments[i]
if type(s) is GSLineSegment and (s.length >= l1 and (not l2 or s.length < l2) and i != s1):
s2 = i
l2 = s.length
if s1 > s2: s1, s2 = s2, s1
print("Identified path end segments:")
print(a.segments[s1], a.segments[s2])
# Find two edges between segments
edge1 = [ a.segments[i] for i in range(s1+1, s2) ]
edge2 = [ a.segments[i] for i in range(s2+1, len(a.segments))]
edge2.extend([a.segments[i] for i in range(0, s1)])
for i in range(0, len(edge2)): edge2[i].reverse()
edge2.reverse()
print("\nIdentified edges")
print("Edge 1:", edge1)
print("Edge 2:", edge2)
print("Teeth ", teeth)
if len(edge1) != len(edge2):
print("Edges not compatible in " + str(layer) + " - differing number of points")
raise TypeError
for tooth in teeth:
start, end = tooth[0],tooth[1]
segs1 = []
segs2 = []
for i in range(0, len(edge1)):
segs1.append(edge1[i].interpolate(edge2[i],start))
segs2.append(edge1[i].interpolate(edge2[i],end))
for i in range(0, len(segs2)): segs2[i].reverse()
segs2.reverse()
segs1.append(GSLineSegment(tuple = (segs1[-1]._seg[-1],segs2[0]._seg[0])))
segs1.extend(segs2)
segs1.append(GSLineSegment(tuple = (segs2[-1]._seg[-1],segs1[0]._seg[0])))
segs = segs1
path = GSPath()
path.parent = a.parent
path.segments = segs
print("Adding ", path, " to ",pathset)
pathset.append(path)
path.closed = True
print(pathset)
layer.paths = pathset
def generateCustomParameter( self ):
return "%s; teeth:%s;" % (self.__class__.__name__, Glyphs.defaults['org.simon-cozens.comb.teeth'] )
|
mit
| -6,872,188,751,832,791,000
| 29.425
| 111
| 0.641468
| false
| 2.825851
| false
| false
| false
|
bucko909/powerpod
|
powerpod/types.py
|
1
|
25068
|
from collections import namedtuple
import datetime
import calendar
import struct
import sys
class StructType(object):
"""
Automatically uses SHAPE to pack/unpack simple structs.
"""
@classmethod
def from_binary(cls, data):
try:
return cls(*cls._decode(*struct.unpack(cls.SHAPE, data)))
except:
sys.stderr.write("Error parsing {!r}\n".format(data))
raise
@staticmethod
def _decode(*args):
""" data from unpack -> data for __init__ """
return args
def to_binary(self):
return struct.pack(self.SHAPE, *self._encode())
def _encode(self):
""" data from self -> data for pack """
return self
@classmethod
def byte_size(cls):
return struct.Struct(cls.SHAPE).size
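# Illustrative only -- not a PowerPod structure. A minimal StructType subclass
# showing how SHAPE drives packing: two little-endian unsigned shorts.
class _ExamplePair(StructType, namedtuple('_ExamplePair', 'first second')):
    SHAPE = '<HH'
# _ExamplePair.from_binary('\x01\x00\x02\x00') == _ExamplePair(first=1, second=2)
# and _ExamplePair(1, 2).to_binary() round-trips back to the same four bytes.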
class StructListType(object):
"""
Automatically uses SHAPE to pack/unpack simple structs which are followed by lists of RECORD_TYPE records.
	You must have 'size' (a record count) or 'data_size' (a byte length) in
	_fields, and a 'records' field to hold the decoded records.
	RECORD_TYPE must have a 'byte_size' classmethod and a 'from_binary' function.
"""
@classmethod
def from_binary(cls, data):
encode = struct.Struct(cls.SHAPE)
header_size = cls.byte_size()
header = encode.unpack(data[:header_size])
record_size = cls.RECORD_TYPE.byte_size()
try:
# Specifies number of records
size_offset = cls._fields.index('size')
record_count = header[size_offset]
assert header_size + record_count * record_size == len(data), (header_size, record_count, record_size, len(data))
except ValueError:
# Specifies length of data
size_offset = cls._fields.index('data_size')
total_size = header[size_offset]
assert len(data) == header_size + total_size, (header_size, total_size, len(data))
assert total_size % record_size == 0, (total_size, record_size)
record_count = header[size_offset] / record_size
raw_records = [data[header_size + record_size * x:header_size + record_size * (x + 1)] for x in range(record_count)]
return cls(*(cls._decode(*header) + (map(cls.RECORD_TYPE.from_binary, raw_records),)))
@staticmethod
def _decode(*args):
""" data from unpack -> data for __init__ """
return args
def to_binary(self):
data_binary = ''.join(record.to_binary() for record in self.records)
if hasattr(self, 'size'):
assert self.size == len(self.records), (self.size, len(self.records))
else:
assert self.data_size == len(data_binary), (self.data_size, data_binary)
return struct.pack(self.SHAPE, *self._encode()) + data_binary
def _encode(self):
""" data from self -> data for pack """
record_offset = self._fields.index('records')
return self[:record_offset] + self[record_offset+1:]
@classmethod
def byte_size(cls):
return struct.Struct(cls.SHAPE).size
TIME_FIELDS = [
('secs', 'b'),
('mins', 'b'),
('hours', 'b'),
('day', 'b'),
('month', 'b'),
('month_length', 'b'),
('year', 'h'),
]
class NewtonTime(StructType, namedtuple('NewtonTime', zip(*TIME_FIELDS)[0])):
SHAPE = '<' + ''.join(zip(*TIME_FIELDS)[1])
def as_datetime(self):
return datetime.datetime(self.year, self.month, self.day, self.hours, self.mins, self.secs)
@classmethod
def from_datetime(cls, datetime):
days_in_month = calendar.monthrange(datetime.year, datetime.month)[1]
return cls(datetime.second, datetime.minute, datetime.hour, datetime.day, datetime.month, days_in_month, datetime.year)
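# Illustrative example: NewtonTime.from_datetime(datetime.datetime(2016, 3, 1, 12, 0, 0))
# gives NewtonTime(secs=0, mins=0, hours=12, day=1, month=3, month_length=31, year=2016),
# which packs into the 8-byte '<bbbbbbh' layout used by the ride structures below.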
PROFILE_FIELDS = [
('unknown_0', 'h'),
# Facts about sample_smoothing flags:
# If I send (in GetProfileData) 0x0000, I get (in SetProfileData) 0x0800.
# If I send 0xffff, I get 0xffdf.
# If I send 0x0539, I get 0x0d19.
# If I send 0x2ef0, I get 0x2ed0.
# Both of these are preserved.
# Conclusion: 0x0800 must be set, 0x0020 must be unset.
# Switching from 5s sample smoothing to 1s sets 0x0008. Setting back unsets it.
# Annoyingly, Isaac only resets to '1 sec' when you 'Get from iBike' -- it'll never reset to '5 sec', so I guess it just checks the flag.
# Conclusion: 0x0008 is the "don't smooth for 5s" flag.
# A reset profile gets 10251 (=0x280b)
('sample_smoothing', 'H', {14554: 1, 14546: 5}),
('unknown_1', 'h'),
('null_1', 'i'),
('null_2', 'h'),
# If I send 0x0000, I get 0x8009.
# If I send 0x8009, I get 0x8009.
# If I send 0xffff, I get 0x8009.
# If I then set the 'user-edited' flag by messing with stuff, I get 0x8005.
# Reset to factory default -> 0x800c
# Save/load profile 0x800c -> 0x8009
# Mess with settings 0x8009 -> 0x8005
# Save/load profile 0x8005 -> 0x8009
# Factory default is actually recognised by model (aero/fric etc) values.
# On a pristine profile, I see 0x800e or 0x800d and it's reset to 0x8009 with just a get/set. On an old recording, I saw it reset to 0x8005 on a user-edit.
# Resetting the profile gets 0x800c. Setting it once (or running through setup) gets 0x800d.
# bit 0 1 2 3
# reset 0 0 1 1
# user-edited 1 0 1 0
# save/load 1 0 0 1
# TODO TODO TODO
('user_edited', 'H', {0x8009: False, 0x8005: True}),
('total_mass_lb', 'h'),
('wheel_circumference_mm', 'h'),
('null_3', 'h'),
('unknown_3', 'h'),
('unknown_2', 'h'),
('unknown_4', 'H'),
('unknown_5', 'h'),
('aero', 'f'),
('fric', 'f'),
('unknown_6', 'f'),
('unknown_7', 'f'),
('unknown_8', 'i'),
('wind_scaling_sqrt', 'f'),
('tilt_cal', 'h'),
('cal_mass_lb', 'h'),
('rider_mass_lb', 'h'),
('unknown_9', 'h'),
# ftp_per_kilo_ish:
# Unaffected by bike weight/total weight. Just rider weight.
# rider(lb) FTP 20min value
# 100 38 40 1 # Min valid
# 100 85 91 1
# 100 86 92 2
# 100 105 111 2
# 100 106 112 3
# 100 120 126 3
# 100 121 127 4
# 100 149 157 4
# 100 150 158 5
# 100 163 172 5
# 100 164 173 6
# 100 183 193 6
# 100 184 194 7
# 100 207 218 7
# 100 208 219 8
# 100 227 239 8
# 100 228 240 9
# 100 247 260 9
# 100 248 261 10 # Stops increasing
# 80 200 211 10 # Stops increasing
# 81 200 211 9
# 88 200 211 9
# 89 200 211 8
# 96 200 211 8
# 97 200 211 7
# 109 200 211 7
# 110 200 211 6
# 122 200 211 6
# 123 200 211 5
# 134 200 211 5
# 135 200 211 4
# 165 200 211 4
# 166 200 211 3
# 189 200 211 3
# 190 200 211 2
# 232 200 211 2
# 233 200 211 1
	# Roughly, this is (ftp_per_kilo-1.2)/0.454 (a rough helper sketching this follows the field table below).
# The values around 3 seem underestimated (formula underestimates).
# I think this is something related to the Coggan scale,
# which goes from 1.26 FTPW/kg to 6.6 FTPW/kg
('ftp_per_kilo_ish', 'h'),
('watts_20_min', 'h'), # = FTP / 0.95
('unknown_a', 'h'), # 0x0301 -> 0x0b01 (+0x0800) when sample rate changed to 1s. Never restored, though!
('speed_id', 'H'),
('cadence_id', 'H'),
('hr_id', 'H'),
('power_id', 'H'),
('speed_type', 'B'),
('cadence_type', 'B'),
('hr_type', 'B'),
('power_type', 'B'),
('power_smoothing_seconds', 'H'),
('unknown_c', 'h'), # 0x0032
]
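# Rough helper sketched from the ftp_per_kilo_ish table above; the 0.454 step and
# the 1.2 offset are the approximations noted in that comment (which also says the
# formula underestimates around 3), so treat this as illustrative, not exact.
def _approx_ftp_per_kilo_ish(ftp_watts, rider_mass_lb):
    ftp_per_kilo = ftp_watts / (rider_mass_lb * 0.4536)
    return max(1, min(10, int(round((ftp_per_kilo - 1.2) / 0.454))))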
class NewtonProfile(StructType, namedtuple('NewtonProfile', zip(*PROFILE_FIELDS)[0])):
SHAPE = '<' + ''.join(zip(*PROFILE_FIELDS)[1])
@classmethod
def _decode(cls, *args):
# Alert when any of these are interesting.
assert args[cls._fields.index('unknown_0')] == 0x5c16, args[cls._fields.index('unknown_0')]
assert args[cls._fields.index('sample_smoothing')] in (0x38d2, 0x38da, 0x380b, 0x38fb, 0x382b, 0x38db, 0x280b), args[cls._fields.index('sample_smoothing')]
assert args[cls._fields.index('unknown_1')] == 0x382b, args[cls._fields.index('unknown_1')]
assert args[cls._fields.index('null_1')] == 0, args[cls._fields.index('null_1')]
assert args[cls._fields.index('null_2')] == 0, args[cls._fields.index('null_2')]
assert args[cls._fields.index('user_edited')] in (0x8009, 0x8005, 0x800d, 0x800c, 0x19, 0x8008), args[cls._fields.index('user_edited')]
assert args[cls._fields.index('null_3')] == 0, args[cls._fields.index('null_3')]
assert args[cls._fields.index('unknown_2')] in (0, 2), args[cls._fields.index('unknown_2')]
assert args[cls._fields.index('unknown_3')] in (0, 0x1988, 0x5f5c), args[cls._fields.index('unknown_3')]
assert args[cls._fields.index('unknown_4')] in (0xbc00, 0xe766, 0, 0x20ff), args[cls._fields.index('unknown_4')]
assert args[cls._fields.index('unknown_5')] in (0, 1), args[cls._fields.index('unknown_5')]
assert args[cls._fields.index('unknown_6')] in (-38.0, -10.0, 0.0), args[cls._fields.index('unknown_6')]
assert args[cls._fields.index('unknown_7')] in (1.0, 0.0), args[cls._fields.index('unknown_7')]
assert args[cls._fields.index('unknown_8')] == 1670644000, args[cls._fields.index('unknown_8')]
assert args[cls._fields.index('unknown_9')] in (1850, 1803), args[cls._fields.index('unknown_9')]
assert args[cls._fields.index('unknown_a')] in (0x0301, 0x0b01, 0x351), args[cls._fields.index('unknown_a')]
assert args[cls._fields.index('unknown_c')] == 50, args[cls._fields.index('unknown_c')]
args = list(args)
args[cls._fields.index('tilt_cal')] = args[cls._fields.index('tilt_cal')] * 0.1
return args
def _encode(self):
return self._replace(tilt_cal=int(round(self.tilt_cal * 10)))
@classmethod
def default(cls):
return cls(
total_mass_lb=205,
user_edited=0x8008,
wheel_circumference_mm=2096,
sample_smoothing=10251,
aero=0.4899250099658966,
fric=11.310999870300293,
unknown_6=0.0,
unknown_7=0.0,
wind_scaling_sqrt=1.1510859727859497,
speed_id=0,
cadence_id=0,
hr_id=0,
power_id=0,
speed_type=0,
cadence_type=0,
hr_type=0,
power_type=0,
tilt_cal=-0.7,
cal_mass_lb=205,
rider_mass_lb=180,
unknown_9=1803,
ftp_per_kilo_ish=1,
watts_20_min=85,
unknown_a=769,
# ^^ SetProfileData
power_smoothing_seconds=1,
unknown_c=50,
# ^^ SetProfileData2
unknown_0=0x5c16,
unknown_1=0x382b,
null_1=0,
null_2=0,
null_3=0,
unknown_3=0,
unknown_2=0,
unknown_4=0,
unknown_5=0,
unknown_8=1670644000,
# ^^^ Complete unknowns
)
def swap_endian(x):
return (x >> 8) + ((x & ((1 << 8) - 1)) << 8)
def to_signed(x, bits):
if x & 1 << (bits - 1):
return x - (1 << bits)
else:
return x
def to_unsigned(x, bits):
if x < 0:
return x + (1 << bits)
else:
return x
IDENTITY = lambda x: x
TO_TIMES_TEN_SIGNED = lambda base: lambda x: to_unsigned(int(x * 10), base)
FROM_TIMES_TEN_SIGNED = lambda base: lambda x: to_signed(x, base) * 0.1
FROM_TIMES_TEN = lambda x: x * 0.1
TO_TIMES_TEN = lambda x: int(x * 10)
RIDE_DATA_FIELDS = [
('elevation_feet', 16, lambda x: to_signed(swap_endian(x), 16), lambda x: swap_endian(to_unsigned(x, 16))),
('cadence', 8, IDENTITY, IDENTITY),
('heart_rate', 8, IDENTITY, IDENTITY),
('temperature_farenheit', 8, lambda x: x - 100, lambda x: x + 100),
('unknown_0', 9, lambda x: to_signed(x, 9), lambda x: to_unsigned(x, 9)),
('tilt', 10, FROM_TIMES_TEN_SIGNED(10), TO_TIMES_TEN_SIGNED(10)),
('speed_mph', 10, FROM_TIMES_TEN, TO_TIMES_TEN),
('wind_tube_pressure_difference', 10, IDENTITY, IDENTITY),
('power_watts', 11, IDENTITY, IDENTITY),
('dfpm_power_watts', 11, IDENTITY, IDENTITY),
('acceleration_maybe', 10, lambda x: to_signed(x, 10), lambda x: to_unsigned(x, 10)),
('stopped_flag_maybe', 1, IDENTITY, IDENTITY),
('unknown_3', 8, IDENTITY, IDENTITY), # if this is large, "drafting" becomes true
]
# unknown_0 seems to be highly correlated to altitude. It might be average or integrated tilt. It seems to affect the /first record/ of the ride in Isaac but not much else (small = high power, big = low power -- which supports it being some sort of tilt offset).
# acceleration_maybe seems negative when stopping, positive in general. My feeling is that it's forward acceleration. I can't get this to affect anything.
# Using 'set profile after the ride' seems to ignore both unknown_0 and acceleration_maybe. I guess they are internal values, but I can only guess what they might do.
assert sum(x[1] for x in RIDE_DATA_FIELDS) == 15 * 8
DECODE_FIFTEEN_BYTES = '{:08b}' * 15
ENCODE_FIFTEEN_BYTES = ''.join('{:0%sb}' % (fielddef[1],) for fielddef in RIDE_DATA_FIELDS)
class NewtonRideData(object):
SHAPE = '15s'
__slots__ = zip(*RIDE_DATA_FIELDS)[0]
def __init__(self, *args):
for name, value in zip(self.__slots__, args):
setattr(self, name, value)
@staticmethod
def byte_size():
# We are not a struct type, but we want to look like one.
return 15
@classmethod
def from_binary(cls, data):
if data.startswith('\xff\xff\xff\xff\xff\xff'):
return NewtonRideDataPaused.from_binary(data)
binary = DECODE_FIFTEEN_BYTES.format(*struct.unpack('15B', data))
vals = []
start = 0
for _name, size, decode, _encode in RIDE_DATA_FIELDS:
value = int(binary[start:start+size], 2)
start += size
vals.append(decode(value))
return cls(*vals)
def to_binary(self):
vals = []
for name, size, _decode, encode in RIDE_DATA_FIELDS:
value = getattr(self, name)
vals.append(encode(value))
binary = ENCODE_FIFTEEN_BYTES.format(*vals)
assert len(binary) == 15 * 8
chopped = [int(binary[x:x+8], 2) for x in range(0, 15*8, 8)]
return struct.pack('15B', *chopped)
@property
def elevation_metres(self):
return self.elevation_feet * 0.3048
def pressure_Pa(self, reference_pressure_Pa=101325, reference_temperature_kelvin=288.15):
return reference_pressure_Pa * (1 - (0.0065 * self.elevation_metres) / reference_temperature_kelvin) ** (9.80665 * 0.0289644 / 8.31447 / 0.0065)
@property
def temperature_kelvin(self):
return (self.temperature_farenheit + 459.67) * 5 / 9
def density(self, reference_pressure_Pa=101325, reference_temperature_kelvin=288.15):
# I say 0.8773 at 22.7778C/2516.7336m; they say 0.8768. Good enough...
# Constants from Wikipedia.
return self.pressure_Pa(reference_pressure_Pa, reference_temperature_kelvin) * 0.0289644 / 8.31447 / self.temperature_kelvin
def wind_speed_kph(self, offset=621, multiplier=13.6355, reference_pressure_Pa=101325, reference_temperature_kelvin=288.15, wind_scaling_sqrt=1.0):
# multiplier based on solving from CSV file
if self.wind_tube_pressure_difference < offset:
return 0.0
return ((self.wind_tube_pressure_difference - offset) / self.density(reference_pressure_Pa, reference_temperature_kelvin) * multiplier) ** 0.5 * wind_scaling_sqrt
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, ', '.join(repr(getattr(self, name)) for name in self.__slots__))
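# Illustrative only: a made-up record showing how the barometric and wind-tube
# formulas above combine. None of these field values come from a real ride.
def _example_wind_speed():
    sample = NewtonRideData(
        1000,  # elevation_feet
        90,    # cadence
        150,   # heart_rate
        68,    # temperature_farenheit
        0,     # unknown_0
        0.0,   # tilt
        20.0,  # speed_mph
        800,   # wind_tube_pressure_difference
        200,   # power_watts
        0,     # dfpm_power_watts
        0,     # acceleration_maybe
        0,     # stopped_flag_maybe
        0,     # unknown_3
    )
    # Air density falls with elevation_feet; wind speed grows with the square
    # root of the pressure difference over that density.
    return sample.density(), sample.wind_speed_kph()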
class NewtonRideDataPaused(StructType, namedtuple('NewtonRideDataPaused', 'tag newton_time unknown_3')):
SHAPE = '<6s8sb'
@staticmethod
def _decode(tag, newton_time_raw, unknown_3):
return (tag, NewtonTime.from_binary(newton_time_raw), unknown_3)
def _encode(self):
return (self.tag, self.newton_time.to_binary(), self.unknown_3)
RIDE_FIELDS = [
('unknown_0', 'h', IDENTITY, IDENTITY, 17), # byte 0 -- 0x1100 observed
('size', 'i', IDENTITY, IDENTITY, 0), # byte 2
('total_mass_lb', 'f', IDENTITY, IDENTITY, 235), # byte 6, always integer?!, could be total mass
('energy_kJ', 'f', IDENTITY, IDENTITY, 0), # byte 10
('aero', 'f', IDENTITY, IDENTITY, 0.384), # byte 14
('fric', 'f', IDENTITY, IDENTITY, 12.0), # byte 18
('initial_elevation_feet', 'f', IDENTITY, IDENTITY, 0), # byte 22, always integer?!
('elevation_gain_feet', 'f', IDENTITY, IDENTITY, 0), # byte 26, always integer?!
('wheel_circumference_mm', 'f', IDENTITY, IDENTITY, 2136.0), # byte 30, always integer?!
('unknown_1', 'h', IDENTITY, IDENTITY, 15), # byte 34, 0x0f00 and 0x0e00 and 0x0e00 observed; multiplying by 10 does nothing observable. TODO is this ftp per kilo ish?
('unknown_2', 'h', IDENTITY, IDENTITY, 1), # byte 36, =1?
('start_time', '8s', NewtonTime.from_binary, NewtonTime.to_binary, NewtonTime(0, 0, 0, 1, 1, 31, 2000)), # byte 38
('pressure_Pa', 'i', IDENTITY, IDENTITY, 101325), # byte 46, appears to be pressure in Pa (observed range 100121-103175) # (setting, reported) = [(113175, 1113), (103175, 1014), (93175, 915), (203175, 1996), (1e9, 9825490), (2e9, 19650979), (-2e9, -19650979)]. Reported value in Isaac (hPa) is this divided by ~101.7761 or multiplied by 0.00982549. This isn't affected by truncating the ride at all. It /is/ affected by unknown_3; if I make unknown_3 -73 from 73, I get (-2e9, -19521083).
('Cm', 'f', IDENTITY, IDENTITY, 1.0204), # byte 50
# average_temperature_farenheit = Average of temperature records. Does not affect displayed temperature in Isaac. It affects displayed pressure in Isaac (bigger temp = closer to pressure_Pa).
# pressure_Pa = 103175
# average_temperature_farenheit = 1, pressure = 1011mbar
# average_temperature_farenheit = 100, pressure = 1015mbar
# average_temperature_farenheit = 10000, pressure = 1031mbar
# pressure_Pa = 1e9
# average_temperature_farenheit = 1, pressure = 9798543mbar
# average_temperature_farenheit = 100, pressure = 9833825mbar
# average_temperature_farenheit = 10000, pressure = 9991024mbar
('average_temperature_farenheit', 'h', IDENTITY, IDENTITY, 73), # byte 54.
('wind_scaling_sqrt', 'f', IDENTITY, IDENTITY, 1.0), # byte 56
('riding_tilt_times_10', 'h', IDENTITY, IDENTITY, 0.0), # byte 60
('cal_mass_lb', 'h', IDENTITY, IDENTITY, 235), # byte 62
('unknown_5', 'h', IDENTITY, IDENTITY, 88), # byte 64, 0x5800 and 0x6000 and 0x5c00 observed; multiplying by 10 doesn't affect: wind speed, pressure, temperature.
('wind_tube_pressure_offset', 'h', lambda x: x - 1024, lambda x: x + 1024, 620), # byte 66, this is a 10-bit signed negative number cast to unsigned and stored in a 16 bit int...
('unknown_7', 'i', IDENTITY, IDENTITY, 0), # byte 68, 0x00000000 observed
('reference_temperature_kelvin', 'h', IDENTITY, IDENTITY, 288), # byte 72, normally 288 (14.85C)
('reference_pressure_Pa', 'i', IDENTITY, IDENTITY, 101325), # byte 74
('unknown_9', 'h', IDENTITY, IDENTITY, 1), # byte 78 -- 0x0100 observed
('unknown_a', 'h', IDENTITY, IDENTITY, 50), # byte 80 -- 0x3200 observed
# byte 82
]
RIDE_DECODE = zip(*RIDE_FIELDS)[2]
RIDE_ENCODE = zip(*RIDE_FIELDS)[3]
RIDE_DEFAULTS = {key: value for key, _, _, _, value in RIDE_FIELDS}
class NewtonRide(StructListType, namedtuple('NewtonRide', zip(*RIDE_FIELDS)[0] + ('records',))):
SHAPE = '<' + ''.join(zip(*RIDE_FIELDS)[1])
RECORD_TYPE = NewtonRideData
@classmethod
def make(cls, data, **kwargs):
kwargs = {}
assert 'size' not in kwargs
assert 'records' not in kwargs
for name in cls._fields[:-1]:
kwargs[name] = RIDE_DEFAULTS[name]
kwargs['records'] = data
kwargs['size'] = len(data)
if data:
# TODO start_time, elevation gain
kwargs['average_temperature_farenheit'] = int(round(sum(x.temperature_farenheit for x in data if hasattr(x, 'temperature_farenheit')) / len(data)))
kwargs['initial_elevation_feet'] = [x.elevation_feet for x in data if hasattr(x, 'elevation_feet')][0]
kwargs['data_records'] = len(data)
kwargs['energy_kJ'] = int(round(sum(x.power_watts for x in data if hasattr(x, 'power_watts')) / 1000))
args = []
for name in cls._fields:
args.append(kwargs[name])
return cls(*args)
def _encode(self):
return tuple(encode(val) for val, encode in zip(self[:-1], RIDE_ENCODE))
@staticmethod
def _decode(*args):
return tuple(decode(val) for val, decode in zip(args, RIDE_DECODE))
def get_header(self):
return NewtonRideHeader(self.unknown_0, self.start_time, sum(x.speed_mph * 1602 / 3600. for x in self.records if isinstance(x, NewtonRideData)))
def fit_to(self, csv):
pure_records = [x for x in self.records if not hasattr(x, 'newton_time')]
csv_data = [float(x['Wind Speed (km/hr)']) for x in csv.data]
compare = [(x, y) for x, y in zip(pure_records, csv_data) if y > 0]
reference_pressure_kPa = self.reference_pressure_Pa / 1000.0
get_errors = lambda offset, multiplier: [pure_record.wind_speed_kph(offset, multiplier, reference_pressure_kPa, self.reference_temperature_kelvin, self.wind_scaling_sqrt) - csv_datum for pure_record, csv_datum in compare]
dirs = [(x, y) for x in range(-1, 2) for y in range(-1, 2) if x != 0 or y != 0]
print dirs
skip = 500
best = current = (500, 10)
best_error = float('inf')
while skip > 0.000001:
new_best = False
for x, y in dirs:
test = (current[0] + x * skip, current[1] + y * skip * 0.02)
if test[1] < 0:
continue
error = sum(map(abs, get_errors(*test)))
#print test, error
if error < best_error:
best = test
best_error = error
new_best = True
if new_best:
current = best
else:
skip *= 0.5
#print best, skip, best_error
errors = get_errors(*best)
return best, best_error, max(map(abs, errors)), ["%0.4f" % (x,) for x in errors]
def fit_elevation(self, csv):
pure_records = [x for x in self.records if not hasattr(x, 'newton_time')]
csv_data = [float(x['Elevation (meters)']) / 0.3048 for x in csv.data]
compare = [(x, y) for x, y in zip(pure_records, csv_data)]
get_errors = lambda mul: [(pure_record.density(), pure_record.elevation_feet, csv_datum, pure_record.elevation_feet - csv_datum, (pure_record.wind_tube_pressure_difference - self.wind_tube_pressure_offset), pure_record.tilt, pure_record.unknown_0, pure_record) for pure_record, csv_datum in compare]
return get_errors(0.1)
class NewtonRideHeader(StructType, namedtuple('NewtonRideHeader', 'unknown_0 start_time distance_metres')):
# \x11\x00
# newton time
# float encoding of ride length in metres.
SHAPE = '<h8sf'
def _encode(self):
return (self.unknown_0, self.start_time.to_binary(), self.distance_metres)
@classmethod
def _decode(cls, unknown_0, start_time_raw, distance_metres):
return (unknown_0, NewtonTime.from_binary(start_time_raw), distance_metres)
def to_filename(self):
return "powerpod.%s-%0.1fkm.raw" % (self.start_time.as_datetime().strftime("%Y-%m-%dT%H-%M-%S"), self.distance_metres / 1000)
class NewtonProfileScreens(StructType):
# Data is laid out as [LEFT, RIGHT]
# Sides are [AGG1, AGG2, AGG3]
# Aggregates are [TOP, MIDDLE, BOTTOM]
# Meaning of indices in metrics
# (unverified, but 'average' is (1, 2, 1) and plain is (0, 2, 1))
AGG_NOW = 0
#AGG_TRIP = 1
AGG_AVG = 2
# Metrics (PowerPod 6.12)
METRIC_SPEED = (0, 2, 1)
METRIC_DISTANCE_POWER = (3, 5, 4)
METRIC_TIME = (6, 6, 6) # I guess no point in anything but 'trip'
METRIC_POWER = (7, 9, 8)
METRIC_OTHER = (10, 12, 11)
METRIC_SLOPE = (13, 15, 14)
METRIC_WIND = (16, 18, 17)
METRIC_BLANK = (19, 22, 20)
METRIC_NORMALISED_POWER = (21, 21, 21) # I guess no point in anything but 'trip'
# Which metrics are valid on which screens?
VALID_TOP = set([METRIC_SPEED, METRIC_WIND, METRIC_SLOPE, METRIC_POWER])
# Add averages.
VALID_TOP.update((z, y, z) for _x, y, z in list(VALID_TOP))
VALID_TOP.add(METRIC_BLANK)
VALID_MIDDLE = set([METRIC_POWER, METRIC_DISTANCE_POWER, METRIC_NORMALISED_POWER, METRIC_WIND, METRIC_BLANK])
VALID_BOTTOM = set([METRIC_TIME, METRIC_OTHER])
VALID = (VALID_BOTTOM, VALID_MIDDLE, VALID_TOP)
# Screens
TOP = 0
MIDDLE = 1
BOTTOM = 2
ROWS = 3
# Sides
LEFT = 0
RIGHT = 1
SIDES = 2
# Any triple is (Now, Trip, Average)
IDENTIFIER = 0x29
SHAPE = 'b' * 18
RESPONSE = None
def __init__(self, data):
self._data = list(data)
@classmethod
def _decode(cls, *args):
return args,
def _encode(self):
return self._data
def set_screen(self, side, row, metric, aggregate):
assert 0 <= side < self.SIDES, side
assert 0 <= row < self.ROWS, row
assert metric in self.VALID[row], (metric, row)
assert aggregate in (self.AGG_AVG, self.AGG_NOW), aggregate
metric = [metric[x] for x in (aggregate, 1, 2)]
for metric_idx in (0, 1, 2):
self._data[self._index(side, row, metric_idx)] = metric[metric_idx]
def to_dict(self):
sides = {}
for side_i, side_n in enumerate(['left', 'right']):
side = sides[side_n] = {}
for row_i, row_n in enumerate(['top', 'middle', 'bottom']):
row = side[row_n] = []
for metric_idx in (0, 1, 2):
row.append(self._data[self._index(side_i, row_i, metric_idx)])
return sides
def __repr__(self):
return "{}.from_dict({})".format(self.__class__.__name__, self.to_dict())
@classmethod
def from_dict(cls, sides):
data = [0] * 18
for side_i, side_n in enumerate(['left', 'right']):
side = sides[side_n]
for row_i, row_n in enumerate(['top', 'middle', 'bottom']):
row = side[row_n]
for metric_idx, value in enumerate(row):
data[cls._index(side_i, row_i, metric_idx)] = value
return cls(data)
@classmethod
def _index(cls, side, row, metric_idx):
return (side * 3 + metric_idx) * cls.ROWS + row
@classmethod
def default(cls):
return cls.from_dict({
'left': {
'top': cls.METRIC_SPEED,
'middle': cls.METRIC_DISTANCE_POWER,
'bottom': cls.METRIC_TIME,
},
'right': {
'top': cls.METRIC_SPEED,
'middle': cls.METRIC_POWER,
'bottom': cls.METRIC_OTHER,
}
})
|
bsd-2-clause
| -8,511,836,035,023,288,000
| 38.230047
| 489
| 0.65079
| false
| 2.74357
| false
| false
| false
|
snd/dejavu
|
recognize.py
|
1
|
1047
|
import sys
import warnings
warnings.filterwarnings("ignore")
import argparse
import timeit
from dejavu import Dejavu
from dejavu.timer import Timer
from dejavu.recognize import FileRecognizer
parser = argparse.ArgumentParser()
parser.add_argument("file", help="the file to recognize")
parser.add_argument(
"-s",
"--secs",
help="how many seconds to fingerprint for recognition",
type=int)
args = parser.parse_args()
# load config from a JSON file (or anything outputting a python dictionary)
config = {
"database": {
"host": "127.0.0.1",
"user": "root",
"passwd": "",
"db": "dejavu"
}
}
if args.secs:
config["fingerprint_limit"] = args.secs
if __name__ == '__main__':
# create a Dejavu instance
djv = Dejavu(config)
# Recognize audio from a file
print("start recognizing")
with Timer("djv.recognize") as t:
song = djv.recognize(FileRecognizer, args.file)
print("From file we recognized: %s\n" % song)
|
mit
| -60,996,768,827,672,870
| 22.266667
| 75
| 0.627507
| false
| 3.561224
| false
| false
| false
|
mgautierfr/ediap
|
libs/painter/__init__.py
|
1
|
3074
|
# This file is part of Edia.
#
# Ediap is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Edia is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Edia. If not, see <http://www.gnu.org/licenses/>
# Copyright 2014 Matthieu Gautier dev@mgautier.fr
import tkinter
import tkinter.ttk
from .actors import *
import language.nodes
from .nodes import *
class ConstantColor(language.nodes.Node):
def __init__(self, r, v, b):
language.nodes.Node.__init__(self)
r = min(max(r, 0), 255)
v = min(max(v, 0), 255)
b = min(max(b, 0), 255)
self.value = "#%02x%02x%02x"%(r,v,b)
self.opositeColor = "#%02x%02x%02x"%(255-r,255-v,255-b)
def depend(self):
return set([self])
def get_value(self):
return self.value
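# Illustrative note: components are clamped to 0-255, so ConstantColor(300, 0, 0)
# yields value "#ff0000" and opositeColor "#00ffff".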
builtins = {
'draw_line' : draw_line,
'draw_rectangle' : draw_rectangle,
'draw_ellipse' : draw_ellipse,
'draw_quad' : draw_quad,
'draw_triangle' : draw_triangle,
'change_color' : change_color
}
constants = {
'red' : ConstantColor(255, 0, 0)
}
nodes = {
'Color' : Color
}
class Context:
def __init__(self, other=None):
if other is None:
self.fillColor = language.nodes.Value("#000000")
self.fillColor.opositColor = "#FFFFFF"
self.shapes = []
else:
self.fillColor = other.fillColor
self.shapes = other.shapes[:]
def __str__(self):
return "<PainterContext\n%s\n%s>"%(self.fillColor, self.shapes)
class ContextShower(tkinter.Frame):
def __init__(self, parent):
tkinter.Frame.__init__(self, parent)
self.canvas = tkinter.Canvas(self, bg="white")
self.canvas['height'] = self.canvas['width']
self.canvas.pack(side="top")
self.canvasState = tkinter.ttk.Treeview(self, columns=('value',))
self.canvasState['height'] = 1
self.canvasState.pack()
def delete(self, what):
self.canvas.delete(what)
def draw(self, context, token, shape_=True):
for shape in context.shapes:
if shape_:
shape.draw(self.canvas)
elif shape_ is False:
shape.update(self.canvas)
if token in shape.depend():
shape.draw_helper(token, self.canvas)
def update_hiddenstate(self, context):
for child in self.canvasState.get_children():
self.canvasState.delete(child)
value = context.fillColor()
self.canvasState.insert("", "end", "fillColor", text="fillColor", value=value, tags=("fillColor",))
self.canvasState.tag_configure("fillColor", background=value, foreground=context.fillColor.opositColor)
|
gpl-2.0
| 42,613,606,250,163,280
| 30.690722
| 111
| 0.634027
| false
| 3.537399
| false
| false
| false
|
pfouque/deezer-python
|
deezer/client.py
|
1
|
7891
|
"""
Implements a client class to query the
`Deezer API <http://developers.deezer.com/api>`_
"""
import json
try: # pragma: no cover - python 2
from urllib import urlencode
from urllib2 import urlopen
except ImportError: # pragma: no cover - python 3
from urllib.parse import urlencode
from urllib.request import urlopen
from deezer.resources import Album, Artist, Comment, Genre
from deezer.resources import Playlist, Radio, Track, User
from deezer.resources import Chart, Resource
class Client(object):
"""
    A client to retrieve some basic info about Deezer resources.
Create a client instance with the provided options. Options should
be passed in to the constructor as kwargs.
>>> import deezer
>>> client = deezer.Client(app_id='foo', app_secret='bar')
    This client provides several methods to retrieve the content of most
    sorts of Deezer objects, based on their json structure.
"""
use_ssl = True
host = "api.deezer.com"
objects_types = {
'album': Album,
'artist': Artist,
'comment': Comment,
'editorial': None,
# 'folder': None, # need identification
'genre': Genre,
'playlist': Playlist,
'radio': Radio,
'search': None,
'track': Track,
'user': User,
'chart' : Chart
}
def __init__(self, **kwargs):
super(Client, self).__init__()
self.use_ssl = kwargs.get('use_ssl', self.use_ssl)
self.host = kwargs.get('host', self.host)
self.options = kwargs
self._authorize_url = None
self.app_id = kwargs.get('app_id')
self.app_secret = kwargs.get('app_secret')
self.access_token = kwargs.get('access_token')
def _process_json(self, item, parent=None):
"""
Recursively convert dictionary
to :class:`~deezer.resources.Resource` object
:returns: instance of :class:`~deezer.resources.Resource`
"""
if 'data' in item:
return [self._process_json(i, parent) for i in item['data']]
result = {}
for key, value in item.items():
if isinstance(value, dict) and ('type' in value or 'data' in value):
value = self._process_json(value, parent)
result[key] = value
if parent is not None and hasattr(parent, 'type'):
result[parent.type] = parent
if 'type' in result:
object_class = self.objects_types.get(result['type'], Resource)
else:
object_class = self.objects_types.get(parent, Resource)
return object_class(self, result)
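    # Illustrative note (not part of the upstream client): a payload such as
    #   {"id": 12, "type": "album", "tracks": {"data": [{"id": 1, "type": "track"}]}}
    # would be turned into an Album resource whose processed dict holds a list of
    # Track resources under "tracks", because nested dicts carrying a "type" or
    # "data" key are converted recursively.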
@staticmethod
def make_str(value):
"""
        Convert value to str in a python 2 and python 3 compatible way
:returns: str instance
"""
try: # pragma: no cover - python 3
value = str(value)
except UnicodeEncodeError: # pragma: no cover - python 2
value = value.encode('utf-8')
return value
@property
def scheme(self):
"""
Get the http prefix for the address depending on the use_ssl attribute
"""
return self.use_ssl and 'https' or 'http'
def url(self, request=''):
"""Build the url with the appended request if provided."""
if request.startswith('/'):
request = request[1:]
return "{0}://{1}/{2}".format(self.scheme, self.host, request)
def object_url(self, object_t, object_id=None, relation=None, **kwargs):
"""
        Helper method to build the url to query in order to access the
        object passed as parameter
:raises TypeError: if the object type is invalid
"""
if object_t not in self.objects_types:
raise TypeError("{0} is not a valid type".format(object_t))
request_items = (object_t, object_id, relation)
request_items = (item for item in request_items if item is not None)
request_items = (str(item) for item in request_items)
request = '/'.join(request_items)
base_url = self.url(request)
if kwargs:
for key, value in kwargs.items():
if not isinstance(value, str):
kwargs[key] = self.make_str(value)
result = '{0}?{1}'.format(base_url, urlencode(kwargs))
else:
result = base_url
return result
def get_object(self, object_t, object_id=None, relation=None, parent=None,
**kwargs):
"""
Actually query the Deezer API to retrieve the object
:returns: json dictionary
"""
url = self.object_url(object_t, object_id, relation, **kwargs)
response = urlopen(url)
resp_str = response.read().decode('utf-8')
response.close()
jsn = json.loads(resp_str)
return self._process_json(jsn, parent)
def get_chart(self, relation=None, **kwargs):
"""
Get chart
:returns: a list of :class:`~deezer.resources.Resource` objects.
"""
return self.get_object("chart", object_id='0', relation=relation,
parent="chart", **kwargs)
def get_album(self, object_id, relation=None, **kwargs):
"""
Get the album with the provided id
:returns: an :class:`~deezer.resources.Album` object
"""
return self.get_object("album", object_id, relation=relation, **kwargs)
def get_artist(self, object_id, relation=None, **kwargs):
"""
Get the artist with the provided id
:returns: an :class:`~deezer.resources.Artist` object
"""
return self.get_object("artist", object_id, relation=relation, **kwargs)
def get_comment(self, object_id):
"""
Get the comment with the provided id
:returns: a :class:`~deezer.resources.Comment` object
"""
return self.get_object("comment", object_id)
def get_genre(self, object_id):
"""
Get the genre with the provided id
:returns: a :class:`~deezer.resources.Genre` object
"""
return self.get_object("genre", object_id)
def get_genres(self):
"""
:returns: a list of :class:`~deezer.resources.Genre` objects.
"""
return self.get_object("genre")
def get_playlist(self, object_id):
"""
Get the playlist with the provided id
:returns: a :class:`~deezer.resources.Playlist` object
"""
return self.get_object("playlist", object_id)
def get_radio(self, object_id=None):
"""
Get the radio with the provided id.
:returns: a :class:`~deezer.resources.Radio` object
"""
return self.get_object("radio", object_id)
def get_radios(self):
"""
Get a list of radios.
:returns: a list of :class:`~deezer.resources.Radio` objects
"""
return self.get_object("radio")
def get_radios_top(self):
"""
Get the top radios (5 radios).
:returns: a :class:`~deezer.resources.Radio` object
"""
return self.get_object("radio", relation="top")
def get_track(self, object_id):
"""
Get the track with the provided id
:returns: a :class:`~deezer.resources.Track` object
"""
return self.get_object("track", object_id)
def get_user(self, object_id):
"""
Get the user with the provided id
:returns: a :class:`~deezer.resources.User` object
"""
return self.get_object("user", object_id)
def search(self, query, relation=None, **kwargs):
"""
Search track, album, artist or user
:returns: a list of :class:`~deezer.resources.Resource` objects.
"""
return self.get_object("search", relation=relation, q=query, **kwargs)
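# --- Illustrative usage sketch (not part of the upstream module) ---
# Shows how object_url() assembles request URLs; it only builds strings, so no
# network access or valid credentials are required. The app_id/app_secret
# values below are placeholders.
def _object_url_examples():
    client = Client(app_id='foo', app_secret='bar')
    assert client.object_url('album', 12) == 'https://api.deezer.com/album/12'
    assert client.object_url('album', 12, 'tracks') == 'https://api.deezer.com/album/12/tracks'
    # keyword arguments are appended as a query string via urlencode
    return client.object_url('search', q='Daft Punk')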
|
mit
| -2,699,333,509,705,244,000
| 30.564
| 80
| 0.582816
| false
| 4.052902
| false
| false
| false
|
gajim/gajim
|
win/misc/create-launcher.py
|
1
|
5971
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
"""Creates simple Python .exe launchers for gui and cli apps
./create-launcher.py "3.8.0" <target-dir>
"""
import os
import sys
import subprocess
import shlex
import tempfile
import shutil
import struct
from distutils.spawn import find_executable
def build_resource(rc_path, out_path):
"""Raises subprocess.CalledProcessError"""
def is_64bit():
return struct.calcsize("P") == 8
subprocess.check_call(
["windres", "-O", "coff", "-F",
"pe-x86-64" if is_64bit() else "pe-i386", rc_path,
"-o", out_path])
def get_build_args():
python_name = os.path.splitext(os.path.basename(sys.executable))[0]
python_config = os.path.join(
os.path.dirname(sys.executable), python_name + "-config")
cflags = subprocess.check_output(
["sh", python_config, "--cflags"]).strip()
libs = subprocess.check_output(
["sh", python_config, "--libs"]).strip()
cflags = os.fsdecode(cflags)
libs = os.fsdecode(libs)
return shlex.split(cflags) + shlex.split(libs)
def build_exe(source_path, resource_path, is_gui, out_path):
args = ["gcc", "-s"]
if is_gui:
args.append("-mwindows")
args.extend(["-o", out_path, source_path, resource_path])
args.extend(get_build_args())
subprocess.check_call(args)
def get_launcher_code(debug):
template = """\
#include "Python.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <shellapi.h>
int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance,
LPSTR lpCmdLine, int nCmdShow)
{
int result;
LPWSTR *szArglist;
int nArgs;
int i;
szArglist = CommandLineToArgvW(GetCommandLineW(), &nArgs);
if( NULL == szArglist )
{
printf("CommandLineToArgvW failed");
return 0;
}
Py_NoUserSiteDirectory = 1;
Py_IgnoreEnvironmentFlag = 1;
Py_DontWriteBytecodeFlag = 1;
Py_FrozenFlag = 1;
Py_Initialize();
PySys_SetArgvEx(__argc, szArglist, 0);
result = PyRun_SimpleString(
"import sys; import os;"
"os.environ['GAJIM_DEBUG'] = '%s';"
"sys.frozen=True;"
"from pathlib import Path;"
"root_path = Path(sys.executable).parents[1];"
"from ctypes import windll;"
"windll.kernel32.SetDllDirectoryW(str(root_path / 'bin'));"
"from gajim import gajim;"
"gajim.main();");
Py_Finalize();
return result;
}
""" % int(debug)
return template
def get_resouce_code(filename, file_version, file_desc, icon_path,
product_name, product_version, company_name):
template = """\
1 ICON "%(icon_path)s"
1 VERSIONINFO
FILEVERSION %(file_version_list)s
PRODUCTVERSION %(product_version_list)s
FILEOS 0x4
FILETYPE 0x1
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904E4"
BEGIN
VALUE "CompanyName", "%(company_name)s"
VALUE "FileDescription", "%(file_desc)s"
VALUE "FileVersion", "%(file_version)s"
VALUE "InternalName", "%(internal_name)s"
VALUE "OriginalFilename", "%(filename)s"
VALUE "ProductName", "%(product_name)s"
VALUE "ProductVersion", "%(product_version)s"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1252
END
END
"""
def to_ver_list(v):
v = v.split("-")[0]
return ",".join(map(str, (list(map(int, v.split("."))) + [0] * 4)[:4]))
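    # Hedged illustration (not upstream code): to_ver_list("1.4.0-beta1") drops
    # the pre-release suffix and pads to four components -> "1,4,0,0".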
file_version_list = to_ver_list(file_version)
product_version_list = to_ver_list(product_version)
return template % {
"icon_path": icon_path, "file_version_list": file_version_list,
"product_version_list": product_version_list,
"file_version": file_version, "product_version": product_version,
"company_name": company_name, "filename": filename,
"internal_name": os.path.splitext(filename)[0],
"product_name": product_name, "file_desc": file_desc,
}
def build_launcher(out_path, icon_path, file_desc, product_name, product_version,
company_name, is_gui, debug=False):
src_ico = os.path.abspath(icon_path)
target = os.path.abspath(out_path)
file_version = product_version
dir_ = os.getcwd()
temp = tempfile.mkdtemp()
try:
os.chdir(temp)
with open("launcher.c", "w") as h:
h.write(get_launcher_code(debug))
shutil.copyfile(src_ico, "launcher.ico")
with open("launcher.rc", "w") as h:
h.write(get_resouce_code(
os.path.basename(target), file_version, file_desc,
"launcher.ico", product_name, product_version, company_name))
build_resource("launcher.rc", "launcher.res")
build_exe("launcher.c", "launcher.res", is_gui, target)
finally:
os.chdir(dir_)
shutil.rmtree(temp)
def main():
argv = sys.argv
version = argv[1]
target = argv[2]
company_name = "Gajim"
misc = os.path.dirname(os.path.realpath(__file__))
build_launcher(
os.path.join(target, "Gajim.exe"),
os.path.join(misc, "gajim.ico"), "Gajim", "Gajim",
version, company_name, True)
build_launcher(
os.path.join(target, "Gajim-Debug.exe"),
os.path.join(misc, "gajim.ico"), "Gajim", "Gajim",
version, company_name, False, debug=True)
# build_launcher(
# os.path.join(target, "history_manager.exe"),
# os.path.join(misc, "gajim.ico"), "History Manager", "History Manager",
# version, company_name, 'history_manager.py', True)
if __name__ == "__main__":
main()
|
gpl-3.0
| -4,010,021,185,229,620,700
| 27.706731
| 81
| 0.610283
| false
| 3.367738
| false
| false
| false
|
geometalab/Vector-Tiles-Reader-QGIS-Plugin
|
plugin/util/global_map_tiles.py
|
1
|
14349
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL2Tiles, Google Summer of Code 2007 & 2008
# Global Map Tiles Classes
# Purpose: Convert a raster into TMS tiles, create KML SuperOverlay EPSG:4326,
# generate a simple HTML viewers based on Google Maps and OpenLayers
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
#
###############################################################################
# Copyright (c) 2008 Klokan Petr Pridal. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
"""
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:900913 = EPSG:3785)
for Google Maps, Yahoo Maps, Microsoft Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language,
or find it useful for your project, please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
import math
class GlobalMercator(object):
"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:900913 (EPSG:gOOglE, Google Maps Global Mercator), EPSG:3785, OSGEO:41001.
Such tiles are compatible with Google Maps, Microsoft Virtual Earth, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in metres XY pixels Z zoom XYZ from TMS
EPSG:4326 EPSG:900913
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:900913?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
    Constant 20037508.342789244 is half of the Earth's circumference in meters
    (the full circumference is about 40 thousand kilometers); the coordinate origin is in the middle of the extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:900913
    Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:900913?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
Google placed the origin [0,0] to the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yeh?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
    they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
    0.33 percent scale distortion in the Y direction, which is not visually noticeable.
How do I create a raster in EPSG:900913 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
    All of the tools support -t_srs 'epsg:900913'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
    The same projection is defined as EPSG:3785. WKT definition is in the official
EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPGS:900913:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.2572235630016,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tileSize << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:900913 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tileSize, ty * self.tileSize, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tileSize, (ty + 1) * self.tileSize, zoom)
return (minx, miny, maxx, maxy)
    def TileLatLonBounds(self, tx, ty, zoom):
        "Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2 ** zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(30):
if pixelSize > self.Resolution(i):
return i - 1 if i != 0 else 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2 ** zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2 ** zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
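# --- Illustrative usage sketch (not part of the original gdal2tiles module) ---
# Walks the coordinate chain described in the GlobalMercator docstring:
# LatLon -> Meters -> TMS tile -> Google tile / QuadTree name. The sample
# lat/lon/zoom values are arbitrary.
def _mercator_example(lat=47.37, lon=8.54, zoom=10):
    mercator = GlobalMercator()
    mx, my = mercator.LatLonToMeters(lat, lon)     # EPSG:900913 metres
    tx, ty = mercator.MetersToTile(mx, my, zoom)   # TMS tile indices
    gx, gy = mercator.GoogleTile(tx, ty, zoom)     # Google/XYZ convention
    quadkey = mercator.QuadTree(tx, ty, zoom)      # Microsoft QuadTree name
    bounds = mercator.TileBounds(tx, ty, zoom)     # tile extent in metres
    return (mx, my), (tx, ty), (gx, gy), quadkey, bounds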
# ---------------------
class GlobalGeodetic(object):
"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
    The pyramid has two tiles on its top level, so it is not square but rectangular.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tileSize=256):
self.tileSize = tileSize
def LatLonToPixels(self, lat, lon, zoom):
"Converts lat/lon to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = 180 / 256.0 / 2 ** zoom
        px = (180 + lon) / res
        py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tileSize)) - 1)
ty = int(math.ceil(py / float(self.tileSize)) - 1)
return tx, ty
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return 180 / 256.0 / 2 ** zoom
# return 180 / float( 1 << (8+zoom) )
    def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = 180 / 256.0 / 2 ** zoom
return (tx * 256 * res - 180, ty * 256 * res - 90, (tx + 1) * 256 * res - 180, (ty + 1) * 256 * res - 90)
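# --- Illustrative usage sketch (not part of the original module) ---
# Maps a lat/lon to pixel and tile coordinates in the EPSG:4326 pyramid
# described in the GlobalGeodetic docstring; the sample values are arbitrary.
def _geodetic_example(lat=47.37, lon=8.54, zoom=5):
    geodetic = GlobalGeodetic()
    px, py = geodetic.LatLonToPixels(lat, lon, zoom)
    tx, ty = geodetic.PixelsToTile(px, py)
    return (px, py), (tx, ty)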
|
gpl-2.0
| 6,512,009,764,957,224,000
| 40.351585
| 113
| 0.621298
| false
| 3.771091
| false
| false
| false
|
droodle/kansha
|
kansha/card/comp.py
|
1
|
12065
|
# -*- coding:utf-8 -*-
# --
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
# --
import dateutil.parser
from nagare import component, log, security
from nagare.i18n import _, _L
from .models import DataCard
from ..checklist import comp as checklist
from ..label import comp as label
from ..comment import comp as comment
from ..vote import comp as vote
from ..description import comp as description
from ..due_date import comp as due_date
from ..title import comp as title
from ..user import usermanager
from .. import exceptions, notifications
from ..toolbox import overlay
from ..gallery import comp as gallery
from nagare import editor
from nagare import validator
# WEIGHTING TYPES
WEIGHTING_OFF = 0
WEIGHTING_FREE = 1
WEIGHTING_LIST = 2
class NewCard(object):
"""New card component
"""
def __init__(self, column):
self.column = column
self.needs_refresh = False
def toggle_refresh(self):
self.needs_refresh = not self.needs_refresh
class Card(object):
"""Card component
"""
max_shown_members = 3
def __init__(self, id_, column, assets_manager, data=None):
"""Initialization
In:
- ``id_`` -- the id of the card in the database
- ``column`` -- father
"""
self.db_id = id_
self.id = 'card_' + str(self.db_id)
self.column = column
self.assets_manager = assets_manager
self._data = data
self.reload(data if data else self.data)
@property
def board(self):
return self.column.board
def reload(self, data=None):
"""Refresh the sub components
"""
data = data if data else self.data
self.title = component.Component(CardTitle(self))
self.checklists = component.Component(checklist.Checklists(self))
self.description = component.Component(CardDescription(self))
self.due_date = component.Component(due_date.DueDate(self))
self.gallery = component.Component(gallery.Gallery(self, self.assets_manager))
self.comments = component.Component(comment.Comments(self, data.comments))
self.flow = component.Component(CardFlow(self, self.comments, self.gallery))
self.labels = component.Component(label.CardLabels(self))
self.votes = component.Component(vote.Votes(self))
self.author = component.Component(usermanager.get_app_user(data.author.username, data=data.author))
self._weight = component.Component(CardWeightEditor(self))
# members part of the card
self.overlay_add_members = component.Component(
overlay.Overlay(lambda r: '+',
lambda r: component.Component(self).render(r, model='add_member_overlay'), dynamic=True, cls='card-overlay'))
self.new_member = component.Component(usermanager.AddMembers(self.autocomplete_method)).on_answer(self.add_members)
self.members = [component.Component(usermanager.get_app_user(member.username, data=member))
for member in data.members]
self.see_all_members = component.Component(overlay.Overlay(lambda r: "%s more..." % (len(self.members) - self.max_shown_members),
lambda r: component.Component(self).on_answer(self.remove_member).render(r, model='members_list_overlay'),
dynamic=False, cls='card-overlay'))
@property
def favorites(self):
"""Return favorites users for a given card
Ask favorites to self.column
Store favorites in self._favorites to avoid CallbackLookupError
Return:
            - list of favorites (User instances) wrapped in a component
"""
self._favorites = [component.Component(usermanager.get_app_user(username), "friend").on_answer(self.add_members)
for (username, _) in sorted(self.column.favorites.items(), key=lambda e:-e[1])[:5]
if username not in [member().username for member in self.members]]
return self._favorites
@property
def data(self):
"""Return the card object from the database
"""
if self._data is None:
self._data = DataCard.get(self.db_id)
return self._data
def __getstate__(self):
self._data = None
return self.__dict__
@property
def weight(self):
return self.data.weight
@weight.setter
def weight(self, value):
values = {'from': self.data.weight, 'to': value, 'card': self.data.title}
notifications.add_history(self.column.board.data, self.data, security.get_user().data, u'card_weight', values)
self.data.weight = value
def set_title(self, title):
"""Set title
In:
- ``title`` -- new title
"""
values = {'from': self.data.title, 'to': title}
notifications.add_history(self.column.board.data, self.data, security.get_user().data, u'card_title', values)
self.data.title = title
def get_title(self):
"""Get title
Return :
- the card title
"""
return self.data.title
def delete(self):
"""Delete itself"""
self.gallery().delete_assets()
DataCard.delete_card(self.data)
def move_card(self, card_index, column):
"""Move card
In:
- ``card_index`` -- new index of the card
- ``column`` -- new father
"""
data_card = self.data
data_card.index = card_index
column.data.cards.append(data_card)
self.column = column
    def get_authorized_users(self):
        """Return users who are authorized to be added to this card
        Return:
            - a set of users (UserData instances)
"""
return set(self.column.get_authorized_users()) | set(self.column.get_pending_users()) - set(self.data.members)
def autocomplete_method(self, value):
""" """
return [u for u in usermanager.UserManager.search(value) if u in self.get_authorized_users()]
def get_available_labels(self):
return self.column.get_available_labels()
#################
# Members methods
#################
def add_members(self, emails):
"""Add new members from emails
In:
            - ``emails`` -- emails as a comma-separated string or a list of strings
Return:
- JS code, reload card and hide overlay
"""
members = []
if isinstance(emails, (str, unicode)):
emails = [e.strip() for e in emails.split(',') if e.strip() != '']
# Get all users with emails
for email in emails:
new_member = usermanager.UserManager.get_by_email(email)
if new_member:
members.append(new_member)
self._add_members(members)
return "YAHOO.kansha.reload_cards['%s']();YAHOO.kansha.app.hideOverlay();" % self.id
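    # Illustrative note (not upstream code): add_members("a@x.org, b@y.org")
    # splits the string on commas, resolves each address through
    # usermanager.UserManager.get_by_email, attaches the matching users and
    # returns the JS snippet that reloads the card and hides the overlay.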
def _add_members(self, new_data_members):
"""Add members to a card
In:
            - ``new_data_members`` -- all UserData instances to attach to the card
Return:
- list of new DataMembers added
"""
res = []
for new_data_member in new_data_members:
if self.add_member(new_data_member):
res.append(new_data_member)
values = {'user_id': new_data_member.username, 'user': new_data_member.fullname, 'card': self.data.title}
notifications.add_history(self.column.board.data, self.data, security.get_user().data, u'card_add_member', values)
return res
def add_member(self, new_data_member):
"""Attach new member to card
In:
- ``new_data_member`` -- UserData instance
Return:
- the new DataMember added
"""
data = self.data
if (new_data_member not in data.members and
new_data_member in self.get_authorized_users()):
log.debug('Adding %s to members' % (new_data_member.username,))
data.members.append(new_data_member)
self.members.append(component.Component(usermanager.get_app_user(new_data_member.username, data=new_data_member)))
return new_data_member
def remove_member(self, username):
"""Remove member username from card member"""
data_member = usermanager.UserManager().get_by_username(username)
if data_member:
log.debug('Removing %s from card %s' % (username, self.id))
data = self.data
data.members.remove(data_member)
for member in self.members:
if member().username == username:
self.members.remove(member)
values = {'user_id': member().username, 'user': member().data.fullname, 'card': data.title}
notifications.add_history(self.column.board.data, data, security.get_user().data, u'card_remove_member', values)
else:
raise exceptions.KanshaException(_("User not found : %s" % username))
def remove_board_member(self, member):
"""Remove member from board
Remove member from board. If member is linked to a card, remove it
from the list of members
In:
- ``member`` -- Board Member instance to remove
"""
self.data.remove_board_member(member)
self.members = [component.Component(usermanager.get_app_user(m.username, data=m))
for m in self.data.members]
# Cover methods
def make_cover(self, asset):
"""Make card cover with asset
In:
- ``asset`` -- New cover, Asset component
"""
self.data.make_cover(asset)
def has_cover(self):
return self.data.cover is not None
def get_cover(self):
return gallery.Asset(self.data.cover, self.assets_manager)
def remove_cover(self):
self.data.remove_cover()
def new_start_from_ajax(self, request, response):
start = dateutil.parser.parse(request.GET['start']).date()
self.due_date().set_value(start)
class CardTitle(title.Title):
"""Card title component
"""
model = DataCard
field_type = 'input'
class CardDescription(description.Description):
    # We work on cards
model = DataCard
type = _L('card')
class CardFlow(object):
"""Flow of comments, pictures, and so on, associated to a card"""
def __init__(self, card, *source_components):
"""Init method
In:
- ``source_components`` -- Components
- on an object inheriting from FlowSource
- having a "flow" view
"""
self.card = card
self.source_components = source_components
@property
def elements(self):
res = []
for s in self.source_components:
res.extend(s().flow_elements)
return sorted(res, key=lambda el: getattr(el(), 'creation_date', ''), reverse=True)
class CardWeightEditor(editor.Editor):
""" Card weight Form
"""
fields = {'weight'}
def __init__(self, target, *args):
"""
In:
- ``target`` -- Card instance
"""
super(CardWeightEditor, self).__init__(target, self.fields)
self.weight.validate(self.validate_weight)
def validate_weight(self, value):
"""
Integer or empty
"""
if value:
validator.IntValidator(value).to_int()
return value
@property
def board(self):
return self.target.board
def commit(self):
if self.is_validated(self.fields):
super(CardWeightEditor, self).commit(self.fields)
return True
return False
|
bsd-3-clause
| -3,670,669,339,380,194,000
| 31.964481
| 173
| 0.594115
| false
| 4.077391
| false
| false
| false
|
mcalmer/spacewalk
|
client/rhel/rhnlib/rhn/rpclib.py
|
1
|
24163
|
#
# This module contains all the RPC-related functions the RHN code uses
#
# Copyright (c) 2005--2018 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
__version__ = "$Revision$"
import socket
import re
import sys
from rhn import transports
from rhn.i18n import sstr
from rhn.UserDictCase import UserDictCase
try: # python2
import xmlrpclib
from types import ListType, TupleType, StringType, UnicodeType, DictType, DictionaryType
from urllib import splittype, splithost
except ImportError: # python3
import xmlrpc.client as xmlrpclib
ListType = list
TupleType = tuple
StringType = bytes
UnicodeType = str
DictType = dict
DictionaryType = dict
from urllib.parse import splittype, splithost
# Redirection handling
MAX_REDIRECTIONS = 5
def check_ipv6(n):
""" Returns true if n is IPv6 address, false otherwise. """
try:
socket.inet_pton(socket.AF_INET6, n)
return True
except:
return False
def split_host(hoststring):
    """ Split host information in a URL per RFC 2396.
        Handles a full host string like user:passwd@host:port
"""
l = hoststring.split('@', 1)
host = None
port = None
user = None
passwd = None
if len(l) == 2:
hostport = l[1]
# userinfo present
userinfo = l[0].split(':', 1)
user = userinfo[0]
if len(userinfo) == 2:
passwd = userinfo[1]
else:
hostport = l[0]
# Now parse hostport
if hostport[0] == '[':
# IPv6 with port
        host, port = re.split(r'(?<=\]):', hostport, 1)
host = host.lstrip('[').rstrip(']')
elif check_ipv6(hostport):
# just IPv6
host = hostport
else:
# IPv4
arr = hostport.split(':', 1)
host = arr[0]
if len(arr) == 2:
port = arr[1]
return (host, port, user, passwd)
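# --- Illustrative sketch (not part of the original rhnlib module) ---
# A few worked examples of split_host(); the host names are placeholders.
def _split_host_examples():
    assert split_host("example.com") == ("example.com", None, None, None)
    assert split_host("user:secret@example.com:80") == ("example.com", "80", "user", "secret")
    assert split_host("[2001:db8::1]:443") == ("2001:db8::1", "443", None, None)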
def get_proxy_info(proxy):
if proxy == None:
raise ValueError("Host string cannot be null")
arr = proxy.split('://', 1)
if len(arr) == 2:
# scheme found, strip it
proxy = arr[1]
return split_host(proxy)
class MalformedURIError(IOError):
pass
# Originally taken from xmlrpclib.ServerProxy; most of the code has since been changed
class Server:
"""uri [,options] -> a logical connection to an XML-RPC server
uri is the connection point on the server, given as
scheme://host/target.
The standard implementation always supports the "http" scheme. If
SSL socket support is available (Python 2.0), it also supports
"https".
If the target part and the slash preceding it are both omitted,
"/RPC2" is assumed.
The following options can be given as keyword arguments:
transport: a transport factory
encoding: the request encoding (default is UTF-8)
verbose: verbosity level
proxy: use an HTTP proxy
username: username for authenticated HTTP proxy
password: password for authenticated HTTP proxy
All 8-bit strings passed to the server proxy are assumed to use
the given encoding.
"""
# Default factories
_transport_class = transports.Transport
_transport_class_https = transports.SafeTransport
_transport_class_proxy = transports.ProxyTransport
_transport_class_https_proxy = transports.SafeProxyTransport
def __init__(self, uri, transport=None, encoding=None, verbose=0,
proxy=None, username=None, password=None, refreshCallback=None,
progressCallback=None, timeout=None):
# establish a "logical" server connection
#
# First parse the proxy information if available
#
if proxy != None:
(ph, pp, pu, pw) = get_proxy_info(proxy)
if pp is not None:
proxy = "%s:%s" % (ph, pp)
else:
proxy = ph
# username and password will override whatever was passed in the
# URL
if pu is not None and username is None:
username = pu
if pw is not None and password is None:
password = pw
self._uri = sstr(uri)
self._refreshCallback = None
self._progressCallback = None
self._bufferSize = None
self._proxy = proxy
self._username = username
self._password = password
self._timeout = timeout
if len(__version__.split()) > 1:
self.rpc_version = __version__.split()[1]
else:
self.rpc_version = __version__
self._reset_host_handler_and_type()
if transport is None:
self._allow_redirect = 1
transport = self.default_transport(self._type, proxy, username,
password, timeout)
else:
#
            # don't allow redirect on unknown transports; that should be
            # set up independently
#
self._allow_redirect = 0
self._redirected = None
self.use_handler_path = 1
self._transport = transport
self._trusted_cert_files = []
self._lang = None
self._encoding = encoding
self._verbose = verbose
self.set_refresh_callback(refreshCallback)
self.set_progress_callback(progressCallback)
# referer, which redirect us to new handler
self.send_handler=None
self._headers = UserDictCase()
def default_transport(self, type, proxy=None, username=None, password=None,
timeout=None):
if proxy:
if type == 'https':
transport = self._transport_class_https_proxy(proxy,
proxyUsername=username, proxyPassword=password, timeout=timeout)
else:
transport = self._transport_class_proxy(proxy,
proxyUsername=username, proxyPassword=password, timeout=timeout)
else:
if type == 'https':
transport = self._transport_class_https(timeout=timeout)
else:
transport = self._transport_class(timeout=timeout)
return transport
def allow_redirect(self, allow):
self._allow_redirect = allow
def redirected(self):
if not self._allow_redirect:
return None
return self._redirected
def set_refresh_callback(self, refreshCallback):
self._refreshCallback = refreshCallback
self._transport.set_refresh_callback(refreshCallback)
def set_buffer_size(self, bufferSize):
self._bufferSize = bufferSize
self._transport.set_buffer_size(bufferSize)
def set_progress_callback(self, progressCallback, bufferSize=16384):
self._progressCallback = progressCallback
self._transport.set_progress_callback(progressCallback, bufferSize)
def _req_body(self, params, methodname):
return xmlrpclib.dumps(params, methodname, encoding=self._encoding)
def get_response_headers(self):
if self._transport:
return self._transport.headers_in
return None
def get_response_status(self):
if self._transport:
return self._transport.response_status
return None
def get_response_reason(self):
if self._transport:
return self._transport.response_reason
return None
def get_content_range(self):
"""Returns a dictionary with three values:
length: the total length of the entity-body (can be None)
first_byte_pos: the position of the first byte (zero based)
last_byte_pos: the position of the last byte (zero based)
The range is inclusive; that is, a response 8-9/102 means two bytes
"""
headers = self.get_response_headers()
if not headers:
return None
content_range = headers.get('Content-Range')
if not content_range:
return None
arr = filter(None, content_range.split())
assert arr[0] == "bytes"
assert len(arr) == 2
arr = arr[1].split('/')
assert len(arr) == 2
brange, total_len = arr
if total_len == '*':
# Per RFC, the server is allowed to use * if the length of the
# entity-body is unknown or difficult to determine
total_len = None
else:
total_len = int(total_len)
start, end = brange.split('-')
result = {
'length' : total_len,
'first_byte_pos' : int(start),
'last_byte_pos' : int(end),
}
return result
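    # Illustrative note (not upstream code): a response header such as
    #   Content-Range: bytes 8-9/102
    # is parsed into {'length': 102, 'first_byte_pos': 8, 'last_byte_pos': 9};
    # a total length of '*' yields 'length': None.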
def accept_ranges(self):
headers = self.get_response_headers()
if not headers:
return None
if 'Accept-Ranges' in headers:
return headers['Accept-Ranges']
return None
def _reset_host_handler_and_type(self):
""" Reset the attributes:
self._host, self._handler, self._type
            according to the value of self._uri.
"""
# get the url
type, uri = splittype(self._uri)
if type is None:
raise MalformedURIError("missing protocol in uri")
# with a real uri passed in, uri will now contain "//hostname..." so we
# need at least 3 chars for it to maybe be ok...
if len(uri) < 3 or uri[0:2] != "//":
raise MalformedURIError
self._type = type.lower()
if self._type not in ("http", "https"):
raise IOError("unsupported XML-RPC protocol")
self._host, self._handler = splithost(uri)
if not self._handler:
self._handler = "/RPC2"
    def _strip_characters(self, *args):
        """ Strip characters which are not allowed according to:
http://www.w3.org/TR/2006/REC-xml-20060816/#charsets
From spec:
Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] /* any Unicode character, excluding the surrogate blocks, FFFE, and FFFF. */
"""
regexp = r'[\x00-\x09]|[\x0b-\x0c]|[\x0e-\x1f]'
result=[]
for item in args:
item_type = type(item)
if item_type == StringType or item_type == UnicodeType:
item = re.sub(regexp, '', sstr(item))
elif item_type == TupleType:
item = tuple(self._strip_characters(i) for i in item)
elif item_type == ListType:
item = [self._strip_characters(i) for i in item]
elif item_type == DictType or item_type == DictionaryType:
item = dict([(self._strip_characters(name, val)) for name, val in item.items()])
# else: some object - should take care of himself
# numbers - are safe
result.append(item)
if len(result) == 1:
return result[0]
else:
return tuple(result)
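    # Illustrative note (not upstream code): characters outside the XML 1.0
    # range are removed before the request body is serialized, so a string like
    # "abc\x00\x1fdef" becomes "abcdef"; tuples, lists and dicts are cleaned
    # recursively and other values pass through untouched.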
def _request(self, methodname, params):
""" Call a method on the remote server
we can handle redirections. """
# the loop is used to handle redirections
redirect_response = 0
retry = 0
self._reset_host_handler_and_type()
while 1:
if retry >= MAX_REDIRECTIONS:
raise InvalidRedirectionError(
"Unable to fetch requested Package")
# Clear the transport headers first
self._transport.clear_headers()
for k, v in self._headers.items():
self._transport.set_header(k, v)
self._transport.add_header("X-Info",
'RPC Processor (C) Red Hat, Inc (version %s)' %
self.rpc_version)
# identify the capability set of this client to the server
self._transport.set_header("X-Client-Version", 1)
if self._allow_redirect:
# Advertise that we follow redirects
#changing the version from 1 to 2 to support backward compatibility
self._transport.add_header("X-RHN-Transport-Capability",
"follow-redirects=3")
if redirect_response:
self._transport.add_header('X-RHN-Redirect', '0')
if self.send_handler:
self._transport.add_header('X-RHN-Path', self.send_handler)
request = self._req_body(self._strip_characters(params), methodname)
try:
response = self._transport.request(self._host, \
self._handler, request, verbose=self._verbose)
save_response = self._transport.response_status
except xmlrpclib.ProtocolError:
if self.use_handler_path:
raise
else:
save_response = sys.exc_info()[1].errcode
self._redirected = None
retry += 1
if save_response == 200:
# exit redirects loop and return response
break
elif save_response not in (301, 302):
# Retry pkg fetch
self.use_handler_path = 1
continue
# rest of loop is run only if we are redirected (301, 302)
self._redirected = self._transport.redirected()
self.use_handler_path = 0
redirect_response = 1
if not self._allow_redirect:
raise InvalidRedirectionError("Redirects not allowed")
if self._verbose:
print("%s redirected to %s" % (self._uri, self._redirected))
typ, uri = splittype(self._redirected)
if typ != None:
typ = typ.lower()
if typ not in ("http", "https"):
raise InvalidRedirectionError(
"Redirected to unsupported protocol %s" % typ)
#
# We forbid HTTPS -> HTTP for security reasons
# Note that HTTP -> HTTPS -> HTTP is allowed (because we compare
# the protocol for the redirect with the original one)
#
if self._type == "https" and typ == "http":
raise InvalidRedirectionError(
"HTTPS redirected to HTTP is not supported")
self._host, self._handler = splithost(uri)
if not self._handler:
self._handler = "/RPC2"
# Create a new transport for the redirected service and
# set up the parameters on the new transport
del self._transport
self._transport = self.default_transport(typ, self._proxy,
self._username, self._password, self._timeout)
self.set_progress_callback(self._progressCallback)
self.set_refresh_callback(self._refreshCallback)
self.set_buffer_size(self._bufferSize)
self.setlang(self._lang)
if self._trusted_cert_files != [] and \
hasattr(self._transport, "add_trusted_cert"):
for certfile in self._trusted_cert_files:
self._transport.add_trusted_cert(certfile)
# Then restart the loop to try the new entry point.
if isinstance(response, transports.File):
# Just return the file
return response
# an XML-RPC encoded data structure
if isinstance(response, TupleType) and len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<%s for %s%s>" %
(self.__class__.__name__, self._host, self._handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return _Method(self._request, name)
    # note: to call a remote object with a non-standard name, use
    # result = getattr(server, "strange-python-name")(args)
def set_transport_flags(self, transfer=0, encoding=0, **kwargs):
if not self._transport:
# Nothing to do
return
kwargs.update({
'transfer' : transfer,
'encoding' : encoding,
})
self._transport.set_transport_flags(**kwargs)
def get_transport_flags(self):
if not self._transport:
# Nothing to do
return {}
return self._transport.get_transport_flags()
def reset_transport_flags(self):
# Does nothing
pass
# Allow user-defined additional headers.
def set_header(self, name, arg):
if type(arg) in [ type([]), type(()) ]:
# Multivalued header
self._headers[name] = [str(a) for a in arg]
else:
self._headers[name] = str(arg)
def add_header(self, name, arg):
if name in self._headers:
vlist = self._headers[name]
if not isinstance(vlist, ListType):
vlist = [ vlist ]
else:
vlist = self._headers[name] = []
vlist.append(str(arg))
# Sets the i18n options
def setlang(self, lang):
self._lang = lang
if self._transport and hasattr(self._transport, "setlang"):
self._transport.setlang(lang)
# Sets the CA chain to be used
def use_CA_chain(self, ca_chain = None):
raise NotImplementedError("This method is deprecated")
def add_trusted_cert(self, certfile):
self._trusted_cert_files.append(certfile)
if self._transport and hasattr(self._transport, "add_trusted_cert"):
self._transport.add_trusted_cert(certfile)
def close(self):
if self._transport:
self._transport.close()
self._transport = None
# RHN GET server
class GETServer(Server):
def __init__(self, uri, transport=None, proxy=None, username=None,
password=None, client_version=2, headers={}, refreshCallback=None,
progressCallback=None, timeout=None):
Server.__init__(self, uri,
proxy=proxy,
username=username,
password=password,
transport=transport,
refreshCallback=refreshCallback,
progressCallback=progressCallback,
timeout=timeout)
self._client_version = client_version
self._headers = headers
# Back up the original handler, since we mangle it
self._orig_handler = self._handler
# Download resumption
self.set_range(offset=None, amount=None)
def _req_body(self, params, methodname):
if not params or len(params) < 1:
raise Exception("Required parameter channel not found")
# Strip the multiple / from the handler
h_comps = filter(lambda x: x != '', self._orig_handler.split('/'))
# Set the handler we are going to request
hndl = h_comps + ["$RHN", params[0], methodname] + list(params[1:])
self._handler = '/' + '/'.join(hndl)
#save the constructed handler in case of redirect
self.send_handler = self._handler
# Add headers
#override the handler to replace /XMLRPC with pkg path
if self._redirected and not self.use_handler_path:
self._handler = self._new_req_body()
for h, v in self._headers.items():
self._transport.set_header(h, v)
if self._offset is not None:
if self._offset >= 0:
brange = str(self._offset) + '-'
if self._amount is not None:
brange = brange + str(self._offset + self._amount - 1)
else:
# The last bytes
# amount is ignored in this case
brange = '-' + str(-self._offset)
self._transport.set_header('Range', "bytes=" + brange)
# Flag that we allow for partial content
self._transport.set_transport_flags(allow_partial_content=1)
# GET requests have empty body
return ""
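    # Illustrative note (not upstream code): with set_range(offset=0, amount=100)
    # the request carries "Range: bytes=0-99", while set_range(offset=-500) asks
    # for the last 500 bytes via "Range: bytes=-500".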
def _new_req_body(self):
type, tmpuri = splittype(self._redirected)
site, handler = splithost(tmpuri)
return handler
def set_range(self, offset=None, amount=None):
if offset is not None:
try:
offset = int(offset)
except ValueError:
# Error
raise RangeError("Invalid value `%s' for offset" % offset, None, sys.exc_info()[2])
if amount is not None:
try:
amount = int(amount)
except ValueError:
# Error
raise RangeError("Invalid value `%s' for amount" % amount, None, sys.exc_info()[2])
if amount <= 0:
raise RangeError("Invalid value `%s' for amount" % amount)
self._amount = amount
self._offset = offset
def reset_transport_flags(self):
self._transport.set_transport_flags(allow_partial_content=0)
def __getattr__(self, name):
# magic method dispatcher
return SlicingMethod(self._request, name)
def default_transport(self, type, proxy=None, username=None, password=None,
timeout=None):
ret = Server.default_transport(self, type, proxy=proxy, username=username, password=password, timeout=timeout)
ret.set_method("GET")
return ret
class RangeError(Exception):
pass
class InvalidRedirectionError(Exception):
pass
def getHeaderValues(headers, name):
import mimetools
if not isinstance(headers, mimetools.Message):
if name in headers:
return [headers[name]]
return []
return [x.split(':', 1)[1].strip() for x in
headers.getallmatchingheaders(name)]
class _Method:
""" some magic to bind an XML-RPC method to an RPC server.
supports "nested" methods (e.g. examples.getStateName)
"""
def __init__(self, send, name):
self._send = send
self._name = name
def __getattr__(self, name):
return _Method(self._send, "%s.%s" % (self._name, name))
def __call__(self, *args):
return self._send(self._name, args)
def __repr__(self):
return (
"<%s %s (%s)>" %
(self.__class__.__name__, self._name, self._send)
)
__str__ = __repr__
class SlicingMethod(_Method):
"""
A "slicing method" allows for byte range requests
"""
def __init__(self, send, name):
_Method.__init__(self, send, name)
self._offset = None
def __getattr__(self, name):
return SlicingMethod(self._send, "%s.%s" % (self._name, name))
def __call__(self, *args, **kwargs):
self._offset = kwargs.get('offset')
self._amount = kwargs.get('amount')
# im_self is a pointer to self, so we can modify the class underneath
try:
self._send.im_self.set_range(offset=self._offset,
amount=self._amount)
except AttributeError:
pass
result = self._send(self._name, args)
# Reset "sticky" transport flags
try:
self._send.im_self.reset_transport_flags()
except AttributeError:
pass
return result
def reportError(headers):
""" Reports the error from the headers. """
errcode = 0
errmsg = ""
s = "X-RHN-Fault-Code"
if s in headers:
errcode = int(headers[s])
s = "X-RHN-Fault-String"
if s in headers:
_sList = getHeaderValues(headers, s)
if _sList:
_s = ''.join(_sList)
import base64
errmsg = "%s" % base64.decodestring(_s)
return errcode, errmsg
|
gpl-2.0
| 4,209,307,803,917,549,600
| 32.794406
| 169
| 0.573687
| false
| 4.28574
| false
| false
| false
|
jenfly/monsoon-onset
|
scripts/thesis-figs.py
|
1
|
9561
|
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
sys.path.append('/home/jwalker/dynamics/python/monsoon-onset')
import numpy as np
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import animation
import collections
import atmos as atm
import merra
import indices
import utils
figwidth = 12
style = atm.homedir() + 'dynamics/python/mpl-styles/presentation.mplstyle'
plt.style.use(style)
fontsize = mpl.rcParams['font.size']
# ----------------------------------------------------------------------
pcpfile = '/home/jwalker/datastore/gpcp/gpcp_daily_1997-2014.nc'
datadir = atm.homedir() + 'datastore/merra2/analysis/'
files = {'PREC' : datadir + 'gpcp_dailyrel_CHP_MFC_1997-2015.nc'}
for nm in ['U', 'V']:
files[nm] = datadir + 'merra2_%s850_dailyrel_CHP_MFC_1980-2015.nc' % nm
mldfile = atm.homedir() + 'datastore/mld/ifremer_mld_DT02_c1m_reg2.0.nc'
indfile = datadir + 'merra2_index_CHP_MFC_1980-2015.nc'
lon1, lon2 = 60, 100
ndays = 5
with xray.open_dataset(pcpfile) as ds:
pcp = atm.subset(ds, {'day' : (1, 365)})
pcp.load()
for ssn in ['JAN', 'JUL', 'JJAS']:
days = atm.season_days(ssn)
pcp[ssn] = atm.dim_mean(pcp['PREC'], 'day', min(days), max(days))
pcp['ANN'] = pcp['PREC'].sum(dim='day')
pcp_jjas = pcp['PREC'].sel(day=atm.season_days('JJAS')).sum(dim='day')
pcp['FRAC'] = pcp_jjas / pcp['ANN']
pcp['PREC'] = atm.rolling_mean(pcp['PREC'], ndays, axis=0, center=True)
pcp['SECTOR'] = atm.dim_mean(pcp['PREC'], 'lon', lon1, lon2)
# Composites relative to onset day
data = {}
for nm in files:
filenm = files[nm]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
if 'year' in var:
var = var.mean(dim='year')
daydim = atm.get_coord(var, 'dayrel', 'dim')
data[nm] = atm.rolling_mean(var, ndays, axis=daydim)
# Mixed layer depths
imonth = 4 # Index for month of May
with xray.open_dataset(mldfile, decode_times=False) as ds:
mld = ds['mld'][imonth].load()
dims, coords = mld.dims, mld.coords
missval = mld.attrs['mask_value']
vals = mld.values
vals = np.ma.masked_array(vals, vals==missval)
vals = np.ma.filled(vals, np.nan)
mld = xray.DataArray(vals, dims=dims, coords=coords)
# Onset/retreat indices and timeseries
with xray.open_dataset(indfile) as index:
index.load()
# ----------------------------------------------------------------------
# Global precip maps in winter/summer
def precip_global(precip, clev=np.arange(0, 16.5, 1), cmap='hot_r'):
cticks = range(0, 17, 2)
m = atm.contourf_latlon(precip, clev=clev, cmap=cmap, extend='max',
colorbar=False)
cb = m.colorbar(ticks=cticks, size='3%')
cb.ax.set_title('mm/day', fontsize=12)
ssn_dict = {'JAN' : 'January', 'JUL' : 'July'}
fig_kw = {'figsize' : (0.75 * figwidth, 0.8 * figwidth)}
grp = atm.FigGroup(2, 1, fig_kw=fig_kw)
for ssn in ['JAN', 'JUL']:
grp.next()
precip_global(pcp[ssn])
plt.title(ssn_dict[ssn])
# Hovmoller plot of sector mean precip
def hovmoller(precip, clev=np.arange(0, 12.5, 1), cticks=np.arange(0, 12.5, 2),
cmap='hot_r', ylimits=(-40, 40)):
lat = atm.get_coord(precip, 'lat')
days = atm.get_coord(precip, 'day')
plt.contourf(days, lat, precip.T, clev, cmap=cmap, extend='max')
cb = plt.colorbar(ticks=cticks)
cb.ax.set_title('mm/day', fontsize=12)
plt.ylim(ylimits)
plt.xlim(2, 365)
plt.ylabel('Latitude')
plt.xlabel('Day of Year')
plt.figure(figsize=(0.8 * figwidth, 0.4*figwidth))
hovmoller(pcp['SECTOR'])
# Map of monsoon region
plt.figure(figsize=(0.4*figwidth, 0.6*figwidth))
m = atm.init_latlon(-50, 50, 40, 120, coastlines=False)
m.shadedrelief(scale=0.3)
yticks = range(-45, 46, 15)
xticks = range(40, 121, 20)
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.yticks(yticks, atm.latlon_labels(yticks, 'lat'))
#atm.geobox(10, 30, 60, 100, m=m, color='k')
# JJAS precip and fraction of annual totals
axlims = (-15, 35, 50, 115)
xticks = range(40, 121, 10)
clev = np.arange(0, 18.5, 1)
plt.figure(figsize=(0.8*figwidth, 0.5*figwidth))
m = atm.init_latlon(axlims[0], axlims[1], axlims[2], axlims[3], resolution='l')
atm.contourf_latlon(pcp['JJAS'], clev=clev, m=m, cmap='hot_r', extend='max')
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
_, cs = atm.contour_latlon(pcp['FRAC'], clev=[0.5], m=m, colors='b',
linewidths=2)
label_locs = [(65, 12)]
cs_opts = {'fmt' : '%.1f', 'fontsize' : fontsize, 'manual' : label_locs}
plt.clabel(cs, **cs_opts)
atm.geobox(10, 30, 60, 100, m=m, color='g')
plt.xlim(axlims[2], axlims[3])
# Mixed layer depths
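# mld_map shows the May mixed layer depth on a pcolor map of the Indian Ocean
# region, with continents filled and coastlines drawn for reference.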
def mld_map(mld, cmap='Blues', axlims=(0, 35, 58, 102), climits=(10, 60),
cticks=range(10, 71, 10), clevs=None):
cb_kwargs = {'ticks' : cticks, 'extend' : 'both'}
m = atm.init_latlon(axlims[0], axlims[1], axlims[2], axlims[3],
resolution='l', coastlines=False,
fillcontinents=True)
m.drawcoastlines(linewidth=0.5, color='0.5')
atm.pcolor_latlon(mld, m=m, cmap=cmap, cb_kwargs=cb_kwargs)
plt.clim(climits)
lat0 = 15.5
plt.figure(figsize=(0.5*figwidth, 0.35*figwidth))
mld_map(mld)
plt.axhline(lat0, color='k')
# ------------------------------------------------------------------------
# Animation of precip and winds
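# animate(i) renders one frame: 5-day smoothed precipitation (shaded) and
# 850 hPa MERRA-2 winds (quiver) on day days[i] relative to monsoon onset,
# titled with the corresponding calendar date (relative day plus the offset
# d0, converted via atm.jday_to_mmdd).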
def animate(i):
days = range(-136, 227, 1)
day = days[i]
    axlims = (-30, 45, 40, 120)
    dx, dy = 5, 5
    climits = (0, 20)
    cmap = 'hot_r'
    d0 = 138
    cticks = np.arange(4, 21, 2)
    scale = 250
    clev = np.arange(4, 20.5, 1)
lat1, lat2, lon1, lon2 = axlims
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
xticks = range(40, 121, 20)
yticks = range(-20, 41, 10)
mm, dd = atm.jday_to_mmdd(day + d0)
title = (atm.month_str(mm)).capitalize() + ' %d' % dd
u = atm.subset(data['U'].sel(dayrel=day), subset_dict)
v = atm.subset(data['V'].sel(dayrel=day), subset_dict)
u = u[::dy, ::dx]
v = v[::dy, ::dx]
#spd = np.sqrt(u**2 + v**2)
pcp = data['PREC'].sel(dayrel=day)
lat = atm.get_coord(u, 'lat')
lon = atm.get_coord(u, 'lon')
plt.clf()
m = atm.init_latlon(lat1, lat2, lon1, lon2, coastlines=False)
m.drawcoastlines(color='k', linewidth=0.5)
m.shadedrelief(scale=0.3)
atm.contourf_latlon(pcp, clev=clev, axlims=axlims, m=m, cmap=cmap,
extend='max', cb_kwargs={'ticks' : cticks})
#atm.pcolor_latlon(pcp, axlims=axlims, cmap=cmap, cb_kwargs={'extend' : 'max'})
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.yticks(yticks, atm.latlon_labels(yticks, 'lat'))
plt.clim(climits)
#plt.quiver(lon, lat, u, v, linewidths=spd.values.ravel())
plt.quiver(lon, lat, u, v, scale=scale, pivot='middle')
plt.title(title)
plt.draw()
fig = plt.figure()
days = range(-136, 227, 1)
#anim = animation.FuncAnimation(fig, animate, frames=len(days),
# interval=20, blit=True)
#anim = animation.FuncAnimation(fig, animate, frames=len(days))
anim = animation.FuncAnimation(fig, animate, frames=30)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
writer = animation.FFMpegWriter(bitrate=500)
print('Saving animation')
anim.save('figs/anim/test.mp4', writer=writer, fps=30)
print('Done')
# --------------------------------------------------------------------------
# def animate(data, day, axlims=(-30, 45, 40, 120), dx=5, dy=5, climits=(0, 20),
# cmap='hot_r', d0=138, clev=np.arange(4, 20.5, 1),
# cticks=np.arange(4, 21, 2), scale=250):
# lat1, lat2, lon1, lon2 = axlims
# subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
# xticks = range(40, 121, 20)
# yticks = range(-20, 41, 10)
# mm, dd = atm.jday_to_mmdd(day + d0)
# title = (atm.month_str(mm)).capitalize() + ' %d' % dd
#
# u = atm.subset(data['U'].sel(dayrel=day), subset_dict)
# v = atm.subset(data['V'].sel(dayrel=day), subset_dict)
# u = u[::dy, ::dx]
# v = v[::dy, ::dx]
# #spd = np.sqrt(u**2 + v**2)
# pcp = data['PREC'].sel(dayrel=day)
# lat = atm.get_coord(u, 'lat')
# lon = atm.get_coord(u, 'lon')
#
# plt.clf()
# m = atm.init_latlon(lat1, lat2, lon1, lon2, coastlines=False)
# m.drawcoastlines(color='k', linewidth=0.5)
# m.shadedrelief(scale=0.3)
# atm.contourf_latlon(pcp, clev=clev, axlims=axlims, m=m, cmap=cmap,
# extend='max', cb_kwargs={'ticks' : cticks})
# #atm.pcolor_latlon(pcp, axlims=axlims, cmap=cmap, cb_kwargs={'extend' : 'max'})
# plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
# plt.yticks(yticks, atm.latlon_labels(yticks, 'lat'))
# plt.clim(climits)
# #plt.quiver(lon, lat, u, v, linewidths=spd.values.ravel())
# plt.quiver(lon, lat, u, v, scale=scale, pivot='middle')
# plt.title(title)
# plt.draw()
#
#
# days = range(-136, 227, 1)
# plt.figure()
# for i, day in enumerate(days):
# animate(data, day)
# filenm = 'figs/anim/frame%03d.png' % i
# print('Saving to ' + filenm)
# plt.savefig(filenm)
|
mit
| 6,556,450,848,463,915,000
| 35.773077
| 85
| 0.606422
| false
| 2.60802
| false
| false
| false
|
brunobord/critica
|
apps/articles_epicurien/admin.py
|
1
|
2409
|
# -*- coding: utf-8 -*-
"""
Administration interface options for ``critica.apps.articles_epicurien`` models.
"""
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from critica.apps.admin.sites import basic_site, advanced_site
from critica.apps.articles.admin import BaseArticleAdmin
from critica.apps.articles_epicurien.models import ArticleEpicurien, ArticleEpicurienType
class ArticleEpicurienTypeAdmin(admin.ModelAdmin):
"""
Administration interface options of ``ArticleEpicurienType`` model.
"""
list_display = ('name', 'slug')
admin.site.register(ArticleEpicurienType, ArticleEpicurienTypeAdmin)
basic_site.register(ArticleEpicurienType, ArticleEpicurienTypeAdmin)
advanced_site.register(ArticleEpicurienType, ArticleEpicurienTypeAdmin)
class ArticleEpicurienAdmin(BaseArticleAdmin):
"""
Administration interface options of ``Article`` model.
"""
def get_fieldsets(self, request, obj=None):
"""
Hook for specifying fieldsets for the add form.
"""
fieldsets = [
(_('Headline'), {'fields': ('author_nickname', 'title', 'opinion')}),
(_('Filling'), {'fields': ('issues', 'type', 'tags')}),
(_('Illustration'), {'fields': ('illustration', 'use_default_illustration')}),
(_('Content'), {'fields': ('summary', 'content')}),
]
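        # Publication options are added per custom permission; the
        # 'Publication' fieldset is appended only when the user holds at
        # least one of them.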
publication_fields = []
if request.user.has_perm('articles_epicurien.can_feature_article'):
publication_fields.append('is_featured')
if request.user.has_perm('articles_epicurien.can_reserve_article'):
publication_fields.append('is_reserved')
if request.user.has_perm('articles_epicurien.can_publish_article'):
publication_fields.append('is_ready_to_publish')
if request.user.has_perm('articles_epicurien.can_reserve_article') \
or request.user.has_perm('articles_epicurien.can_feature_article') \
or request.user.has_perm('articles_epicurien.can_publish_article'):
fieldsets += [(_('Publication'), {'fields': publication_fields})]
return fieldsets
admin.site.register(ArticleEpicurien, ArticleEpicurienAdmin)
basic_site.register(ArticleEpicurien, ArticleEpicurienAdmin)
advanced_site.register(ArticleEpicurien, ArticleEpicurienAdmin)
|
gpl-3.0
| -3,054,804,316,831,667,000
| 38.491803
| 90
| 0.674554
| false
| 3.775862
| false
| false
| false
|
palerdot/calibre
|
src/calibre/db/backend.py
|
1
|
68501
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
# Imports {{{
import os, shutil, uuid, json, glob, time, cPickle, hashlib, errno
from functools import partial
import apsw
from calibre import isbytestring, force_unicode, prints
from calibre.constants import (iswindows, filesystem_encoding,
preferred_encoding)
from calibre.ptempfile import PersistentTemporaryFile, TemporaryFile
from calibre.db import SPOOL_SIZE
from calibre.db.schema_upgrades import SchemaUpgrade
from calibre.db.delete_service import delete_service
from calibre.db.errors import NoSuchFormat
from calibre.library.field_metadata import FieldMetadata
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.utils.icu import sort_key
from calibre.utils.config import to_json, from_json, prefs, tweaks
from calibre.utils.date import utcfromtimestamp, parse_date
from calibre.utils.filenames import (
is_case_sensitive, samefile, hardlink_file, ascii_filename,
WindowsAtomicFolderMove, atomic_rename, remove_dir_if_empty)
from calibre.utils.magick.draw import save_cover_data_to
from calibre.utils.formatter_functions import load_user_template_functions
from calibre.db.tables import (OneToOneTable, ManyToOneTable, ManyToManyTable,
SizeTable, FormatsTable, AuthorsTable, IdentifiersTable, PathTable,
CompositeTable, UUIDTable, RatingTable)
# }}}
'''
Differences in semantics from pysqlite:
1. execute/executemany operate in autocommit mode
2. There is no fetchone() method on cursor objects; use next() instead
3. There is no executescript
'''
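# Illustration only: with apsw, rows are read by iterating the cursor
# returned by execute() rather than calling fetchone(), e.g.
#   for (book_id,) in conn.cursor().execute('SELECT id FROM books'):
#       ...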
CUSTOM_DATA_TYPES = frozenset(['rating', 'text', 'comments', 'datetime',
'int', 'float', 'bool', 'series', 'composite', 'enumeration'])
class DynamicFilter(object): # {{{
'No longer used, present for legacy compatibility'
def __init__(self, name):
self.name = name
self.ids = frozenset([])
def __call__(self, id_):
return int(id_ in self.ids)
def change(self, ids):
self.ids = frozenset(ids)
# }}}
class DBPrefs(dict): # {{{
'Store preferences as key:value pairs in the db'
def __init__(self, db):
dict.__init__(self)
self.db = db
self.defaults = {}
self.disable_setting = False
self.load_from_db()
def load_from_db(self):
self.clear()
for key, val in self.db.conn.get('SELECT key,val FROM preferences'):
try:
val = self.raw_to_object(val)
except:
prints('Failed to read value for:', key, 'from db')
continue
dict.__setitem__(self, key, val)
def raw_to_object(self, raw):
if not isinstance(raw, unicode):
raw = raw.decode(preferred_encoding)
return json.loads(raw, object_hook=from_json)
def to_raw(self, val):
return json.dumps(val, indent=2, default=to_json)
def has_setting(self, key):
return key in self
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.defaults[key]
def __delitem__(self, key):
dict.__delitem__(self, key)
self.db.execute('DELETE FROM preferences WHERE key=?', (key,))
def __setitem__(self, key, val):
if self.disable_setting:
return
raw = self.to_raw(val)
self.db.execute('INSERT OR REPLACE INTO preferences (key,val) VALUES (?,?)', (key, raw))
dict.__setitem__(self, key, val)
def set(self, key, val):
self.__setitem__(key, val)
def get_namespaced(self, namespace, key, default=None):
key = u'namespaced:%s:%s'%(namespace, key)
try:
return dict.__getitem__(self, key)
except KeyError:
return default
def set_namespaced(self, namespace, key, val):
if u':' in key:
raise KeyError('Colons are not allowed in keys')
if u':' in namespace:
raise KeyError('Colons are not allowed in the namespace')
key = u'namespaced:%s:%s'%(namespace, key)
self[key] = val
def write_serialized(self, library_path):
try:
to_filename = os.path.join(library_path, 'metadata_db_prefs_backup.json')
with open(to_filename, "wb") as f:
f.write(json.dumps(self, indent=2, default=to_json))
except:
import traceback
traceback.print_exc()
@classmethod
def read_serialized(cls, library_path, recreate_prefs=False):
from_filename = os.path.join(library_path,
'metadata_db_prefs_backup.json')
with open(from_filename, "rb") as f:
return json.load(f, object_hook=from_json)
# }}}
# Extra collators {{{
def pynocase(one, two, encoding='utf-8'):
if isbytestring(one):
try:
one = one.decode(encoding, 'replace')
except:
pass
if isbytestring(two):
try:
two = two.decode(encoding, 'replace')
except:
pass
return cmp(one.lower(), two.lower())
def _author_to_author_sort(x):
if not x:
return ''
return author_to_author_sort(x.replace('|', ','))
def icu_collator(s1, s2):
return cmp(sort_key(force_unicode(s1, 'utf-8')),
sort_key(force_unicode(s2, 'utf-8')))
# }}}
# Unused aggregators {{{
def Concatenate(sep=','):
'''String concatenation aggregator for sqlite'''
def step(ctxt, value):
if value is not None:
ctxt.append(value)
def finalize(ctxt):
if not ctxt:
return None
return sep.join(ctxt)
return ([], step, finalize)
def SortedConcatenate(sep=','):
'''String concatenation aggregator for sqlite, sorted by supplied index'''
def step(ctxt, ndx, value):
if value is not None:
ctxt[ndx] = value
def finalize(ctxt):
if len(ctxt) == 0:
return None
return sep.join(map(ctxt.get, sorted(ctxt.iterkeys())))
return ({}, step, finalize)
def IdentifiersConcat():
'''String concatenation aggregator for the identifiers map'''
def step(ctxt, key, val):
ctxt.append(u'%s:%s'%(key, val))
def finalize(ctxt):
return ','.join(ctxt)
return ([], step, finalize)
def AumSortedConcatenate():
'''String concatenation aggregator for the author sort map'''
def step(ctxt, ndx, author, sort, link):
if author is not None:
ctxt[ndx] = ':::'.join((author, sort, link))
def finalize(ctxt):
keys = list(ctxt.iterkeys())
l = len(keys)
if l == 0:
return None
if l == 1:
return ctxt[keys[0]]
return ':#:'.join([ctxt[v] for v in sorted(keys)])
return ({}, step, finalize)
# }}}
class Connection(apsw.Connection): # {{{
BUSY_TIMEOUT = 10000 # milliseconds
def __init__(self, path):
apsw.Connection.__init__(self, path)
self.setbusytimeout(self.BUSY_TIMEOUT)
self.execute('pragma cache_size=5000')
self.execute('pragma temp_store=2')
encoding = self.execute('pragma encoding').next()[0]
self.createcollation('PYNOCASE', partial(pynocase,
encoding=encoding))
self.createscalarfunction('title_sort', title_sort, 1)
self.createscalarfunction('author_to_author_sort',
_author_to_author_sort, 1)
self.createscalarfunction('uuid4', lambda: str(uuid.uuid4()),
0)
# Dummy functions for dynamically created filters
self.createscalarfunction('books_list_filter', lambda x: 1, 1)
self.createcollation('icucollate', icu_collator)
# Legacy aggregators (never used) but present for backwards compat
self.createaggregatefunction('sortconcat', SortedConcatenate, 2)
self.createaggregatefunction('sortconcat_bar',
partial(SortedConcatenate, sep='|'), 2)
self.createaggregatefunction('sortconcat_amper',
partial(SortedConcatenate, sep='&'), 2)
self.createaggregatefunction('identifiers_concat',
IdentifiersConcat, 2)
self.createaggregatefunction('concat', Concatenate, 1)
self.createaggregatefunction('aum_sortconcat',
AumSortedConcatenate, 4)
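    # Register a DynamicFilter instance as an SQL scalar function, so queries
    # can test book ids against an in-memory set (legacy filtering mechanism).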
def create_dynamic_filter(self, name):
f = DynamicFilter(name)
self.createscalarfunction(name, f, 1)
def get(self, *args, **kw):
ans = self.cursor().execute(*args)
if kw.get('all', True):
return ans.fetchall()
try:
return ans.next()[0]
except (StopIteration, IndexError):
return None
def execute(self, sql, bindings=None):
cursor = self.cursor()
return cursor.execute(sql, bindings)
def executemany(self, sql, sequence_of_bindings):
with self: # Disable autocommit mode, for performance
return self.cursor().executemany(sql, sequence_of_bindings)
# }}}
class DB(object):
PATH_LIMIT = 40 if iswindows else 100
WINDOWS_LIBRARY_PATH_LIMIT = 75
# Initialize database {{{
def __init__(self, library_path, default_prefs=None, read_only=False,
restore_all_prefs=False, progress_callback=lambda x, y:True):
try:
if isbytestring(library_path):
library_path = library_path.decode(filesystem_encoding)
except:
import traceback
traceback.print_exc()
self.field_metadata = FieldMetadata()
self.library_path = os.path.abspath(library_path)
self.dbpath = os.path.join(library_path, 'metadata.db')
self.dbpath = os.environ.get('CALIBRE_OVERRIDE_DATABASE_PATH',
self.dbpath)
if iswindows and len(self.library_path) + 4*self.PATH_LIMIT + 10 > 259:
raise ValueError(_(
'Path to library ({0}) too long. Must be less than'
' {1} characters.').format(self.library_path, 259-4*self.PATH_LIMIT-10))
exists = self._exists = os.path.exists(self.dbpath)
if not exists:
# Be more strict when creating new libraries as the old calculation
# allowed for max path lengths of 265 chars.
if (iswindows and len(self.library_path) >
self.WINDOWS_LIBRARY_PATH_LIMIT):
raise ValueError(_(
'Path to library too long. Must be less than'
' %d characters.')%self.WINDOWS_LIBRARY_PATH_LIMIT)
if read_only and os.path.exists(self.dbpath):
# Work on only a copy of metadata.db to ensure that
# metadata.db is not changed
pt = PersistentTemporaryFile('_metadata_ro.db')
pt.close()
shutil.copyfile(self.dbpath, pt.name)
self.dbpath = pt.name
if not os.path.exists(os.path.dirname(self.dbpath)):
os.makedirs(os.path.dirname(self.dbpath))
self._conn = None
if self.user_version == 0:
self.initialize_database()
if not os.path.exists(self.library_path):
os.makedirs(self.library_path)
self.is_case_sensitive = is_case_sensitive(self.library_path)
SchemaUpgrade(self, self.library_path, self.field_metadata)
# Guarantee that the library_id is set
self.library_id
# Fix legacy triggers and columns
self.execute('''
DROP TRIGGER IF EXISTS author_insert_trg;
CREATE TEMP TRIGGER author_insert_trg
AFTER INSERT ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name) WHERE id=NEW.id;
END;
DROP TRIGGER IF EXISTS author_update_trg;
CREATE TEMP TRIGGER author_update_trg
BEFORE UPDATE ON authors
BEGIN
UPDATE authors SET sort=author_to_author_sort(NEW.name)
WHERE id=NEW.id AND name <> NEW.name;
END;
UPDATE authors SET sort=author_to_author_sort(name) WHERE sort IS NULL;
''')
# Initialize_prefs must be called before initialize_custom_columns because
# icc can set a pref.
self.initialize_prefs(default_prefs, restore_all_prefs, progress_callback)
self.initialize_custom_columns()
self.initialize_tables()
load_user_template_functions(self.library_id,
self.prefs.get('user_template_functions', []))
def initialize_prefs(self, default_prefs, restore_all_prefs, progress_callback): # {{{
self.prefs = DBPrefs(self)
if default_prefs is not None and not self._exists:
progress_callback(None, len(default_prefs))
# Only apply default prefs to a new database
for i, key in enumerate(default_prefs):
# be sure that prefs not to be copied are listed below
if restore_all_prefs or key not in frozenset(['news_to_be_synced']):
self.prefs[key] = default_prefs[key]
progress_callback(_('restored preference ') + key, i+1)
if 'field_metadata' in default_prefs:
fmvals = [f for f in default_prefs['field_metadata'].values()
if f['is_custom']]
progress_callback(None, len(fmvals))
for i, f in enumerate(fmvals):
progress_callback(_('creating custom column ') + f['label'], i)
self.create_custom_column(f['label'], f['name'],
f['datatype'],
(f['is_multiple'] is not None and
len(f['is_multiple']) > 0),
f['is_editable'], f['display'])
defs = self.prefs.defaults
defs['gui_restriction'] = defs['cs_restriction'] = ''
defs['categories_using_hierarchy'] = []
defs['column_color_rules'] = []
defs['column_icon_rules'] = []
defs['grouped_search_make_user_categories'] = []
defs['similar_authors_search_key'] = 'authors'
defs['similar_authors_match_kind'] = 'match_any'
defs['similar_publisher_search_key'] = 'publisher'
defs['similar_publisher_match_kind'] = 'match_any'
defs['similar_tags_search_key'] = 'tags'
defs['similar_tags_match_kind'] = 'match_all'
defs['similar_series_search_key'] = 'series'
defs['similar_series_match_kind'] = 'match_any'
defs['book_display_fields'] = [
('title', False), ('authors', True), ('formats', True),
('series', True), ('identifiers', True), ('tags', True),
('path', True), ('publisher', False), ('rating', False),
('author_sort', False), ('sort', False), ('timestamp', False),
('uuid', False), ('comments', True), ('id', False), ('pubdate', False),
('last_modified', False), ('size', False), ('languages', False),
]
defs['virtual_libraries'] = {}
defs['virtual_lib_on_startup'] = defs['cs_virtual_lib_on_startup'] = ''
defs['virt_libs_hidden'] = defs['virt_libs_order'] = ()
defs['update_all_last_mod_dates_on_start'] = False
defs['field_under_covers_in_grid'] = 'title'
# Migrate the bool tristate tweak
defs['bools_are_tristate'] = \
tweaks.get('bool_custom_columns_are_tristate', 'yes') == 'yes'
if self.prefs.get('bools_are_tristate') is None:
self.prefs.set('bools_are_tristate', defs['bools_are_tristate'])
# Migrate column coloring rules
if self.prefs.get('column_color_name_1', None) is not None:
from calibre.library.coloring import migrate_old_rule
old_rules = []
for i in range(1, 6):
col = self.prefs.get('column_color_name_'+str(i), None)
templ = self.prefs.get('column_color_template_'+str(i), None)
if col and templ:
try:
del self.prefs['column_color_name_'+str(i)]
rules = migrate_old_rule(self.field_metadata, templ)
for templ in rules:
old_rules.append((col, templ))
except:
pass
if old_rules:
self.prefs['column_color_rules'] += old_rules
# Migrate saved search and user categories to db preference scheme
def migrate_preference(key, default):
oldval = prefs[key]
if oldval != default:
self.prefs[key] = oldval
prefs[key] = default
if key not in self.prefs:
self.prefs[key] = default
migrate_preference('user_categories', {})
migrate_preference('saved_searches', {})
# migrate grouped_search_terms
if self.prefs.get('grouped_search_terms', None) is None:
try:
ogst = tweaks.get('grouped_search_terms', {})
ngst = {}
for t in ogst:
ngst[icu_lower(t)] = ogst[t]
self.prefs.set('grouped_search_terms', ngst)
except:
pass
# migrate the gui_restriction preference to a virtual library
gr_pref = self.prefs.get('gui_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['gui_restriction'] = ''
self.prefs['virtual_lib_on_startup'] = gr_pref
# migrate the cs_restriction preference to a virtual library
gr_pref = self.prefs.get('cs_restriction', None)
if gr_pref:
virt_libs = self.prefs.get('virtual_libraries', {})
virt_libs[gr_pref] = 'search:"' + gr_pref + '"'
self.prefs['virtual_libraries'] = virt_libs
self.prefs['cs_restriction'] = ''
self.prefs['cs_virtual_lib_on_startup'] = gr_pref
# Rename any user categories with names that differ only in case
user_cats = self.prefs.get('user_categories', [])
catmap = {}
for uc in user_cats:
ucl = icu_lower(uc)
if ucl not in catmap:
catmap[ucl] = []
catmap[ucl].append(uc)
cats_changed = False
for uc in catmap:
if len(catmap[uc]) > 1:
prints('found user category case overlap', catmap[uc])
cat = catmap[uc][0]
suffix = 1
while icu_lower((cat + unicode(suffix))) in catmap:
suffix += 1
prints('Renaming user category %s to %s'%(cat, cat+unicode(suffix)))
user_cats[cat + unicode(suffix)] = user_cats[cat]
del user_cats[cat]
cats_changed = True
if cats_changed:
self.prefs.set('user_categories', user_cats)
# }}}
def initialize_custom_columns(self): # {{{
self.custom_columns_deleted = False
with self.conn:
# Delete previously marked custom columns
for record in self.conn.get(
'SELECT id FROM custom_columns WHERE mark_for_delete=1'):
num = record[0]
table, lt = self.custom_table_names(num)
self.execute('''\
DROP INDEX IF EXISTS {table}_idx;
DROP INDEX IF EXISTS {lt}_aidx;
DROP INDEX IF EXISTS {lt}_bidx;
DROP TRIGGER IF EXISTS fkc_update_{lt}_a;
DROP TRIGGER IF EXISTS fkc_update_{lt}_b;
DROP TRIGGER IF EXISTS fkc_insert_{lt};
DROP TRIGGER IF EXISTS fkc_delete_{lt};
DROP TRIGGER IF EXISTS fkc_insert_{table};
DROP TRIGGER IF EXISTS fkc_delete_{table};
DROP VIEW IF EXISTS tag_browser_{table};
DROP VIEW IF EXISTS tag_browser_filtered_{table};
DROP TABLE IF EXISTS {table};
DROP TABLE IF EXISTS {lt};
'''.format(table=table, lt=lt)
)
self.prefs.set('update_all_last_mod_dates_on_start', True)
self.execute('DELETE FROM custom_columns WHERE mark_for_delete=1')
# Load metadata for custom columns
self.custom_column_label_map, self.custom_column_num_map = {}, {}
self.custom_column_num_to_label_map = {}
triggers = []
remove = []
custom_tables = self.custom_tables
for record in self.conn.get(
'SELECT label,name,datatype,editable,display,normalized,id,is_multiple FROM custom_columns'):
data = {
'label':record[0],
'name':record[1],
'datatype':record[2],
'editable':bool(record[3]),
'display':json.loads(record[4]),
'normalized':bool(record[5]),
'num':record[6],
'is_multiple':bool(record[7]),
}
if data['display'] is None:
data['display'] = {}
# set up the is_multiple separator dict
if data['is_multiple']:
if data['display'].get('is_names', False):
seps = {'cache_to_list': '|', 'ui_to_list': '&', 'list_to_ui': ' & '}
elif data['datatype'] == 'composite':
seps = {'cache_to_list': ',', 'ui_to_list': ',', 'list_to_ui': ', '}
else:
seps = {'cache_to_list': '|', 'ui_to_list': ',', 'list_to_ui': ', '}
else:
seps = {}
data['multiple_seps'] = seps
table, lt = self.custom_table_names(data['num'])
if table not in custom_tables or (data['normalized'] and lt not in
custom_tables):
remove.append(data)
continue
self.custom_column_num_map[data['num']] = \
self.custom_column_label_map[data['label']] = data
self.custom_column_num_to_label_map[data['num']] = data['label']
# Create Foreign Key triggers
if data['normalized']:
trigger = 'DELETE FROM %s WHERE book=OLD.id;'%lt
else:
trigger = 'DELETE FROM %s WHERE book=OLD.id;'%table
triggers.append(trigger)
if remove:
with self.conn:
for data in remove:
prints('WARNING: Custom column %r not found, removing.' %
data['label'])
self.execute('DELETE FROM custom_columns WHERE id=?',
(data['num'],))
if triggers:
with self.conn:
self.execute('''\
CREATE TEMP TRIGGER custom_books_delete_trg
AFTER DELETE ON books
BEGIN
%s
END;
'''%(' \n'.join(triggers)))
# Setup data adapters
def adapt_text(x, d):
if d['is_multiple']:
if x is None:
return []
if isinstance(x, (str, unicode, bytes)):
x = x.split(d['multiple_seps']['ui_to_list'])
x = [y.strip() for y in x if y.strip()]
x = [y.decode(preferred_encoding, 'replace') if not isinstance(y,
unicode) else y for y in x]
return [u' '.join(y.split()) for y in x]
else:
return x if x is None or isinstance(x, unicode) else \
x.decode(preferred_encoding, 'replace')
def adapt_datetime(x, d):
if isinstance(x, (str, unicode, bytes)):
x = parse_date(x, assume_utc=False, as_utc=False)
return x
def adapt_bool(x, d):
if isinstance(x, (str, unicode, bytes)):
x = x.lower()
if x == 'true':
x = True
elif x == 'false':
x = False
elif x == 'none':
x = None
else:
x = bool(int(x))
return x
def adapt_enum(x, d):
v = adapt_text(x, d)
if not v:
v = None
return v
def adapt_number(x, d):
if x is None:
return None
if isinstance(x, (str, unicode, bytes)):
if x.lower() == 'none':
return None
if d['datatype'] == 'int':
return int(x)
return float(x)
self.custom_data_adapters = {
'float': adapt_number,
'int': adapt_number,
'rating':lambda x,d: x if x is None else min(10., max(0., float(x))),
'bool': adapt_bool,
'comments': lambda x,d: adapt_text(x, {'is_multiple':False}),
'datetime': adapt_datetime,
'text':adapt_text,
'series':adapt_text,
'enumeration': adapt_enum
}
# Create Tag Browser categories for custom columns
for k in sorted(self.custom_column_label_map.iterkeys()):
v = self.custom_column_label_map[k]
if v['normalized']:
is_category = True
else:
is_category = False
is_m = v['multiple_seps']
tn = 'custom_column_{0}'.format(v['num'])
self.field_metadata.add_custom_field(label=v['label'],
table=tn, column='value', datatype=v['datatype'],
colnum=v['num'], name=v['name'], display=v['display'],
is_multiple=is_m, is_category=is_category,
is_editable=v['editable'], is_csp=False)
# }}}
def initialize_tables(self): # {{{
tables = self.tables = {}
for col in ('title', 'sort', 'author_sort', 'series_index', 'comments',
'timestamp', 'pubdate', 'uuid', 'path', 'cover',
'last_modified'):
metadata = self.field_metadata[col].copy()
if col == 'comments':
metadata['table'], metadata['column'] = 'comments', 'text'
if not metadata['table']:
metadata['table'], metadata['column'] = 'books', ('has_cover'
if col == 'cover' else col)
if not metadata['column']:
metadata['column'] = col
tables[col] = (PathTable if col == 'path' else UUIDTable if col == 'uuid' else OneToOneTable)(col, metadata)
for col in ('series', 'publisher'):
tables[col] = ManyToOneTable(col, self.field_metadata[col].copy())
for col in ('authors', 'tags', 'formats', 'identifiers', 'languages', 'rating'):
cls = {
'authors':AuthorsTable,
'formats':FormatsTable,
'identifiers':IdentifiersTable,
'rating':RatingTable,
}.get(col, ManyToManyTable)
tables[col] = cls(col, self.field_metadata[col].copy())
tables['size'] = SizeTable('size', self.field_metadata['size'].copy())
self.FIELD_MAP = {
'id':0, 'title':1, 'authors':2, 'timestamp':3, 'size':4,
'rating':5, 'tags':6, 'comments':7, 'series':8, 'publisher':9,
'series_index':10, 'sort':11, 'author_sort':12, 'formats':13,
'path':14, 'pubdate':15, 'uuid':16, 'cover':17, 'au_map':18,
'last_modified':19, 'identifiers':20, 'languages':21,
}
for k,v in self.FIELD_MAP.iteritems():
self.field_metadata.set_field_record_index(k, v, prefer_custom=False)
base = max(self.FIELD_MAP.itervalues())
for label_, data in self.custom_column_label_map.iteritems():
label = self.field_metadata.custom_field_prefix + label_
metadata = self.field_metadata[label].copy()
link_table = self.custom_table_names(data['num'])[1]
self.FIELD_MAP[data['num']] = base = base+1
self.field_metadata.set_field_record_index(label_, base,
prefer_custom=True)
if data['datatype'] == 'series':
# account for the series index column. Field_metadata knows that
# the series index is one larger than the series. If you change
# it here, be sure to change it there as well.
self.FIELD_MAP[str(data['num'])+'_index'] = base = base+1
self.field_metadata.set_field_record_index(label_+'_index', base,
prefer_custom=True)
if data['normalized']:
if metadata['is_multiple']:
tables[label] = ManyToManyTable(label, metadata,
link_table=link_table)
else:
tables[label] = ManyToOneTable(label, metadata,
link_table=link_table)
if metadata['datatype'] == 'series':
# Create series index table
label += '_index'
metadata = self.field_metadata[label].copy()
metadata['column'] = 'extra'
metadata['table'] = link_table
tables[label] = OneToOneTable(label, metadata)
else:
if data['datatype'] == 'composite':
tables[label] = CompositeTable(label, metadata)
else:
tables[label] = OneToOneTable(label, metadata)
self.FIELD_MAP['ondevice'] = base = base+1
self.field_metadata.set_field_record_index('ondevice', base, prefer_custom=False)
self.FIELD_MAP['marked'] = base = base+1
self.field_metadata.set_field_record_index('marked', base, prefer_custom=False)
self.FIELD_MAP['series_sort'] = base = base+1
self.field_metadata.set_field_record_index('series_sort', base, prefer_custom=False)
# }}}
@property
def conn(self):
if self._conn is None:
self._conn = Connection(self.dbpath)
if self._exists and self.user_version == 0:
self._conn.close()
os.remove(self.dbpath)
self._conn = Connection(self.dbpath)
return self._conn
def execute(self, sql, bindings=None):
try:
return self.conn.cursor().execute(sql, bindings)
except apsw.IOError:
            # This can happen if the computer was suspended; see, for example:
            # https://bugs.launchpad.net/bugs/1286522. Try to reopen the db.
if not self.conn.getautocommit():
raise # We are in a transaction, re-opening the db will fail anyway
self.reopen(force=True)
return self.conn.cursor().execute(sql, bindings)
def executemany(self, sql, sequence_of_bindings):
try:
with self.conn: # Disable autocommit mode, for performance
return self.conn.cursor().executemany(sql, sequence_of_bindings)
except apsw.IOError:
            # This can happen if the computer was suspended; see, for example:
            # https://bugs.launchpad.net/bugs/1286522. Try to reopen the db.
if not self.conn.getautocommit():
raise # We are in a transaction, re-opening the db will fail anyway
self.reopen(force=True)
with self.conn: # Disable autocommit mode, for performance
return self.conn.cursor().executemany(sql, sequence_of_bindings)
def get(self, *args, **kw):
ans = self.execute(*args)
if kw.get('all', True):
return ans.fetchall()
try:
return ans.next()[0]
except (StopIteration, IndexError):
return None
def last_insert_rowid(self):
return self.conn.last_insert_rowid()
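    # Map between a custom column's label/number and the prefixed field name
    # used by field_metadata.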
def custom_field_name(self, label=None, num=None):
if label is not None:
return self.field_metadata.custom_field_prefix + label
return self.field_metadata.custom_field_prefix + self.custom_column_num_to_label_map[num]
def custom_field_metadata(self, label=None, num=None):
if label is not None:
return self.custom_column_label_map[label]
return self.custom_column_num_map[num]
def set_custom_column_metadata(self, num, name=None, label=None, is_editable=None, display=None):
changed = False
if name is not None:
self.execute('UPDATE custom_columns SET name=? WHERE id=?', (name, num))
changed = True
if label is not None:
self.execute('UPDATE custom_columns SET label=? WHERE id=?', (label, num))
changed = True
if is_editable is not None:
self.execute('UPDATE custom_columns SET editable=? WHERE id=?', (bool(is_editable), num))
self.custom_column_num_map[num]['is_editable'] = bool(is_editable)
changed = True
if display is not None:
self.execute('UPDATE custom_columns SET display=? WHERE id=?', (json.dumps(display), num))
changed = True
# Note: the caller is responsible for scheduling a metadata backup if necessary
return changed
def create_custom_column(self, label, name, datatype, is_multiple, editable=True, display={}): # {{{
import re
if not label:
raise ValueError(_('No label was provided'))
if re.match('^\w*$', label) is None or not label[0].isalpha() or label.lower() != label:
raise ValueError(_('The label must contain only lower case letters, digits and underscores, and start with a letter'))
if datatype not in CUSTOM_DATA_TYPES:
raise ValueError('%r is not a supported data type'%datatype)
normalized = datatype not in ('datetime', 'comments', 'int', 'bool',
'float', 'composite')
is_multiple = is_multiple and datatype in ('text', 'composite')
self.execute(
('INSERT INTO '
'custom_columns(label,name,datatype,is_multiple,editable,display,normalized)'
'VALUES (?,?,?,?,?,?,?)'),
(label, name, datatype, is_multiple, editable, json.dumps(display), normalized))
num = self.conn.last_insert_rowid()
if datatype in ('rating', 'int'):
dt = 'INT'
elif datatype in ('text', 'comments', 'series', 'composite', 'enumeration'):
dt = 'TEXT'
elif datatype in ('float',):
dt = 'REAL'
elif datatype == 'datetime':
dt = 'timestamp'
elif datatype == 'bool':
dt = 'BOOL'
collate = 'COLLATE NOCASE' if dt == 'TEXT' else ''
table, lt = self.custom_table_names(num)
if normalized:
if datatype == 'series':
s_index = 'extra REAL,'
else:
s_index = ''
lines = [
'''\
CREATE TABLE %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
value %s NOT NULL %s,
UNIQUE(value));
'''%(table, dt, collate),
'CREATE INDEX %s_idx ON %s (value %s);'%(table, table, collate),
'''\
CREATE TABLE %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
book INTEGER NOT NULL,
value INTEGER NOT NULL,
%s
UNIQUE(book, value)
);'''%(lt, s_index),
'CREATE INDEX %s_aidx ON %s (value);'%(lt,lt),
'CREATE INDEX %s_bidx ON %s (book);'%(lt,lt),
'''\
CREATE TRIGGER fkc_update_{lt}_a
BEFORE UPDATE OF book ON {lt}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
CREATE TRIGGER fkc_update_{lt}_b
BEFORE UPDATE OF author ON {lt}
BEGIN
SELECT CASE
WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
END;
END;
CREATE TRIGGER fkc_insert_{lt}
BEFORE INSERT ON {lt}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
WHEN (SELECT id from {table} WHERE id=NEW.value) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: value not in {table}')
END;
END;
CREATE TRIGGER fkc_delete_{lt}
AFTER DELETE ON {table}
BEGIN
DELETE FROM {lt} WHERE value=OLD.id;
END;
CREATE VIEW tag_browser_{table} AS SELECT
id,
value,
(SELECT COUNT(id) FROM {lt} WHERE value={table}.id) count,
(SELECT AVG(r.rating)
FROM {lt},
books_ratings_link as bl,
ratings as r
WHERE {lt}.value={table}.id and bl.book={lt}.book and
r.id = bl.rating and r.rating <> 0) avg_rating,
value AS sort
FROM {table};
CREATE VIEW tag_browser_filtered_{table} AS SELECT
id,
value,
(SELECT COUNT({lt}.id) FROM {lt} WHERE value={table}.id AND
books_list_filter(book)) count,
(SELECT AVG(r.rating)
FROM {lt},
books_ratings_link as bl,
ratings as r
WHERE {lt}.value={table}.id AND bl.book={lt}.book AND
r.id = bl.rating AND r.rating <> 0 AND
books_list_filter(bl.book)) avg_rating,
value AS sort
FROM {table};
'''.format(lt=lt, table=table),
]
else:
lines = [
'''\
CREATE TABLE %s(
id INTEGER PRIMARY KEY AUTOINCREMENT,
book INTEGER,
value %s NOT NULL %s,
UNIQUE(book));
'''%(table, dt, collate),
'CREATE INDEX %s_idx ON %s (book);'%(table, table),
'''\
CREATE TRIGGER fkc_insert_{table}
BEFORE INSERT ON {table}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
CREATE TRIGGER fkc_update_{table}
BEFORE UPDATE OF book ON {table}
BEGIN
SELECT CASE
WHEN (SELECT id from books WHERE id=NEW.book) IS NULL
THEN RAISE(ABORT, 'Foreign key violation: book not in books')
END;
END;
'''.format(table=table),
]
script = ' \n'.join(lines)
self.execute(script)
self.prefs.set('update_all_last_mod_dates_on_start', True)
return num
# }}}
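    # The column is only marked for deletion here; its tables are actually
    # dropped on the next call to initialize_custom_columns().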
def delete_custom_column(self, label=None, num=None):
data = self.custom_field_metadata(label, num)
self.execute('UPDATE custom_columns SET mark_for_delete=1 WHERE id=?', (data['num'],))
def close(self, force=False):
if getattr(self, '_conn', None) is not None:
self._conn.close(force)
del self._conn
def reopen(self, force=False):
self.close(force)
self._conn = None
self.conn
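    # Dump the database to SQL text, rebuild it in a temporary file and then
    # atomically swap it in place of the current metadata.db.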
def dump_and_restore(self, callback=None, sql=None):
import codecs
from calibre.utils.apsw_shell import Shell
from contextlib import closing
if callback is None:
callback = lambda x: x
uv = int(self.user_version)
with TemporaryFile(suffix='.sql') as fname:
if sql is None:
callback(_('Dumping database to SQL') + '...')
with codecs.open(fname, 'wb', encoding='utf-8') as buf:
shell = Shell(db=self.conn, stdout=buf)
shell.process_command('.dump')
else:
with open(fname, 'wb') as buf:
buf.write(sql if isinstance(sql, bytes) else sql.encode('utf-8'))
with TemporaryFile(suffix='_tmpdb.db', dir=os.path.dirname(self.dbpath)) as tmpdb:
callback(_('Restoring database from SQL') + '...')
with closing(Connection(tmpdb)) as conn:
shell = Shell(db=conn, encoding='utf-8')
shell.process_command('.read ' + fname.replace(os.sep, '/'))
conn.execute('PRAGMA user_version=%d;'%uv)
self.close()
try:
atomic_rename(tmpdb, self.dbpath)
finally:
self.reopen()
def vacuum(self):
self.execute('VACUUM')
@dynamic_property
def user_version(self):
doc = 'The user version of this database'
def fget(self):
return self.conn.get('pragma user_version;', all=False)
def fset(self, val):
self.execute('pragma user_version=%d'%int(val))
return property(doc=doc, fget=fget, fset=fset)
def initialize_database(self):
metadata_sqlite = P('metadata_sqlite.sql', data=True,
allow_user_override=False).decode('utf-8')
cur = self.conn.cursor()
cur.execute('BEGIN EXCLUSIVE TRANSACTION')
try:
cur.execute(metadata_sqlite)
except:
cur.execute('ROLLBACK')
else:
cur.execute('COMMIT')
if self.user_version == 0:
self.user_version = 1
# }}}
def normpath(self, path):
path = os.path.abspath(os.path.realpath(path))
if not self.is_case_sensitive:
path = os.path.normcase(path).lower()
return path
def is_deletable(self, path):
return path and not self.normpath(self.library_path).startswith(self.normpath(path))
def rmtree(self, path):
if self.is_deletable(path):
try:
shutil.rmtree(path)
except:
import traceback
traceback.print_exc()
time.sleep(1) # In case something has temporarily locked a file
shutil.rmtree(path)
def construct_path_name(self, book_id, title, author):
'''
Construct the directory name for this book based on its metadata.
'''
book_id = ' (%d)' % book_id
l = self.PATH_LIMIT - (len(book_id) // 2) - 2
author = ascii_filename(author)[:l].decode('ascii', 'replace')
title = ascii_filename(title)[:l].decode('ascii', 'replace')
while author[-1] in (' ', '.'):
author = author[:-1]
if not author:
author = ascii_filename(_('Unknown')).decode(
'ascii', 'replace')
return '%s/%s%s' % (author, title, book_id)
def construct_file_name(self, book_id, title, author, extlen):
'''
Construct the file name for this book based on its metadata.
'''
extlen = max(extlen, 14) # 14 accounts for ORIGINAL_EPUB
# The PATH_LIMIT on windows already takes into account the doubling
# (it is used to enforce the total path length limit, individual path
# components can be much longer than the total path length would allow on
# windows).
l = (self.PATH_LIMIT - (extlen // 2) - 2) if iswindows else ((self.PATH_LIMIT - extlen - 2) // 2)
if l < 5:
raise ValueError('Extension length too long: %d' % extlen)
author = ascii_filename(author)[:l].decode('ascii', 'replace')
title = ascii_filename(title)[:l].decode('ascii', 'replace')
name = title + ' - ' + author
while name.endswith('.'):
name = name[:-1]
return name
# Database layer API {{{
def custom_table_names(self, num):
return 'custom_column_%d'%num, 'books_custom_column_%d_link'%num
@property
def custom_tables(self):
return set([x[0] for x in self.conn.get(
'SELECT name FROM sqlite_master WHERE type="table" AND '
'(name GLOB "custom_column_*" OR name GLOB "books_custom_column_*")')])
@classmethod
def exists_at(cls, path):
return path and os.path.exists(os.path.join(path, 'metadata.db'))
@dynamic_property
def library_id(self):
doc = ('The UUID for this library. As long as the user only operates'
' on libraries with calibre, it will be unique')
def fget(self):
if getattr(self, '_library_id_', None) is None:
ans = self.conn.get('SELECT uuid FROM library_id', all=False)
if ans is None:
ans = str(uuid.uuid4())
self.library_id = ans
else:
self._library_id_ = ans
return self._library_id_
def fset(self, val):
self._library_id_ = unicode(val)
self.execute('''
DELETE FROM library_id;
INSERT INTO library_id (uuid) VALUES (?);
''', (self._library_id_,))
return property(doc=doc, fget=fget, fset=fset)
def last_modified(self):
''' Return last modified time as a UTC datetime object '''
return utcfromtimestamp(os.stat(self.dbpath).st_mtime)
def read_tables(self):
'''
Read all data from the db into the python in-memory tables
'''
with self.conn: # Use a single transaction, to ensure nothing modifies the db while we are reading
for table in self.tables.itervalues():
try:
table.read(self)
except:
prints('Failed to read table:', table.name)
import pprint
pprint.pprint(table.metadata)
raise
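    # Return the absolute path of a book's format file, falling back to any
    # file in the book folder with the right extension (and copying it to the
    # expected name) when the recorded filename is not present on disk.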
def format_abspath(self, book_id, fmt, fname, path):
path = os.path.join(self.library_path, path)
fmt = ('.' + fmt.lower()) if fmt else ''
fmt_path = os.path.join(path, fname+fmt)
if os.path.exists(fmt_path):
return fmt_path
try:
candidates = glob.glob(os.path.join(path, '*'+fmt))
except: # If path contains strange characters this throws an exc
candidates = []
if fmt and candidates and os.path.exists(candidates[0]):
shutil.copyfile(candidates[0], fmt_path)
return fmt_path
def format_hash(self, book_id, fmt, fname, path):
path = self.format_abspath(book_id, fmt, fname, path)
if path is None:
raise NoSuchFormat('Record %d has no fmt: %s'%(book_id, fmt))
sha = hashlib.sha256()
with lopen(path, 'rb') as f:
while True:
raw = f.read(SPOOL_SIZE)
sha.update(raw)
if len(raw) < SPOOL_SIZE:
break
return sha.hexdigest()
def format_metadata(self, book_id, fmt, fname, path):
path = self.format_abspath(book_id, fmt, fname, path)
ans = {}
if path is not None:
stat = os.stat(path)
ans['path'] = path
ans['size'] = stat.st_size
ans['mtime'] = utcfromtimestamp(stat.st_mtime)
return ans
def has_format(self, book_id, fmt, fname, path):
return self.format_abspath(book_id, fmt, fname, path) is not None
def remove_formats(self, remove_map):
paths = []
for book_id, removals in remove_map.iteritems():
for fmt, fname, path in removals:
path = self.format_abspath(book_id, fmt, fname, path)
if path is not None:
paths.append(path)
try:
delete_service().delete_files(paths, self.library_path)
except:
import traceback
traceback.print_exc()
def cover_last_modified(self, path):
path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
try:
return utcfromtimestamp(os.stat(path).st_mtime)
except EnvironmentError:
pass # Cover doesn't exist
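    # Copy a book's cover.jpg to dest, which may be a path or a file-like
    # object; optionally hardlink instead of copying, or route through a
    # WindowsAtomicFolderMove helper.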
def copy_cover_to(self, path, dest, windows_atomic_move=None, use_hardlink=False):
path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if os.access(path, os.R_OK) and dest and not samefile(dest, path):
windows_atomic_move.copy_path_to(path, dest)
return True
else:
if os.access(path, os.R_OK):
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
try:
f = lopen(path, 'rb')
except (IOError, OSError) as e:
# Ensure the path that caused this error is reported
raise Exception('Failed to open %r with error: %s' % (path, e))
with f:
if hasattr(dest, 'write'):
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
return True
elif dest and not samefile(dest, path):
if use_hardlink:
try:
hardlink_file(path, dest)
return True
except:
pass
with lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
return True
return False
def cover_or_cache(self, path, timestamp):
path = os.path.abspath(os.path.join(self.library_path, path, 'cover.jpg'))
try:
stat = os.stat(path)
except EnvironmentError:
return False, None, None
if abs(timestamp - stat.st_mtime) < 0.1:
return True, None, None
try:
f = lopen(path, 'rb')
except (IOError, OSError):
time.sleep(0.2)
f = lopen(path, 'rb')
with f:
return True, f.read(), stat.st_mtime
def set_cover(self, book_id, path, data):
path = os.path.abspath(os.path.join(self.library_path, path))
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, 'cover.jpg')
if callable(getattr(data, 'save', None)):
from calibre.gui2 import pixmap_to_data
data = pixmap_to_data(data)
elif callable(getattr(data, 'read', None)):
data = data.read()
if data is None:
if os.path.exists(path):
try:
os.remove(path)
except (IOError, OSError):
time.sleep(0.2)
os.remove(path)
else:
try:
save_cover_data_to(data, path)
except (IOError, OSError):
time.sleep(0.2)
save_cover_data_to(data, path)
def copy_format_to(self, book_id, fmt, fname, path, dest,
windows_atomic_move=None, use_hardlink=False):
path = self.format_abspath(book_id, fmt, fname, path)
if path is None:
return False
if windows_atomic_move is not None:
if not isinstance(dest, basestring):
raise Exception("Error, you must pass the dest as a path when"
" using windows_atomic_move")
if dest:
if samefile(dest, path):
# Ensure that the file has the same case as dest
try:
if path != dest:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
windows_atomic_move.copy_path_to(path, dest)
else:
if hasattr(dest, 'write'):
with lopen(path, 'rb') as f:
shutil.copyfileobj(f, dest)
if hasattr(dest, 'flush'):
dest.flush()
elif dest:
if samefile(dest, path):
if not self.is_case_sensitive and path != dest:
# Ensure that the file has the same case as dest
try:
os.rename(path, dest)
except:
pass # Nothing too catastrophic happened, the cases mismatch, that's all
else:
if use_hardlink:
try:
hardlink_file(path, dest)
return True
except:
pass
with lopen(path, 'rb') as f, lopen(dest, 'wb') as d:
shutil.copyfileobj(f, d)
return True
def windows_check_if_files_in_use(self, paths):
'''
        Raises an EACCES IOError if any of the files under the given book
        folder paths are opened in another program on Windows.
'''
if iswindows:
for path in paths:
spath = os.path.join(self.library_path, *path.split('/'))
wam = None
if os.path.exists(spath):
try:
wam = WindowsAtomicFolderMove(spath)
finally:
if wam is not None:
wam.close_handles()
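    # Write a new format file into the book's folder (renaming any file saved
    # under an older naming scheme) and return its size and base filename.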
def add_format(self, book_id, fmt, stream, title, author, path, current_name):
fmt = ('.' + fmt.lower()) if fmt else ''
fname = self.construct_file_name(book_id, title, author, len(fmt))
path = os.path.join(self.library_path, path)
dest = os.path.join(path, fname + fmt)
if not os.path.exists(path):
os.makedirs(path)
size = 0
if current_name is not None:
old_path = os.path.join(path, current_name + fmt)
if old_path != dest:
                # Ensure that the old format file is not orphaned; this can
                # happen if the algorithm in construct_file_name is changed.
try:
# rename rather than remove, so that if something goes
# wrong in the rest of this function, at least the file is
# not deleted
os.rename(old_path, dest)
except EnvironmentError as e:
if getattr(e, 'errno', None) != errno.ENOENT:
# Failing to rename the old format will at worst leave a
# harmless orphan, so log and ignore the error
import traceback
traceback.print_exc()
if (not getattr(stream, 'name', False) or not samefile(dest, stream.name)):
with lopen(dest, 'wb') as f:
shutil.copyfileobj(stream, f)
size = f.tell()
elif os.path.exists(dest):
size = os.path.getsize(dest)
return size, fname
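    # Rename/move a book's folder and format files when its title or author
    # changes, then update the path and filename records in the database.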
def update_path(self, book_id, title, author, path_field, formats_field):
path = self.construct_path_name(book_id, title, author)
current_path = path_field.for_book(book_id, default_value='')
formats = formats_field.for_book(book_id, default_value=())
try:
extlen = max(len(fmt) for fmt in formats) + 1
except ValueError:
extlen = 10
fname = self.construct_file_name(book_id, title, author, extlen)
# Check if the metadata used to construct paths has changed
changed = False
for fmt in formats:
name = formats_field.format_fname(book_id, fmt)
if name and name != fname:
changed = True
break
if path == current_path and not changed:
return
spath = os.path.join(self.library_path, *current_path.split('/'))
tpath = os.path.join(self.library_path, *path.split('/'))
source_ok = current_path and os.path.exists(spath)
wam = WindowsAtomicFolderMove(spath) if iswindows and source_ok else None
format_map = {}
original_format_map = {}
try:
if not os.path.exists(tpath):
os.makedirs(tpath)
if source_ok: # Migrate existing files
dest = os.path.join(tpath, 'cover.jpg')
self.copy_cover_to(current_path, dest,
windows_atomic_move=wam, use_hardlink=True)
for fmt in formats:
dest = os.path.join(tpath, fname+'.'+fmt.lower())
format_map[fmt] = dest
ofmt_fname = formats_field.format_fname(book_id, fmt)
original_format_map[fmt] = os.path.join(spath, ofmt_fname+'.'+fmt.lower())
self.copy_format_to(book_id, fmt, ofmt_fname, current_path,
dest, windows_atomic_move=wam, use_hardlink=True)
# Update db to reflect new file locations
for fmt in formats:
formats_field.table.set_fname(book_id, fmt, fname, self)
path_field.table.set_path(book_id, path, self)
# Delete not needed files and directories
if source_ok:
if os.path.exists(spath):
if samefile(spath, tpath):
# The format filenames may have changed while the folder
# name remains the same
for fmt, opath in original_format_map.iteritems():
npath = format_map.get(fmt, None)
if npath and os.path.abspath(npath.lower()) != os.path.abspath(opath.lower()) and samefile(opath, npath):
# opath and npath are different hard links to the same file
os.unlink(opath)
else:
if wam is not None:
wam.delete_originals()
self.rmtree(spath)
parent = os.path.dirname(spath)
if len(os.listdir(parent)) == 0:
self.rmtree(parent)
finally:
if wam is not None:
wam.close_handles()
curpath = self.library_path
c1, c2 = current_path.split('/'), path.split('/')
if not self.is_case_sensitive and len(c1) == len(c2):
# On case-insensitive systems, title and author renames that only
# change case don't cause any changes to the directories in the file
# system. This can lead to having the directory names not match the
# title/author, which leads to trouble when libraries are copied to
# a case-sensitive system. The following code attempts to fix this
# by checking each segment. If they are different because of case,
# then rename the segment. Note that the code above correctly
# handles files in the directories, so no need to do them here.
for oldseg, newseg in zip(c1, c2):
if oldseg.lower() == newseg.lower() and oldseg != newseg:
try:
os.rename(os.path.join(curpath, oldseg),
os.path.join(curpath, newseg))
except:
break # Fail silently since nothing catastrophic has happened
curpath = os.path.join(curpath, newseg)
def write_backup(self, path, raw):
path = os.path.abspath(os.path.join(self.library_path, path, 'metadata.opf'))
try:
with lopen(path, 'wb') as f:
f.write(raw)
except EnvironmentError:
os.makedirs(os.path.dirname(path))
with lopen(path, 'wb') as f:
f.write(raw)
def read_backup(self, path):
path = os.path.abspath(os.path.join(self.library_path, path, 'metadata.opf'))
with lopen(path, 'rb') as f:
return f.read()
def remove_books(self, path_map, permanent=False):
self.executemany(
'DELETE FROM books WHERE id=?', [(x,) for x in path_map])
paths = {os.path.join(self.library_path, x) for x in path_map.itervalues() if x}
paths = {x for x in paths if os.path.exists(x) and self.is_deletable(x)}
if permanent:
for path in paths:
self.rmtree(path)
remove_dir_if_empty(os.path.dirname(path), ignore_metadata_caches=True)
else:
delete_service().delete_books(paths, self.library_path)
def add_custom_data(self, name, val_map, delete_first):
if delete_first:
self.execute('DELETE FROM books_plugin_data WHERE name=?', (name, ))
self.executemany(
'INSERT OR REPLACE INTO books_plugin_data (book, name, val) VALUES (?, ?, ?)',
[(book_id, name, json.dumps(val, default=to_json))
for book_id, val in val_map.iteritems()])
def get_custom_book_data(self, name, book_ids, default=None):
book_ids = frozenset(book_ids)
def safe_load(val):
try:
return json.loads(val, object_hook=from_json)
except:
return default
if len(book_ids) == 1:
bid = next(iter(book_ids))
ans = {book_id:safe_load(val) for book_id, val in
self.execute('SELECT book, val FROM books_plugin_data WHERE book=? AND name=?', (bid, name))}
return ans or {bid:default}
ans = {}
for book_id, val in self.execute(
'SELECT book, val FROM books_plugin_data WHERE name=?', (name,)):
if not book_ids or book_id in book_ids:
val = safe_load(val)
ans[book_id] = val
return ans
def delete_custom_book_data(self, name, book_ids):
if book_ids:
self.executemany('DELETE FROM books_plugin_data WHERE book=? AND name=?',
[(book_id, name) for book_id in book_ids])
else:
self.execute('DELETE FROM books_plugin_data WHERE name=?', (name,))
def get_ids_for_custom_book_data(self, name):
return frozenset(r[0] for r in self.execute('SELECT book FROM books_plugin_data WHERE name=?', (name,)))
def conversion_options(self, book_id, fmt):
for (data,) in self.conn.get('SELECT data FROM conversion_options WHERE book=? AND format=?', (book_id, fmt.upper())):
if data:
return cPickle.loads(bytes(data))
def has_conversion_options(self, ids, fmt='PIPE'):
ids = frozenset(ids)
with self.conn:
self.execute('DROP TABLE IF EXISTS conversion_options_temp; CREATE TEMP TABLE conversion_options_temp (id INTEGER PRIMARY KEY);')
self.executemany('INSERT INTO conversion_options_temp VALUES (?)', [(x,) for x in ids])
for (book_id,) in self.conn.get(
'SELECT book FROM conversion_options WHERE format=? AND book IN (SELECT id FROM conversion_options_temp)', (fmt.upper(),)):
return True
return False
def delete_conversion_options(self, book_ids, fmt):
self.executemany('DELETE FROM conversion_options WHERE book=? AND format=?',
[(book_id, fmt.upper()) for book_id in book_ids])
def set_conversion_options(self, options, fmt):
options = [(book_id, fmt.upper(), buffer(cPickle.dumps(data, -1))) for book_id, data in options.iteritems()]
self.executemany('INSERT OR REPLACE INTO conversion_options(book,format,data) VALUES (?,?,?)', options)
def get_top_level_move_items(self, all_paths):
items = set(os.listdir(self.library_path))
paths = set(all_paths)
paths.update({'metadata.db', 'metadata_db_prefs_backup.json'})
path_map = {x:x for x in paths}
if not self.is_case_sensitive:
for x in items:
path_map[x.lower()] = x
items = {x.lower() for x in items}
paths = {x.lower() for x in paths}
items = items.intersection(paths)
return items, path_map
def move_library_to(self, all_paths, newloc, progress=lambda x: x):
if not os.path.exists(newloc):
os.makedirs(newloc)
old_dirs = set()
items, path_map = self.get_top_level_move_items(all_paths)
for x in items:
src = os.path.join(self.library_path, x)
dest = os.path.join(newloc, path_map[x])
if os.path.isdir(src):
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
old_dirs.add(src)
else:
if os.path.exists(dest):
os.remove(dest)
shutil.copyfile(src, dest)
x = path_map[x]
if not isinstance(x, unicode):
x = x.decode(filesystem_encoding, 'replace')
progress(x)
dbpath = os.path.join(newloc, os.path.basename(self.dbpath))
opath = self.dbpath
self.conn.close()
self.library_path, self.dbpath = newloc, dbpath
if self._conn is not None:
self._conn.close()
self._conn = None
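        # Evaluating the conn property (the bare expression below is intentional)
        # presumably re-opens the database at the new dbpath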
self.conn
try:
os.unlink(opath)
except:
pass
for loc in old_dirs:
try:
shutil.rmtree(loc)
except:
pass
def restore_book(self, book_id, path, formats):
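        # Re-attach an existing on-disk folder to the book record and re-create its format rows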
self.execute('UPDATE books SET path=? WHERE id=?', (path.replace(os.sep, '/'), book_id))
vals = [(book_id, fmt, size, name) for fmt, size, name in formats]
self.executemany('INSERT INTO data (book,format,uncompressed_size,name) VALUES (?,?,?,?)', vals)
# }}}
license: gpl-3.0 | hash: -5,460,101,433,638,921,000 | line_mean: 40.141742 | line_max: 141 | alpha_frac: 0.527146 | autogenerated: false | ratio: 4.183012 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: brainwane/missing-from-wikipedia | path: webapp/application.py | copies: 1 | size: 1721 | content:
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request
import missing
app = Flask(__name__)
# take in names from datainput.html form
# run massagenames (implicitly chunks into 50 titles per request) and leftout
# return result to user in results.html form
def onWikipedia(names, lang):
names = missing.massagenames(names)
resultlist = missing.leftout(names, lang)
stats = missing.generate_statistics(resultlist, names)
return names, resultlist, stats
def askedToCheck(listofstrings):
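    # Summarise the checked phrases into a short string for display on the results page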
l = len(listofstrings)
if l == 1:
return listofstrings[0]
elif l <= 4:
return ", ".join(listofstrings)
elif l > 4:
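        # e.g. six phrases a..f are summarised as "6 phrases: a, b... e, f"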
return "%s phrases: %s, %s... %s, %s" % (l, listofstrings[0], listofstrings[1], listofstrings[-2], listofstrings[-1])
@app.route('/index', methods=['GET', 'POST']) # form in template
def index():
if request.method == 'GET':
print "we did a get"
return render_template('datainput.html')
else: # request was POST
print "we did a POST!"
if 'pagename' in request.form:
namestocheck, language = request.form['pagename'].encode('utf-8'), request.form['langname']
namestocheck = namestocheck.split('\r\n')
else:
namefilestorage, language = request.files[('fileofnames')].stream, request.form['langname']
namestocheck = [line.strip('\n').decode('utf-8') for line in namefilestorage]
orig, checkresult, statistics = onWikipedia(namestocheck, language)
return render_template('results.html', checkname=askedToCheck(orig), result=checkresult, stats=statistics, target_lang=language)
if __name__ == "__main__":
app.run(debug=True)
license: gpl-3.0 | hash: -7,242,098,130,690,440,000 | line_mean: 35.617021 | line_max: 136 | alpha_frac: 0.655433 | autogenerated: false | ratio: 3.563147 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: robbievanleeuwen/section-properties | path: sectionproperties/examples/example_frame.py | copies: 1 | size: 1555 | content:
import time
import numpy as np
import matplotlib.pyplot as plt
import sectionproperties.pre.sections as sections
from sectionproperties.analysis.cross_section import CrossSection
# create a rectangular section
geometry = sections.RectangularSection(d=100, b=50)
# create a list of mesh sizes to analyse
mesh_sizes = [1.5, 2, 2.5, 3, 4, 5, 10, 15, 20, 25, 30, 40, 50, 75, 100]
j_calc = [] # list to store torsion constants
t_calc = [] # list to store computation times
# loop through mesh sizes
for mesh_size in mesh_sizes:
mesh = geometry.create_mesh(mesh_sizes=[mesh_size]) # create mesh
section = CrossSection(geometry, mesh) # create a CrossSection object
start_time = time.time() # start timing
# calculate the frame properties
(_, _, _, _, j, _) = section.calculate_frame_properties()
t = time.time() - start_time # stop timing
t_calc.append(t) # save the time
j_calc.append(j) # save the torsion constant
# print the result
msg = "Mesh Size: {0}; ".format(mesh_size)
msg += "Solution Time {0:.5f} s; ".format(t)
msg += "Torsion Constant: {0:.12e}".format(j)
print(msg)
correct_val = j_calc[0] # assume the finest mesh gives the 'correct' value
j_np = np.array(j_calc) # convert results to a numpy array
error_vals = (j_np - correct_val) / j_np * 100  # compute the percentage error using the numpy array
# produce a plot of the accuracy of the torsion constant with computation time
plt.loglog(t_calc[1:], error_vals[1:], 'kx-')
plt.xlabel("Solver Time [s]")
plt.ylabel("Torsion Constant Error [%]")
plt.show()
license: mit | hash: -2,314,972,156,369,502,700 | line_mean: 38.871795 | line_max: 78 | alpha_frac: 0.684887 | autogenerated: false | ratio: 3.259958 | config_test: false | has_no_keywords: false | few_assignments: false
repo_name: F-Secure/lokki-wp8 | path: scripts/common.py | copies: 1 | size: 1958 | content:
"""
Copyright (c) 2014-2015 F-Secure
See LICENSE for details
"""
"""
Common methods to be used in converting localization files.
Copyright: F-Secure, 2012
"""
#default product name in localization files
PRODUCT_NAME_DEFAULT_VALUE = "F-Secure Mobile Sync"
# id that defines default product name in localization file
PRODUCT_NAME_ID = "PRODUCT_NAME_LONG"
def find_node(nodes, string_id):
"""
Searches nodes and finds the node which contains attribute 'string_id'
Raises exception if suitable node is not found
"""
for node in nodes:
current_id = node.getAttribute("id")
if current_id == string_id:
return node
    raise Exception("find_node failed! " + string_id + " was not found in the nodes.")
class LocCustomizer():
def __init__(self):
self.productName = ""
def convert_product_name(self, string_value, string_id):
"""
    Replaces the product name in string_value if it is present.
    NOTE that the first call to this method should use PRODUCT_NAME_ID as the
    string_id value, so that the customized product name is set correctly.
"""
#Set correct product name
if string_id == PRODUCT_NAME_ID:
            #Remove quotes from the beginning and end of the string if they exist
if string_value[0] == "\"" and string_value[len(string_value)-1] == "\"":
self.productName = string_value[1:-1]
else:
self.productName = string_value
else:
if self.productName == "":
raise Exception("Product name is not set. It should be first item in localization xml")
if self.productName != PRODUCT_NAME_DEFAULT_VALUE:
#Default product name has been changed. Change that also from this string if it exists
string_value = string_value.replace(PRODUCT_NAME_DEFAULT_VALUE, self.productName)
return string_value
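# Illustrative usage sketch (not part of the original file; the "WELCOME_TEXT" id and
# the replacement name are made up for demonstration):
#   customizer = LocCustomizer()
#   customizer.convert_product_name('"Acme Sync"', PRODUCT_NAME_ID)  # must be the first call
#   customizer.convert_product_name("Welcome to F-Secure Mobile Sync", "WELCOME_TEXT")
#   # -> "Welcome to Acme Sync"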
license: apache-2.0 | hash: -7,674,815,097,206,625,000 | line_mean: 37.392157 | line_max: 103 | alpha_frac: 0.643514 | autogenerated: false | ratio: 4.192719 | config_test: false | has_no_keywords: false | few_assignments: false