Dataset schema (⌀ marks a nullable column). Each row below gives one source file's metadata, followed by its `content` and the three per-file statistics.

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 3..1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3..972 |
| max_stars_repo_name | string | length 6..130 |
| max_stars_repo_head_hexsha | string | length 40..78 |
| max_stars_repo_licenses | list | length 1..10 |
| max_stars_count | int64 | 1..191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 3..972 |
| max_issues_repo_name | string | length 6..130 |
| max_issues_repo_head_hexsha | string | length 40..78 |
| max_issues_repo_licenses | list | length 1..10 |
| max_issues_count | int64 | 1..116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 3..972 |
| max_forks_repo_name | string | length 6..130 |
| max_forks_repo_head_hexsha | string | length 40..78 |
| max_forks_repo_licenses | list | length 1..10 |
| max_forks_count | int64 | 1..105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 3..1.03M |
| avg_line_length | float64 | 1.13..941k |
| max_line_length | int64 | 2..941k |
| alphanum_fraction | float64 | 0..1 |
| 3afcf1619450fb2f8358266e12d53d04f162c5bd | 6,262 | py | Python | src/m5r_nested_loops_in_graphics.py | fernerme/14_LoopsWithinLoops | da23e6f1ee2e88cc695ff5202094f9ce404dccb4 | ["MIT"] | null | null | null | src/m5r_nested_loops_in_graphics.py | fernerme/14_LoopsWithinLoops | da23e6f1ee2e88cc695ff5202094f9ce404dccb4 | ["MIT"] | null | null | null | src/m5r_nested_loops_in_graphics.py | fernerme/14_LoopsWithinLoops | da23e6f1ee2e88cc695ff5202094f9ce404dccb4 | ["MIT"] | null | null | null |
"""
This project demonstrates NESTED LOOPS (i.e., loops within loops)
in the context of TWO-DIMENSIONAL GRAPHICS.
Authors: David Mutchler, Valerie Galluzzi, Mark Hays, Amanda Stouder,
their colleagues and Melina Ferner.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
""" Calls the other functions to demonstrate them. """
nested_loops_in_graphics_example()
def nested_loops_in_graphics_example():
"""
Demonstrates nested loops in a TWO-DIMENSIONAL GRAPHICS example.
"""
width = 800
height = 600
title = 'Rectangles and Triangles of Circles'
window = rg.RoseWindow(width, height, title)
window.continue_on_mouse_click('Click to run Example 1.')
# ------------------------------------------------------------------
starting_point = rg.Point(50, 50)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# First set of circles
# ------------------------------------------------------------------
radius = 20
starting_circle = rg.Circle(starting_point, radius)
rectangle_of_circles(window, starting_circle.clone(), 4, 12)
window.continue_on_mouse_click('Click to run Example 2.')
# ------------------------------------------------------------------
# Second set of circles
# ------------------------------------------------------------------
starting_circle.move_by(180, 400)
rectangle_of_circles(window, starting_circle.clone(), 14, 2)
window.continue_on_mouse_click('Click to run Example 3.')
# ------------------------------------------------------------------
# Third and last set of circles
# ------------------------------------------------------------------
starting_circle.move_by(200, -400)
triangle_of_circles(window, starting_circle.clone(), 8)
window.close_on_mouse_click()
# ----------------------------------------------------------------------
# DONE: 2.
# *** Unless your instructor directs you otherwise,
# watch the video
# nested_loops_in_GRAPHICS.mp4
# in Preparation for Session 18
# ** NOW **
# As the video proceeds, LOOK AT THE CODE BELOW.
# It is the same as the video.
# (Pause the video when it completes the following problem.)
#
# *** USE THE VIDEO to understand the TECHNIQUE used in this example.
#
# AFTER you have watched the video, asking questions as needed,
# and you feel that you understand the TECHNIQUE it presents, THEN:
#
# *** Change the TO DO above to DONE. ***
# ----------------------------------------------------------------------
def rectangle_of_circles(window, circle, m, n):
"""
Draws an m x n rectangle of circles (i.e. m columns and n rows)
on the given rg.RoseWindow. The given rg.Circle specifies:
- The position of the upper-left circle drawn and also
- The radius that all the circles have.
After drawing each circle, pauses briefly (0.1 second).
Preconditions:
:type window: rg.RoseWindow
:type circle: rg.Circle
:type m: int
:type n: int
and m and n are small, positive integers.
"""
original_x = circle.center.x
original_y = circle.center.y
radius = circle.radius
x = original_x
y = original_y
for i in range(n): # Loop through the rows
for j in range(m): # Loop through the columns
new_circle = rg.Circle(rg.Point(x, y), radius)
new_circle.attach_to(window)
window.render(0.1)
x = x + (2 * radius) # Move x to the right, for next circle
y = y + 2 * radius # Move y down, for the next row of circles
x = original_x # Reset x to the left-edge, for the next row
# ----------------------------------------------------------------------
# DONE: 3.
# *** Unless your instructor directs you otherwise,
# watch the video
# nested_loops_in_GRAPHICS.mp4
# in Preparation for Session 18
# ** NOW **
# As the video proceeds, LOOK AT THE CODE BELOW.
# It is the same as the video.
# (Continue the video from where you paused it in the previous problem.)
#
# *** USE THE VIDEO to understand the TECHNIQUE used in this example.
#
# AFTER you have watched the video, asking questions as needed,
# and you feel that you understand the TECHNIQUE it presents, THEN:
#
# *** Change the TO DO above to DONE. ***
# ----------------------------------------------------------------------
def triangle_of_circles(window, circle, n):
"""
Draws an n x n right-triangle of circles
(1 circle in the top row, 2 in the next row, etc., until n rows)
on the given rg.RoseWindow. The given rg.Circle specifies:
- The position of the upper-left circle drawn and also
- The radius that all the circles have.
After drawing each circle, pauses briefly (0.1 second).
Preconditions:
:type window: rg.RoseWindow
:type circle: rg.Circle
:type n: int
    and n is a small, positive integer.
"""
# ------------------------------------------------------------------
# NOTE: The solution below is IDENTICAL to the rectangle_of_circles
# solution except that the INNER loop has j+1 instead of m.
# Make sure you understand why this works!
# ------------------------------------------------------------------
original_x = circle.center.x
original_y = circle.center.y
radius = circle.radius
x = original_x
y = original_y
for j in range(n): # Loop through the rows
for _ in range(j + 1): # Loop through the columns
new_circle = rg.Circle(rg.Point(x, y), radius)
new_circle.attach_to(window.initial_canvas)
window.render(0.1)
x = x + (2 * radius) # Move x to the right, for next circle
y = y + 2 * radius # Move y down, for the next row of circles
x = original_x # Reset x to the left-edge, for the next row
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 36.406977 | 77 | 0.535931 |
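The file above builds an m x n rectangle and a right triangle of circles with an outer loop over rows and an inner loop over columns. The same technique works without rosegraphics; the sketch below is a plain-text stand-in (not part of the original assignment) that prints the shapes instead of drawing them, and its triangle variant bounds the inner loop by the row index exactly like the `j + 1` in `triangle_of_circles`.

```python
def rectangle_of_stars(m, n):
    """Print an m-column by n-row rectangle of stars."""
    for _row in range(n):                 # outer loop: one pass per row
        line = ""
        for _col in range(m):             # inner loop: m columns in every row
            line += "* "
        print(line)


def triangle_of_stars(n):
    """Print a right triangle: row j gets j + 1 stars."""
    for j in range(n):                    # outer loop: one pass per row
        line = ""
        for _col in range(j + 1):         # inner loop bounded by the row index
            line += "* "
        print(line)


rectangle_of_stars(4, 3)
triangle_of_stars(5)
```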
| 25c8825f6edc8271426c0b35b27260056a947621 | 3,379 | py | Python | homeassistant/components/rfxtrx/device_trigger.py | DoctorU/core | 5b218d7e1c4164e32d41473977459cbaf23adf42 | ["Apache-2.0"] | 7 | 2019-08-15T13:36:58.000Z | 2020-03-18T10:46:29.000Z | homeassistant/components/rfxtrx/device_trigger.py | DoctorU/core | 5b218d7e1c4164e32d41473977459cbaf23adf42 | ["Apache-2.0"] | 87 | 2020-07-15T13:43:35.000Z | 2022-03-23T07:43:10.000Z | homeassistant/components/rfxtrx/device_trigger.py | winning1120xx/home-assistant | 53d4c0ce2d374b5e97bbdc37742656c27adf8eea | ["Apache-2.0"] | 7 | 2018-10-04T10:12:45.000Z | 2021-12-29T20:55:40.000Z |
"""Provides device automations for RFXCOM RFXtrx."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.automation import (
AutomationActionType,
AutomationTriggerInfo,
)
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import event as event_trigger
from homeassistant.components.rfxtrx.const import EVENT_RFXTRX_EVENT
from homeassistant.const import (
ATTR_DEVICE_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN
from .helpers import async_get_device_object
CONF_SUBTYPE = "subtype"
CONF_TYPE_COMMAND = "command"
CONF_TYPE_STATUS = "status"
TRIGGER_SELECTION = {
CONF_TYPE_COMMAND: "COMMANDS",
CONF_TYPE_STATUS: "STATUS",
}
TRIGGER_TYPES = [
CONF_TYPE_COMMAND,
CONF_TYPE_STATUS,
]
TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
vol.Required(CONF_SUBTYPE): str,
}
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> list[dict]:
"""List device triggers for RFXCOM RFXtrx devices."""
device = async_get_device_object(hass, device_id)
triggers = []
for conf_type in TRIGGER_TYPES:
data = getattr(device, TRIGGER_SELECTION[conf_type], {})
for command in data.values():
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_TYPE: conf_type,
CONF_SUBTYPE: command,
}
)
return triggers
async def async_validate_trigger_config(hass, config):
"""Validate config."""
config = TRIGGER_SCHEMA(config)
device = async_get_device_object(hass, config[CONF_DEVICE_ID])
action_type = config[CONF_TYPE]
sub_type = config[CONF_SUBTYPE]
commands = getattr(device, TRIGGER_SELECTION[action_type], {})
if config[CONF_SUBTYPE] not in commands.values():
raise InvalidDeviceAutomationConfig(
f"Subtype {sub_type} not found in device triggers {commands}"
)
return config
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: AutomationTriggerInfo,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
event_data = {ATTR_DEVICE_ID: config[CONF_DEVICE_ID]}
if config[CONF_TYPE] == CONF_TYPE_COMMAND:
event_data["values"] = {"Command": config[CONF_SUBTYPE]}
elif config[CONF_TYPE] == CONF_TYPE_STATUS:
event_data["values"] = {"Status": config[CONF_SUBTYPE]}
event_config = event_trigger.TRIGGER_SCHEMA(
{
event_trigger.CONF_PLATFORM: "event",
event_trigger.CONF_EVENT_TYPE: EVENT_RFXTRX_EVENT,
event_trigger.CONF_EVENT_DATA: event_data,
}
)
return await event_trigger.async_attach_trigger(
hass, event_config, action, automation_info, platform_type="device"
)
| 29.640351 | 82 | 0.702279 |
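`device_trigger.py` above leans on voluptuous: `TRIGGER_SCHEMA` extends the base device-trigger schema with a `type` restricted to `command`/`status` and a free-form string `subtype`. Below is a standalone sketch of that validation pattern using only voluptuous; the keys are illustrative and omit the base-schema fields that Home Assistant adds.

```python
import voluptuous as vol

# A reduced trigger schema: 'type' must be one of two values, 'subtype' any string.
schema = vol.Schema({
    vol.Required("type"): vol.In(["command", "status"]),
    vol.Required("subtype"): str,
})

schema({"type": "command", "subtype": "On"})      # passes and returns the dict

try:
    schema({"type": "dimming", "subtype": "On"})  # 'dimming' is not an allowed type
except vol.MultipleInvalid as err:
    print(err)                                    # explains which key failed validation
```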
| b290808c6fa5e40545fcb9fb90c86297b422b089 | 1,412 | py | Python | pytorch_lightning/metrics/regression/ssim.py | lyuxingjian/pytorch-lightning | 576afd59f3456eb06fb8805ed65a24b41c428a3e | ["Apache-2.0"] | 1 | 2021-03-25T22:39:21.000Z | 2021-03-25T22:39:21.000Z | pytorch_lightning/metrics/regression/ssim.py | lyuxingjian/pytorch-lightning | 576afd59f3456eb06fb8805ed65a24b41c428a3e | ["Apache-2.0"] | null | null | null | pytorch_lightning/metrics/regression/ssim.py | lyuxingjian/pytorch-lightning | 576afd59f3456eb06fb8805ed65a24b41c428a3e | ["Apache-2.0"] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional, Sequence
from torchmetrics import SSIM as _SSIM
from pytorch_lightning.utilities.deprecation import deprecated
class SSIM(_SSIM):
@deprecated(target=_SSIM, ver_deprecate="1.3.0", ver_remove="1.5.0")
def __init__(
self,
kernel_size: Sequence[int] = (11, 11),
sigma: Sequence[float] = (1.5, 1.5),
reduction: str = "elementwise_mean",
data_range: Optional[float] = None,
k1: float = 0.01,
k2: float = 0.03,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
"""
This implementation refers to :class:`~torchmetrics.SSIM`.
.. deprecated::
Use :class:`~torchmetrics.SSIM`. Will be removed in v1.5.0.
"""
| 33.619048 | 74 | 0.674221 |
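The `SSIM` class above is a thin shim: it subclasses the torchmetrics implementation, and the `@deprecated` decorator warns and forwards construction to the maintained class. The sketch below is not pytorch-lightning's actual helper, only a minimal standard-library illustration of the same forwarding-shim idea; `NewMetric`, `OldMetric` and `deprecated_alias` are invented names.

```python
import warnings


class NewMetric:
    """Stand-in for the maintained implementation (e.g. a torchmetrics class)."""

    def __init__(self, kernel_size=(11, 11)):
        self.kernel_size = kernel_size


def deprecated_alias(target):
    """Class decorator: keep the old name importable, warn, and delegate to target."""

    def wrap(cls):
        class Shim(target):
            def __init__(self, *args, **kwargs):
                warnings.warn(
                    f"{cls.__name__} is deprecated, use {target.__name__} instead",
                    DeprecationWarning,
                    stacklevel=2,
                )
                super().__init__(*args, **kwargs)

        Shim.__name__ = cls.__name__
        return Shim

    return wrap


@deprecated_alias(NewMetric)
class OldMetric:
    pass


metric = OldMetric(kernel_size=(7, 7))  # warns, then behaves exactly like NewMetric
```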
| d86f161535d08da32811579e68d103a3a76a15e7 | 1,413 | py | Python | PYQT/pyqt_18_menuaction_trigger.py | dogancantorun8/python-application | 3ef972e52bb6950108cde36974ceaf5c3cde3667 | ["MIT"] | null | null | null | PYQT/pyqt_18_menuaction_trigger.py | dogancantorun8/python-application | 3ef972e52bb6950108cde36974ceaf5c3cde3667 | ["MIT"] | null | null | null | PYQT/pyqt_18_menuaction_trigger.py | dogancantorun8/python-application | 3ef972e52bb6950108cde36974ceaf5c3cde3667 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 26 18:47:15 2021
@author: dogancan
"""
# Catch the signal emitted by each menu-bar action via its triggered() connection
import sys
from PyQt5.QtWidgets import *
from PyQt5.Qt import *
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.resize(640, 480)
self.filePopup = QMenu('File', self)
self.menuBar().addMenu(self.filePopup)
self.editPopup = self.menuBar().addMenu('Edit')
self.openAction = self.filePopup.addAction('&Open')
        self.openAction.triggered.connect(self.openActionHandler)  # connect the Open action's triggered signal to its handler
self.closeAction = self.filePopup.addAction('&Close')
self.closeAction.triggered.connect(self.closeActionHandler)
self.cutAction = self.editPopup.addAction('&Cut', self.cutActionHandler)
self.copyAction = self.editPopup.addAction('&Copy', self.copyActionHandler)
self.pasteAction = self.editPopup.addAction('&Paste', self.pasteActionHandler)
def openActionHandler(self):
print('Open')
def closeActionHandler(self):
self.close()
def cutActionHandler(self):
print('Cut')
def copyActionHandler(self):
print('Copy')
def pasteActionHandler(self):
print('Paste')
app = QApplication(sys.argv)
mainWindow = MainWindow()
mainWindow.show()
app.exec()
| 26.660377 | 114 | 0.679406 |
| 6dc9d8f8865f95629c56d480eff719f30eebb16c | 665 | py | Python | qsiprep/__about__.py | arokem/qsiprep | f0a12fa002ea99cad97f2b5e40c1517d0569e14c | ["BSD-3-Clause"] | 36 | 2019-04-07T18:53:15.000Z | 2021-04-04T10:35:54.000Z | qsiprep/__about__.py | arokem/qsiprep | f0a12fa002ea99cad97f2b5e40c1517d0569e14c | ["BSD-3-Clause"] | 178 | 2019-02-27T16:36:06.000Z | 2021-04-06T12:48:38.000Z | qsiprep/__about__.py | arokem/qsiprep | f0a12fa002ea99cad97f2b5e40c1517d0569e14c | ["BSD-3-Clause"] | 20 | 2019-04-05T19:17:26.000Z | 2021-03-25T14:47:32.000Z |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Base module variables
"""
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__packagename__ = 'qsiprep'
__copyright__ = 'Copyright 2019, Brain Behavior Laboratory, University of Pennsylvania'
__credits__ = ('Contributors: please check the ``.zenodo.json`` file at the top-level folder '
'of the repository')
__url__ = 'https://github.com/pennbbl/qsiprep'
DOWNLOAD_URL = (
'https://github.com/pennbbl/{name}/archive/{ver}.tar.gz'.format(
name=__packagename__, ver=__version__))
| 35 | 93 | 0.709774 |
| fd37883bf984a6fdfcf374fbe97bb966f776787d | 5,151 | py | Python | airflow/contrib/operators/gcs_operator.py | shrutimantri/airflow | 61eaaacd20ab0f743786df895cf8f232b3b2a48c | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 15 | 2017-04-06T09:01:50.000Z | 2021-10-02T13:54:31.000Z | airflow/contrib/operators/gcs_operator.py | shrutimantri/airflow | 61eaaacd20ab0f743786df895cf8f232b3b2a48c | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 26 | 2019-08-05T13:44:11.000Z | 2022-03-30T10:06:18.000Z | airflow/contrib/operators/gcs_operator.py | shrutimantri/airflow | 61eaaacd20ab0f743786df895cf8f232b3b2a48c | ["Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause"] | 21 | 2017-08-20T03:01:05.000Z | 2021-09-07T06:47:51.000Z |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.version import version
class GoogleCloudStorageCreateBucketOperator(BaseOperator):
"""
Creates a new bucket. Google Cloud Storage uses a flat namespace,
so you can't create a bucket with a name that is already in use.
.. seealso::
For more information, see Bucket Naming Guidelines:
https://cloud.google.com/storage/docs/bucketnaming.html#requirements
:param bucket_name: The name of the bucket. (templated)
:type bucket_name: str
:param resource: An optional dict with parameters for creating the bucket.
For information on available parameters, see Cloud Storage API doc:
https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
:type resource: dict
:param storage_class: This defines how objects in the bucket are stored
and determines the SLA and the cost of storage (templated). Values include
- ``MULTI_REGIONAL``
- ``REGIONAL``
- ``STANDARD``
- ``NEARLINE``
- ``COLDLINE``.
If this value is not specified when the bucket is
created, it will default to STANDARD.
:type storage_class: str
:param location: The location of the bucket. (templated)
Object data for objects in the bucket resides in physical storage
within this region. Defaults to US.
.. seealso:: https://developers.google.com/storage/docs/bucket-locations
:type location: str
:param project_id: The ID of the GCP Project. (templated)
:type project_id: str
:param labels: User-provided labels, in key/value pairs.
:type labels: dict
:param google_cloud_storage_conn_id: The connection ID to use when
connecting to Google cloud storage.
:type google_cloud_storage_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must
have domain-wide delegation enabled.
:type delegate_to: str
The following Operator would create a new bucket ``test-bucket``
with ``MULTI_REGIONAL`` storage class in ``EU`` region
.. code-block:: python
CreateBucket = GoogleCloudStorageCreateBucketOperator(
task_id='CreateNewBucket',
bucket_name='test-bucket',
storage_class='MULTI_REGIONAL',
location='EU',
labels={'env': 'dev', 'team': 'airflow'},
google_cloud_storage_conn_id='airflow-service-account'
)
"""
template_fields = ('bucket_name', 'storage_class',
'location', 'project_id')
ui_color = '#f0eee4'
@apply_defaults
def __init__(self,
bucket_name,
resource=None,
storage_class='MULTI_REGIONAL',
location='US',
project_id=None,
labels=None,
google_cloud_storage_conn_id='google_cloud_default',
delegate_to=None,
*args,
**kwargs):
super(GoogleCloudStorageCreateBucketOperator, self).__init__(*args, **kwargs)
self.bucket_name = bucket_name
self.resource = resource
self.storage_class = storage_class
self.location = location
self.project_id = project_id
self.labels = labels
self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
self.delegate_to = delegate_to
def execute(self, context):
if self.labels is not None:
self.labels.update(
{'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')}
)
hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
delegate_to=self.delegate_to
)
hook.create_bucket(bucket_name=self.bucket_name,
resource=self.resource,
storage_class=self.storage_class,
location=self.location,
project_id=self.project_id,
labels=self.labels)
| 39.022727 | 86 | 0.650165 |
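The operator docstring above already lists the arguments; as a hedged usage sketch, here is how such an operator is typically placed inside a DAG. The imports are Airflow 1.10-era to match the `airflow.contrib` path above, the DAG id, date and connection id are illustrative, and the task only actually runs under a real Airflow deployment with valid GCP credentials.

```python
from datetime import datetime

from airflow import DAG
from airflow.contrib.operators.gcs_operator import GoogleCloudStorageCreateBucketOperator

with DAG(
    dag_id="gcs_bucket_example",      # illustrative DAG id
    start_date=datetime(2019, 1, 1),
    schedule_interval=None,           # trigger manually
) as dag:
    create_bucket = GoogleCloudStorageCreateBucketOperator(
        task_id="CreateNewBucket",
        bucket_name="test-bucket",
        storage_class="MULTI_REGIONAL",
        location="EU",
        labels={"env": "dev", "team": "airflow"},
        google_cloud_storage_conn_id="google_cloud_default",
    )
```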
| 9ba1c1f715455233bd1f6adc0c087a244e74281b | 16,061 | py | Python | sacred/initialize.py | ssudholt/sacred | 6fb05b1ee1b7706b44c3ebd852e1e234841ea2ce | ["MIT"] | null | null | null | sacred/initialize.py | ssudholt/sacred | 6fb05b1ee1b7706b44c3ebd852e1e234841ea2ce | ["MIT"] | null | null | null | sacred/initialize.py | ssudholt/sacred | 6fb05b1ee1b7706b44c3ebd852e1e234841ea2ce | ["MIT"] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import os
from collections import OrderedDict, defaultdict
from copy import copy, deepcopy
from sacred.config import (ConfigDict, chain_evaluate_config_scopes, dogmatize,
load_config_file, undogmatize)
from sacred.config.config_summary import ConfigSummary
from sacred.host_info import get_host_info
from sacred.randomness import create_rnd, get_seed
from sacred.run import Run
from sacred.utils import (convert_to_nested_dict, create_basic_stream_logger,
get_by_dotted_path, is_prefix, rel_path,
iterate_flattened, set_by_dotted_path,
recursive_update, iter_prefixes, join_paths,
NamedConfigNotFoundError, ConfigAddedError)
class Scaffold(object):
def __init__(self, config_scopes, subrunners, path, captured_functions,
commands, named_configs, config_hooks, generate_seed):
self.config_scopes = config_scopes
self.named_configs = named_configs
self.subrunners = subrunners
self.path = path
self.generate_seed = generate_seed
self.config_hooks = config_hooks
self.config_updates = {}
self.named_configs_to_use = []
self.config = {}
self.fallback = None
self.presets = {}
self.fixture = None # TODO: rename
self.logger = None
self.seed = None
self.rnd = None
self._captured_functions = captured_functions
self.commands = commands
self.config_mods = None
self.summaries = []
self.captured_args = {join_paths(cf.prefix, n)
for cf in self._captured_functions
for n in cf.signature.arguments}
self.captured_args.add('__doc__') # allow setting the config docstring
def set_up_seed(self, rnd=None):
if self.seed is not None:
return
self.seed = self.config.get('seed')
if self.seed is None:
self.seed = get_seed(rnd)
self.rnd = create_rnd(self.seed)
if self.generate_seed:
self.config['seed'] = self.seed
if 'seed' in self.config and 'seed' in self.config_mods.added:
self.config_mods.modified.add('seed')
self.config_mods.added -= {'seed'}
# Hierarchically set the seed of proper subrunners
for subrunner_path, subrunner in reversed(list(
self.subrunners.items())):
if is_prefix(self.path, subrunner_path):
subrunner.set_up_seed(self.rnd)
def gather_fallbacks(self):
fallback = {'_log': self.logger}
for sr_path, subrunner in self.subrunners.items():
if self.path and is_prefix(self.path, sr_path):
path = sr_path[len(self.path):].strip('.')
set_by_dotted_path(fallback, path, subrunner.config)
else:
set_by_dotted_path(fallback, sr_path, subrunner.config)
# dogmatize to make the subrunner configurations read-only
self.fallback = dogmatize(fallback)
self.fallback.revelation()
def run_named_config(self, config_name):
if os.path.isfile(config_name):
nc = ConfigDict(load_config_file(config_name))
else:
if config_name not in self.named_configs:
raise NamedConfigNotFoundError(
named_config=config_name,
available_named_configs=tuple(self.named_configs.keys()))
nc = self.named_configs[config_name]
cfg = nc(fixed=self.get_config_updates_recursive(),
preset=self.presets,
fallback=self.fallback)
return undogmatize(cfg)
def set_up_config(self):
self.config, self.summaries = chain_evaluate_config_scopes(
self.config_scopes,
fixed=self.config_updates,
preset=self.config,
fallback=self.fallback)
self.get_config_modifications()
def run_config_hooks(self, config, command_name, logger):
final_cfg_updates = {}
for ch in self.config_hooks:
cfg_upup = ch(deepcopy(config), command_name, logger)
if cfg_upup:
recursive_update(final_cfg_updates, cfg_upup)
recursive_update(final_cfg_updates, self.config_updates)
return final_cfg_updates
def get_config_modifications(self):
self.config_mods = ConfigSummary(
added={key
for key, value in iterate_flattened(self.config_updates)})
for cfg_summary in self.summaries:
self.config_mods.update_from(cfg_summary)
def get_config_updates_recursive(self):
config_updates = self.config_updates.copy()
for sr_path, subrunner in self.subrunners.items():
if not is_prefix(self.path, sr_path):
continue
update = subrunner.get_config_updates_recursive()
if update:
config_updates[rel_path(self.path, sr_path)] = update
return config_updates
def get_fixture(self):
if self.fixture is not None:
return self.fixture
def get_fixture_recursive(runner):
for sr_path, subrunner in runner.subrunners.items():
# I am not sure if it is necessary to trigger all
subrunner.get_fixture()
get_fixture_recursive(subrunner)
sub_fix = copy(subrunner.config)
sub_path = sr_path
if is_prefix(self.path, sub_path):
sub_path = sr_path[len(self.path):].strip('.')
# Note: This might fail if we allow non-dict fixtures
set_by_dotted_path(self.fixture, sub_path, sub_fix)
self.fixture = copy(self.config)
get_fixture_recursive(self)
return self.fixture
def finalize_initialization(self, run):
# look at seed again, because it might have changed during the
# configuration process
if 'seed' in self.config:
self.seed = self.config['seed']
self.rnd = create_rnd(self.seed)
for cfunc in self._captured_functions:
cfunc.logger = self.logger.getChild(cfunc.__name__)
cfunc.config = get_by_dotted_path(self.get_fixture(), cfunc.prefix,
default={})
seed = get_seed(self.rnd)
cfunc.rnd = create_rnd(seed)
cfunc.run = run
if not run.force:
self._warn_about_suspicious_changes()
def _warn_about_suspicious_changes(self):
for add in sorted(self.config_mods.added):
if not set(iter_prefixes(add)).intersection(self.captured_args):
if self.path:
add = join_paths(self.path, add)
raise ConfigAddedError(add, config=self.config)
else:
self.logger.warning('Added new config entry: "%s"' % add)
for key, (type_old, type_new) in self.config_mods.typechanged.items():
if type_old in (int, float) and type_new in (int, float):
continue
self.logger.warning(
'Changed type of config entry "%s" from %s to %s' %
(key, type_old.__name__, type_new.__name__))
for cfg_summary in self.summaries:
for key in cfg_summary.ignored_fallbacks:
self.logger.warning(
'Ignored attempt to set value of "%s", because it is an '
'ingredient.' % key
)
def __repr__(self):
return "<Scaffold: '{}'>".format(self.path)
def get_configuration(scaffolding):
config = {}
for sc_path, scaffold in reversed(list(scaffolding.items())):
if not scaffold.config:
continue
if sc_path:
set_by_dotted_path(config, sc_path, scaffold.config)
else:
config.update(scaffold.config)
return config
def distribute_named_configs(scaffolding, named_configs):
for ncfg in named_configs:
if os.path.exists(ncfg):
scaffolding[''].use_named_config(ncfg)
else:
path, _, cfg_name = ncfg.rpartition('.')
if path not in scaffolding:
raise KeyError('Ingredient for named config "{}" not found'
.format(ncfg))
scaffolding[path].use_named_config(cfg_name)
def initialize_logging(experiment, scaffolding, log_level=None):
if experiment.logger is None:
root_logger = create_basic_stream_logger()
else:
root_logger = experiment.logger
for sc_path, scaffold in scaffolding.items():
if sc_path:
scaffold.logger = root_logger.getChild(sc_path)
else:
scaffold.logger = root_logger
# set log level
if log_level is not None:
try:
lvl = int(log_level)
except ValueError:
lvl = log_level
root_logger.setLevel(lvl)
return root_logger, root_logger.getChild(experiment.path)
def create_scaffolding(experiment, sorted_ingredients):
scaffolding = OrderedDict()
for ingredient in sorted_ingredients[:-1]:
scaffolding[ingredient] = Scaffold(
config_scopes=ingredient.configurations,
subrunners=OrderedDict([(scaffolding[m].path, scaffolding[m])
for m in ingredient.ingredients]),
path=ingredient.path,
captured_functions=ingredient.captured_functions,
commands=ingredient.commands,
named_configs=ingredient.named_configs,
config_hooks=ingredient.config_hooks,
generate_seed=False)
scaffolding[experiment] = Scaffold(
experiment.configurations,
subrunners=OrderedDict([(scaffolding[m].path, scaffolding[m])
for m in experiment.ingredients]),
path='',
captured_functions=experiment.captured_functions,
commands=experiment.commands,
named_configs=experiment.named_configs,
config_hooks=experiment.config_hooks,
generate_seed=True)
scaffolding_ret = OrderedDict([
(sc.path, sc)
for sc in scaffolding.values()
])
if len(scaffolding_ret) != len(scaffolding):
raise ValueError(
            'The paths of the ingredients are not unique. '
'{}'.format([s.path for s in scaffolding])
)
return scaffolding_ret
def gather_ingredients_topological(ingredient):
sub_ingredients = defaultdict(int)
for sub_ing, depth in ingredient.traverse_ingredients():
sub_ingredients[sub_ing] = max(sub_ingredients[sub_ing], depth)
return sorted(sub_ingredients, key=lambda x: -sub_ingredients[x])
def get_config_modifications(scaffolding):
config_modifications = ConfigSummary()
for sc_path, scaffold in scaffolding.items():
config_modifications.update_add(scaffold.config_mods, path=sc_path)
return config_modifications
def get_command(scaffolding, command_path):
path, _, command_name = command_path.rpartition('.')
if path not in scaffolding:
raise KeyError('Ingredient for command "%s" not found.' % command_path)
if command_name in scaffolding[path].commands:
return scaffolding[path].commands[command_name]
else:
if path:
raise KeyError('Command "%s" not found in ingredient "%s"' %
(command_name, path))
else:
raise KeyError('Command "%s" not found' % command_name)
def find_best_match(path, prefixes):
"""Find the Ingredient that shares the longest prefix with path."""
path_parts = path.split('.')
for p in prefixes:
if len(p) <= len(path_parts) and p == path_parts[:len(p)]:
return '.'.join(p), '.'.join(path_parts[len(p):])
return '', path
def distribute_presets(prefixes, scaffolding, config_updates):
for path, value in iterate_flattened(config_updates):
scaffold_name, suffix = find_best_match(path, prefixes)
scaff = scaffolding[scaffold_name]
set_by_dotted_path(scaff.presets, suffix, value)
def distribute_config_updates(prefixes, scaffolding, config_updates):
for path, value in iterate_flattened(config_updates):
scaffold_name, suffix = find_best_match(path, prefixes)
scaff = scaffolding[scaffold_name]
set_by_dotted_path(scaff.config_updates, suffix, value)
def get_scaffolding_and_config_name(named_config, scaffolding):
if os.path.exists(named_config):
path, cfg_name = '', named_config
else:
path, _, cfg_name = named_config.rpartition('.')
if path not in scaffolding:
raise KeyError('Ingredient for named config "{}" not found'
.format(named_config))
scaff = scaffolding[path]
return scaff, cfg_name
def create_run(experiment, command_name, config_updates=None,
named_configs=(), force=False, log_level=None):
sorted_ingredients = gather_ingredients_topological(experiment)
scaffolding = create_scaffolding(experiment, sorted_ingredients)
# get all split non-empty prefixes sorted from deepest to shallowest
prefixes = sorted([s.split('.') for s in scaffolding if s != ''],
reverse=True, key=lambda p: len(p))
# --------- configuration process -------------------
# Phase 1: Config updates
config_updates = config_updates or {}
config_updates = convert_to_nested_dict(config_updates)
root_logger, run_logger = initialize_logging(experiment, scaffolding,
log_level)
distribute_config_updates(prefixes, scaffolding, config_updates)
# Phase 2: Named Configs
for ncfg in named_configs:
scaff, cfg_name = get_scaffolding_and_config_name(ncfg, scaffolding)
scaff.gather_fallbacks()
ncfg_updates = scaff.run_named_config(cfg_name)
distribute_presets(prefixes, scaffolding, ncfg_updates)
for ncfg_key, value in iterate_flattened(ncfg_updates):
set_by_dotted_path(config_updates,
join_paths(scaff.path, ncfg_key),
value)
distribute_config_updates(prefixes, scaffolding, config_updates)
# Phase 3: Normal config scopes
for scaffold in scaffolding.values():
scaffold.gather_fallbacks()
scaffold.set_up_config()
# update global config
config = get_configuration(scaffolding)
# run config hooks
config_hook_updates = scaffold.run_config_hooks(
config, command_name, run_logger)
recursive_update(scaffold.config, config_hook_updates)
# Phase 4: finalize seeding
for scaffold in reversed(list(scaffolding.values())):
scaffold.set_up_seed() # partially recursive
config = get_configuration(scaffolding)
config_modifications = get_config_modifications(scaffolding)
# ----------------------------------------------------
experiment_info = experiment.get_experiment_info()
host_info = get_host_info()
main_function = get_command(scaffolding, command_name)
pre_runs = [pr for ing in sorted_ingredients for pr in ing.pre_run_hooks]
post_runs = [pr for ing in sorted_ingredients for pr in ing.post_run_hooks]
run = Run(config, config_modifications, main_function,
copy(experiment.observers), root_logger, run_logger,
experiment_info, host_info, pre_runs, post_runs,
experiment.captured_out_filter)
if hasattr(main_function, 'unobserved'):
run.unobserved = main_function.unobserved
run.force = force
for scaffold in scaffolding.values():
scaffold.finalize_initialization(run=run)
return run
| 37.879717 | 79 | 0.63676 |
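`initialize.py` above routes configuration values with dotted paths (`set_by_dotted_path`, `get_by_dotted_path`, `iterate_flattened`): a key like `'dataset.loader.batch_size'` addresses one leaf in a nested dict of ingredient configs. The helpers below are not sacred's implementations, just a minimal re-creation of the dotted-path idea so the routing logic above is easier to follow.

```python
def set_by_dotted_path(d, path, value):
    """Create nested dicts along 'a.b.c' and set the leaf value."""
    *parents, leaf = path.split(".")
    current = d
    for key in parents:
        current = current.setdefault(key, {})
    current[leaf] = value


def get_by_dotted_path(d, path, default=None):
    """Walk 'a.b.c' through nested dicts, returning default on any miss."""
    current = d
    for key in path.split("."):
        if not isinstance(current, dict) or key not in current:
            return default
        current = current[key]
    return current


cfg = {}
set_by_dotted_path(cfg, "dataset.loader.batch_size", 32)
assert cfg == {"dataset": {"loader": {"batch_size": 32}}}
assert get_by_dotted_path(cfg, "dataset.loader.batch_size") == 32
```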
| 332fa38fd11aedc458012100fbd8988c347edf4a | 3,786 | py | Python | tests/test_forms.py | zgrzebnickij/demo-cookiecutter-flask | 3031c447dbaa3e819c1638cae79e83ee5869e292 | ["MIT"] | null | null | null | tests/test_forms.py | zgrzebnickij/demo-cookiecutter-flask | 3031c447dbaa3e819c1638cae79e83ee5869e292 | ["MIT"] | null | null | null | tests/test_forms.py | zgrzebnickij/demo-cookiecutter-flask | 3031c447dbaa3e819c1638cae79e83ee5869e292 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""Test forms."""
import json
import pytest
from my_flask_app.public.forms import LoginForm
from my_flask_app.user.forms import RegisterForm
from my_flask_app.quiz.forms import QuizForm
class TestRegisterForm:
"""Register form."""
def test_validate_user_already_registered(self, user):
"""Enter username that is already registered."""
form = RegisterForm(
username=user.username,
email="foo@bar.com",
password="example",
confirm="example",
)
assert form.validate() is False
assert "Username already registered" in form.username.errors
def test_validate_email_already_registered(self, user):
"""Enter email that is already registered."""
form = RegisterForm(
username="unique", email=user.email, password="example", confirm="example"
)
assert form.validate() is False
assert "Email already registered" in form.email.errors
def test_validate_success(self, db):
"""Register with success."""
form = RegisterForm(
username="newusername",
email="new@test.test",
password="example",
confirm="example",
)
assert form.validate() is True
class TestLoginForm:
"""Login form."""
def test_validate_success(self, user):
"""Login successful."""
user.set_password("example")
user.save()
form = LoginForm(username=user.username, password="example")
assert form.validate() is True
assert form.user == user
def test_validate_unknown_username(self, db):
"""Unknown username."""
form = LoginForm(username="unknown", password="example")
assert form.validate() is False
assert "Unknown username" in form.username.errors
assert form.user is None
def test_validate_invalid_password(self, user):
"""Invalid password."""
user.set_password("example")
user.save()
form = LoginForm(username=user.username, password="wrongpassword")
assert form.validate() is False
assert "Invalid password" in form.password.errors
def test_validate_inactive_user(self, user):
"""Inactive user."""
user.active = False
user.set_password("example")
user.save()
# Correct username and password, but user is not activated
form = LoginForm(username=user.username, password="example")
assert form.validate() is False
assert "User not activated" in form.username.errors
class TestQuizForm:
"""Quiz form."""
def test_validate(self, user, guestions_fake):
"""Enter username that is already registered."""
form = QuizForm(
questions=json.dumps(guestions_fake),
answer_1="blabla",
answer_2="blabla",
answer_3="blabla",
answer_4="blabla",
answer_5="blabla"
)
assert form.validate() is True
def test_validate_false(self, user, guestions_fake):
"""Enter username that is already registered."""
form = QuizForm(
questions=json.dumps(guestions_fake),
answer_1=1,
answer_2="blabla",
answer_3="blabla",
answer_4=False,
answer_5="blabla"
)
assert form.validate() is False
def test_validate_missing_field(self, user, guestions_fake):
"""Enter username that is already registered."""
form = QuizForm(
questions=json.dumps(guestions_fake),
answer_1=1,
answer_2="blabla",
answer_4=False,
answer_5="blabla"
)
assert form.validate() is False
| 31.032787 | 86 | 0.611463 |
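The register/login tests above drive WTForms-style forms whose `validate()` adds cross-field checks such as "Username already registered". As a hedged, standalone sketch of that pattern with plain WTForms (this is not the `my_flask_app` code; `EXISTING_USERNAMES` and the field set are invented for illustration):

```python
from wtforms import Form, PasswordField, StringField, validators

EXISTING_USERNAMES = {"taken"}  # stands in for a database lookup


class RegisterForm(Form):
    username = StringField("Username", [validators.DataRequired()])
    password = PasswordField("Password", [validators.DataRequired()])
    confirm = PasswordField("Confirm", [validators.EqualTo("password")])

    def validate(self, **kwargs):
        # Run the field-level validators first, then the uniqueness check.
        if not super().validate(**kwargs):
            return False
        if self.username.data in EXISTING_USERNAMES:
            self.username.errors.append("Username already registered")
            return False
        return True


form = RegisterForm(username="taken", password="example", confirm="example")
assert form.validate() is False
assert "Username already registered" in form.username.errors
```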
| 190d4d0a410af4b2b2ebc029c68997a285b61532 | 55 | py | Python | T31-09/program.py | maa76/SSof-Project1920 | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | ["MIT"] | 2 | 2019-11-20T19:26:07.000Z | 2019-11-22T00:42:23.000Z | T31-09/program.py | maa76/SSof-Project1920 | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | ["MIT"] | 2 | 2019-11-28T05:21:24.000Z | 2019-11-28T05:21:58.000Z | T31-09/program.py | maa76/SSof-Project1920 | 9b4ad9ac41a648c425fcfcd49cd52ff84e528bde | ["MIT"] | 25 | 2019-11-27T01:40:56.000Z | 2019-12-04T23:38:59.000Z |
a = b
if a:
c = 1
else:
c = 2
d = function(c)
| 6.875 | 15 | 0.418182 |
| ab78a8113b48fe977d0a57d0a78b72bb21361ab9 | 424 | py | Python | myapp/urls.py | free20064u/flower | fa8dd4b522ede930baf8cd4e533c869ebf9cf009 | ["MIT"] | null | null | null | myapp/urls.py | free20064u/flower | fa8dd4b522ede930baf8cd4e533c869ebf9cf009 | ["MIT"] | null | null | null | myapp/urls.py | free20064u/flower | fa8dd4b522ede930baf8cd4e533c869ebf9cf009 | ["MIT"] | null | null | null |
from django.contrib import admin
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('flower/<slug:slug>/', views.detail, name='detail'),
path('tags/<slug:slug>/', views.tags, name='tags'),
path('create/', views.create, name='create'),
path('edit/<int:pk>', views.edit, name='edit'),
path('delete/<int:pk>', views.delete, name='delete'),
]
| 26.5 | 61 | 0.636792 |
| 04ca57b4959f9b6db41df9fa689259c924e50df2 | 2,281 | py | Python | tests/codebase/test_no_request_host.py | timgates42/bokeh | fb8b07b838f4d07d520cfe899779a11bc89f3c77 | ["BSD-3-Clause"] | 1 | 2015-01-31T14:42:39.000Z | 2015-01-31T14:42:39.000Z | tests/codebase/test_no_request_host.py | timgates42/bokeh | fb8b07b838f4d07d520cfe899779a11bc89f3c77 | ["BSD-3-Clause"] | 1 | 2021-05-08T06:24:26.000Z | 2021-05-08T06:24:26.000Z | tests/codebase/test_no_request_host.py | timgates42/bokeh | fb8b07b838f4d07d520cfe899779a11bc89f3c77 | ["BSD-3-Clause"] | 1 | 2020-01-21T12:03:58.000Z | 2020-01-21T12:03:58.000Z |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import io
from os.path import relpath
from subprocess import check_output
# Bokeh imports
from . import TOP_PATH
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
@pytest.mark.codebase
def test_no_request_host():
''' It is not safe for the Bokeh codebase to use request.host in any way.
This test ensures "request.host" does not appear in any file.
'''
errors = collect_errors()
assert len(errors) == 0, "request.host usage issues:\n%s" % "\n".join(errors)
#-----------------------------------------------------------------------------
# Support
#-----------------------------------------------------------------------------
message = "File refers to 'request.host': %s, line %s."
def collect_errors():
errors = []
def test_this_file(fname, test_file):
for line_no, line in enumerate(test_file, 1):
if "request.host" in line.split("#")[0]:
errors.append((message, fname, line_no))
paths = check_output(["git", "ls-files"]).decode('utf-8').split("\n")
for path in paths:
if not path:
continue
if not path.endswith(".py"):
continue
if not path.startswith("bokeh/server"):
continue
with io.open(path, 'r', encoding='utf-8') as file:
test_this_file(path, file)
return [ msg % (relpath(fname, TOP_PATH), line_no) for (msg, fname, line_no) in errors ]
| 33.057971 | 92 | 0.421745 |
| 4c85af7e61d2499ea536be0285b6c72459d9f90f | 567 | py | Python | database/connection.py | apoveda25/graphql-python-server | eb7b911aa1116327120b857beb17da3e30523e74 | ["Apache-2.0"] | 4 | 2020-06-20T11:54:04.000Z | 2021-09-07T11:41:32.000Z | database/connection.py | apoveda25/graphql-python-server | eb7b911aa1116327120b857beb17da3e30523e74 | ["Apache-2.0"] | null | null | null | database/connection.py | apoveda25/graphql-python-server | eb7b911aa1116327120b857beb17da3e30523e74 | ["Apache-2.0"] | null | null | null |
import os
from arango import ArangoClient
class Connection:
def __init__(self):
self.ARANGODB_HOSTS = os.getenv("ARANGODB_HOSTS")
self.ARANGODB_DATABASE = os.getenv("ARANGODB_DATABASE")
self.ARANGODB_USER = os.getenv("ARANGODB_USER")
self.ARANGODB_PASSWORD = os.getenv("ARANGODB_PASSWORD")
self.client = ArangoClient(hosts=os.getenv("ARANGODB_HOSTS"))
self.conn = self.client.db(
self.ARANGODB_DATABASE,
username=self.ARANGODB_USER,
password=self.ARANGODB_PASSWORD,
)
| 31.5 | 69 | 0.670194 |
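`Connection` above simply wires four environment variables into python-arango's `ArangoClient`. A usage sketch follows, assuming the class is importable as `database.connection.Connection` (its path in that repo), the `ARANGODB_*` variables are set, and an ArangoDB server is actually reachable.

```python
from database.connection import Connection  # module path in the repo above

db = Connection().conn  # python-arango database handle

# List the database's collections and run a trivial AQL query.
print([c["name"] for c in db.collections()])
print(list(db.aql.execute("RETURN DATE_NOW()")))
```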
| 3fed2e08c81b4b02d93ca25e9ab64fa98fb7e411 | 602 | py | Python | main_loop.py | ivanpeng/craigslist-housing | 98a197dd77a7c42a5b2423748f75563e1e6a8fc8 | ["MIT-0"] | null | null | null | main_loop.py | ivanpeng/craigslist-housing | 98a197dd77a7c42a5b2423748f75563e1e6a8fc8 | ["MIT-0"] | 1 | 2021-06-01T22:55:54.000Z | 2021-06-01T22:55:54.000Z | main_loop.py | ivanpeng/craigslist-housing | 98a197dd77a7c42a5b2423748f75563e1e6a8fc8 | ["MIT-0"] | null | null | null |
from scraper import do_scrape
import settings
import time
import sys
import traceback
import random
if __name__ == "__main__":
while True:
print("{}: Starting scrape cycle".format(time.ctime()))
try:
do_scrape()
except KeyboardInterrupt:
print("Exiting....")
sys.exit(1)
except Exception as exc:
print("Error with the scraping:", sys.exc_info()[0])
traceback.print_exc()
else:
print("{}: Successfully finished scraping".format(time.ctime()))
time.sleep(settings.SLEEP_INTERVAL)
| 28.666667 | 76 | 0.606312 |
| 304695852b0b5fd66e48ee196c630d7c80ea5ebd | 336 | py | Python | backend/sandbox/todos/models/__init__.py | MMotionMan/django-edw | 0f686429d29e0f40409a3b2318664973b2844c08 | ["BSD-3-Clause"] | 4 | 2019-09-18T05:51:12.000Z | 2020-10-23T08:50:00.000Z | backend/sandbox/todos/models/__init__.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | ["BSD-3-Clause"] | 10 | 2020-04-29T11:46:44.000Z | 2022-03-11T23:38:27.000Z | backend/sandbox/todos/models/__init__.py | Vvvnukova/django-edw | 18397c2e6e2d7ddebad4d83ffee16425e7ac4e9f | ["BSD-3-Clause"] | 13 | 2020-04-09T07:49:48.000Z | 2022-03-02T07:06:28.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# import default models from django-edw to materialize them
from edw.models.defaults import mapping
from edw.models.defaults.customer import Customer
from edw.models.defaults.term import Term
from edw.models.defaults.data_mart import DataMart
from todos import Todo
| 22.4 | 59 | 0.803571 |
| 414af702abfff71e4d4570e34cd194d11de8682c | 2,564 | py | Python | examples/simo/simo.res50.scratch.imagenet.224size.256bs.200e/config.py | poodarchu/SelfSup | 29f7d338bef20f927bb0898f2c98da4f14b60ab1 | ["Apache-2.0"] | 42 | 2020-12-07T07:03:05.000Z | 2022-03-23T12:56:13.000Z | examples/simo/simo.res50.scratch.imagenet.224size.256bs.200e/config.py | Megvii-BaseDetection/SelfSup | 29f7d338bef20f927bb0898f2c98da4f14b60ab1 | ["Apache-2.0"] | 1 | 2020-12-07T12:59:16.000Z | 2020-12-08T06:25:37.000Z | examples/simo/simo.res50.scratch.imagenet.224size.256bs.200e/config.py | poodarchu/SelfSup | 29f7d338bef20f927bb0898f2c98da4f14b60ab1 | ["Apache-2.0"] | 3 | 2021-04-22T10:56:29.000Z | 2021-10-21T08:46:33.000Z |
import os.path as osp
import torchvision.transforms as transforms
from cvpods.configs.base_classification_config import BaseClassificationConfig
_config_dict = dict(
MODEL=dict(
WEIGHTS="",
AS_PRETRAIN=True,
RESNETS=dict(
DEPTH=50,
NUM_CLASSES=1000,
NORM="SyncBN",
OUT_FEATURES=["linear"],
STRIDE_IN_1X1=False, # default true for msra models
ZERO_INIT_RESIDUAL=True, # default false, use true for all subsequent models
),
CLR=dict(
ALPHA=256,
K=256,
DIM=128,
TAU=0.2,
MLP=True,
NORM="SyncBN",
MOMENTUM=0.999,
),
),
DATASETS=dict(
TRAIN=("imagenet_train", ),
TEST=("imagenet_val", ),
),
DATALOADER=dict(NUM_WORKERS=6, ),
SOLVER=dict(
LR_SCHEDULER=dict(
NAME="WarmupCosineLR",
MAX_EPOCH=200,
WARMUP_ITERS=10,
EPOCH_WISE=False, # update lr in epoch / step
),
OPTIMIZER=dict(
NAME="SGD",
LARS=dict(
ENABLED=False,
EPS=1e-8,
TRUST_COEF=1e-3,
),
BASE_LR=0.03,
MOMENTUM=0.9,
WEIGHT_DECAY=1e-4,
WEIGHT_DECAY_NORM=1e-4,
),
CHECKPOINT_PERIOD=10,
IMS_PER_BATCH=256,
IMS_PER_DEVICE=32,
),
INPUT=dict(
AUG=dict(
TRAIN_PIPELINES=[
("RepeatList", dict(transforms=[
("Torch_Compose", transforms.Compose([
transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
])),
("GaussianBlur", dict(sigma=[.1, 2.], p=0.5)),
("Torch_Compose", transforms.Compose([
transforms.RandomGrayscale(p=0.2),
transforms.RandomHorizontalFlip(),
]))
], repeat_times=2)),
],
)
),
OUTPUT_DIR=osp.join(
'/data/Outputs/model_logs/cvpods_playground/SelfSup',
osp.split(osp.realpath(__file__))[0].split("SelfSup/")[-1]))
class MoCoV2Config(BaseClassificationConfig):
def __init__(self):
super(MoCoV2Config, self).__init__()
self._register_configuration(_config_dict)
config = MoCoV2Config()
| 29.813953 | 89 | 0.50351 |
| 34e7f02af5db91d7d20d50a4bc59431be42e73be | 5,837 | py | Python | groot_ansible/__init__.py | stonier/groot_ansible | 670eb1e3f03dcb68bbf5204abc474b40f1cc44bc | ["BSD-3-Clause"] | 2 | 2020-02-12T01:35:48.000Z | 2021-04-01T22:44:25.000Z | groot_ansible/__init__.py | stonier/groot_ansible | 670eb1e3f03dcb68bbf5204abc474b40f1cc44bc | ["BSD-3-Clause"] | 7 | 2017-02-03T03:50:09.000Z | 2020-05-04T17:22:16.000Z | groot_ansible/__init__.py | stonier/groot_ansible | 670eb1e3f03dcb68bbf5204abc474b40f1cc44bc | ["BSD-3-Clause"] | null | null | null |
#
# License: BSD
# https://raw.githubusercontent.com/stonier/groot_ansible/devel/LICENSE
#
##############################################################################
# Documentation
##############################################################################
"""
Python wrapped ansible scripts for installing and updating systems for
various use cases.
"""
##############################################################################
# Imports
##############################################################################
import argparse
from . import common
from . import console
from . import ros
from . import update
from . import testies
from . import workstation
##############################################################################
# Constants
##############################################################################
__version__ = '0.4.2'
##############################################################################
# Main
##############################################################################
def version_string():
return console.cyan + "Version" + console.reset + " : " + console.yellow + __version__ + console.reset
def main(args=None):
"""
Entry point to the console 'groot-ansible' tool.
"""
try:
parser = argparse.ArgumentParser(
description=console.green + "A python frontend to groot playbooks." + console.reset,
epilog=console.bold + console.white + "And his noodly appendage reached forth to tickle the blessed...\n" + console.reset,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-v', '--version', action='version', version=version_string())
subparsers = parser.add_subparsers(title='commands',
help='valid commands for groot-ansible interactions')
update.add_subparser(subparsers)
testies.add_subparser(subparsers)
common.add_generic_subparser(subparsers,
name="bootstrap/pc",
playbook_name="bootstrap-pc",
short_description="bootstrap a pc/laptop for development",
description="Standard ubuntu development workstation",
become_sudo=True)
workstation.add_subparser(subparsers)
common.add_generic_subparser(subparsers, name="os/ubuntu",
playbook_name="os-ubuntu",
short_description="useful non-core packages for ubuntu",
description="Extra packages and configuration for the core of a basic development environment",
become_sudo=True)
common.add_generic_subparser(subparsers, name="os/kubuntu",
playbook_name="os-kubuntu",
short_description="kubuntu desktop packages and configuration",
description="Add the kubuntu (full) desktop environment to an ubuntu installation",
become_sudo=True)
common.add_generic_subparser(subparsers, name="os/system76",
playbook_name="os-system76",
short_description="drivers from system76 for ubuntu",
description="System76 environment setup/install/update",
become_sudo=True)
common.add_generic_subparser(subparsers, name="devel/git",
playbook_name="devel-git",
short_description="git binaries, modules and configuration",
description="Git binaries, modules (lfs) and user configuration",
become_sudo=True)
common.add_generic_subparser(subparsers, name="devel/powerline",
playbook_name="devel-powerline",
short_description="powerline in the shell for the user",
description="Setup powerline in the shell for the user",
become_sudo=True)
ros.add_subparser(subparsers)
common.add_generic_subparser(subparsers, name="devel/ros2",
playbook_name="devel-ros2",
short_description="ros2 environment for ubuntu",
description="ROS 2 environment setup/install/update",
become_sudo=True)
common.add_generic_subparser(subparsers, name="extras/chrome",
playbook_name="extras-chrome",
short_description="google chrome for ubuntu",
description="Google chrome setup/install/update",
become_sudo=True)
common.add_generic_subparser(subparsers, name="extras/snorriheim",
playbook_name="extras-snorriheim",
short_description="snorriheim ppa and packages",
description="Snorriheim's PPA and packages for ubuntu",
become_sudo=True)
options = parser.parse_args(args)
# options, unused_unknown_args = parser.parse_known_args(args)
options.func(options) # relay arg parsing to the subparser configured `set_defaults` function callback
except KeyboardInterrupt:
print('Interrupted by user!')
| 50.756522 | 134 | 0.49032 |
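`groot_ansible.main()` above is the standard argparse sub-command pattern: each playbook registers a subparser whose `set_defaults(func=...)` callback `main()` then dispatches to. Below is a stripped-down, self-contained sketch of that pattern; the command name and flag are invented for illustration.

```python
import argparse


def cmd_update(options):
    # Callback registered by the 'update' subparser via set_defaults(func=...).
    print("updating, verbose =", options.verbose)


def build_parser():
    parser = argparse.ArgumentParser(description="demo front-end")
    subparsers = parser.add_subparsers(title="commands")

    update = subparsers.add_parser("update", help="update the system")
    update.add_argument("-v", "--verbose", action="store_true")
    update.set_defaults(func=cmd_update)

    return parser


if __name__ == "__main__":
    opts = build_parser().parse_args(["update", "-v"])  # use real sys.argv in practice
    opts.func(opts)  # relay to the subparser's registered callback
```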
| 2961ba3e105c7a451f911c19bb7b18567804341e | 2,055 | py | Python | code/test_fastsimplexordatastore.py | JustinCappos/uppir | 2a2fc435d5e21138f7f4543c6b3588d6529f4619 | ["Apache-2.0"] | null | null | null | code/test_fastsimplexordatastore.py | JustinCappos/uppir | 2a2fc435d5e21138f7f4543c6b3588d6529f4619 | ["Apache-2.0"] | null | null | null | code/test_fastsimplexordatastore.py | JustinCappos/uppir | 2a2fc435d5e21138f7f4543c6b3588d6529f4619 | ["Apache-2.0"] | null | null | null |
# this is a bunch of macro tests. If everything passes, there is no output.
import fastsimplexordatastore
size = 64
letterxordatastore = fastsimplexordatastore.XORDatastore(size, 16)
startpos = 0
for char in range(ord("A"), ord("Q")):
# put 1K of those chars in...
letterxordatastore.set_data(startpos, chr(char) * size)
startpos = startpos + size
# can read data out...
assert(letterxordatastore.get_data(size, 1) == 'B')
# let's create a bitstring that uses A, C, and P.
bitstring = chr(int('10100000', 2)) + chr(int('00000001',2))
xorresult = letterxordatastore.produce_xor_from_bitstring(bitstring)
assert(xorresult[0] == 'R')
letterxordatastore.set_data(10,"Hello there")
mystring = letterxordatastore.get_data(9,13)
assert(mystring == 'AHello thereA')
letterxordatastore.set_data(1,"Hello there"*size)
mystring = letterxordatastore.get_data(size*2 - (size*2 %11) + 1,11)
assert(mystring == "Hello there")
# let's try to read the last bytes of data
mystring = letterxordatastore.get_data(size*15,size)
try:
letterxordatastore = fastsimplexordatastore.XORDatastore(127, 16)
except TypeError:
pass
else:
print "Was allowed to use a block size that isn't a multiple of 64"
try:
letterxordatastore.set_data(size*16, "hi")
except TypeError:
pass
else:
print "Was allowed to write past the end of the datastore"
try:
letterxordatastore.set_data(size*16, 1)
except TypeError:
pass
else:
print "Was allowed to read past the end of the datastore"
for blockcount in [9,15,16]:
letterxordatastore = fastsimplexordatastore.XORDatastore(size, blockcount)
# is a 0 block the right size?
assert( len(letterxordatastore.produce_xor_from_bitstring(chr(0)*2)) == size )
try:
letterxordatastore.produce_xor_from_bitstring(chr(0)*1)
except TypeError:
pass
else:
print "didn't detect incorrect (short) bitstring length"
try:
letterxordatastore.produce_xor_from_bitstring(chr(0)*3)
except TypeError:
pass
else:
print "didn't detect incorrect (long) bitstring length"
| 22.582418 | 80 | 0.734793 |
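The Python 2 tests above describe the datastore's contract: fixed-size blocks, byte-addressed `set_data`/`get_data`, and `produce_xor_from_bitstring`, which XORs together every block whose bit is set in the query bitstring (the core of simple XOR-based PIR, hence 'A' ^ 'C' ^ 'P' == 'R'). The class below is not the C-backed `fastsimplexordatastore`, just a pure-Python (Python 3) sketch of that behaviour.

```python
class TinyXORDatastore:
    def __init__(self, block_size, block_count):
        self.block_size = block_size
        self.block_count = block_count
        self.data = bytearray(block_size * block_count)

    def set_data(self, offset, raw):
        self.data[offset:offset + len(raw)] = raw

    def get_data(self, offset, quantity):
        return bytes(self.data[offset:offset + quantity])

    def produce_xor_from_bitstring(self, bitstring):
        # XOR together every block whose bit is set (MSB-first within each byte).
        result = bytearray(self.block_size)
        for block in range(self.block_count):
            if bitstring[block // 8] & (1 << (7 - block % 8)):
                start = block * self.block_size
                for i in range(self.block_size):
                    result[i] ^= self.data[start + i]
        return bytes(result)


store = TinyXORDatastore(64, 16)
for i, char in enumerate(range(ord("A"), ord("Q"))):
    store.set_data(i * 64, bytes([char]) * 64)

# Blocks for 'A', 'C' and 'P' selected: 65 ^ 67 ^ 80 == 82 == ord('R'), as in the test above.
assert store.produce_xor_from_bitstring(bytes([0b10100000, 0b00000001]))[0] == ord("R")
```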
| 66e96af05eb71dffdc797a7e43ac1a1a2c74529a | 63,814 | py | Python | pandas/core/internals/managers.py | rogererens/pandas | 6812842d3653e93f9191a59446b1b4b19c77c428 | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null | pandas/core/internals/managers.py | rogererens/pandas | 6812842d3653e93f9191a59446b1b4b19c77c428 | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null | pandas/core/internals/managers.py | rogererens/pandas | 6812842d3653e93f9191a59446b1b4b19c77c428 | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] | null | null | null |
from collections import defaultdict
from functools import partial
import itertools
import operator
import re
from typing import Dict, List, Optional, Sequence, Tuple, TypeVar, Union
import numpy as np
from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
from pandas._typing import ArrayLike, DtypeObj, Label
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_convert_objects,
maybe_promote,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_datetimelike_v_numeric,
is_extension_array_dtype,
is_list_like,
is_numeric_v_string_like,
is_scalar,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.base import PandasObject
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.api import Index, ensure_index
from pandas.core.internals.blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
_extend_blocks,
_merge_blocks,
_safe_reshape,
get_block_type,
make_block,
)
from pandas.core.internals.concat import ( # all for concatenate_block_managers
combine_concat_plans,
concatenate_join_units,
get_mgr_concatenation_plan,
is_uniform_join_units,
)
from pandas.io.formats.printing import pprint_thing
# TODO: flexible with index=None and/or items=None
T = TypeVar("T", bound="BlockManager")
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_dtypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
blocks: Sequence of Block
axes: Sequence of Index
do_integrity_check: bool, default True
Notes
-----
This is *not* a public API class
"""
__slots__ = [
"axes",
"blocks",
"_ndim",
"_shape",
"_known_consolidated",
"_is_consolidated",
"_blknos",
"_blklocs",
]
_blknos: np.ndarray
_blklocs: np.ndarray
def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
do_integrity_check: bool = True,
):
self.axes = [ensure_index(ax) for ax in axes]
self.blocks: Tuple[Block, ...] = tuple(blocks)
for block in blocks:
if self.ndim != block.ndim:
raise AssertionError(
f"Number of Block dimensions ({block.ndim}) must equal "
f"number of axes ({self.ndim})"
)
if do_integrity_check:
self._verify_integrity()
# Populate known_consolidate, blknos, and blklocs lazily
self._known_consolidated = False
self._blknos = None
self._blklocs = None
@classmethod
def from_blocks(cls, blocks: List[Block], axes: List[Index]):
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
return cls(blocks, axes, do_integrity_check=False)
@property
def blknos(self):
"""
Suppose we want to find the array corresponding to our i'th column.
blknos[i] identifies the block from self.blocks that contains this column.
blklocs[i] identifies the column of interest within
self.blocks[self.blknos[i]]
"""
if self._blknos is None:
# Note: these can be altered by other BlockManager methods.
self._rebuild_blknos_and_blklocs()
return self._blknos
@property
def blklocs(self):
"""
See blknos.__doc__
"""
if self._blklocs is None:
# Note: these can be altered by other BlockManager methods.
self._rebuild_blknos_and_blklocs()
return self._blklocs
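# A small illustration of the mapping (a sketch; the exact block order is an
# assumption and depends on construction/consolidation order): for columns
# [a: int64, b: float64, c: int64] consolidated into an IntBlock holding
# [a, c] and a FloatBlock holding [b],
#   blknos  == [0, 1, 0]   # column i lives in self.blocks[blknos[i]]
#   blklocs == [0, 0, 1]   # row of column i within that block's values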
def make_empty(self: T, axes=None) -> T:
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [Index([])] + self.axes[1:]
# preserve dtype if possible
if self.ndim == 1:
assert isinstance(self, SingleBlockManager) # for mypy
blk = self.blocks[0]
arr = blk.values[:0]
nb = blk.make_block_same_class(arr, placement=slice(0, 0), ndim=1)
blocks = [nb]
else:
blocks = []
return type(self).from_blocks(blocks, axes)
def __nonzero__(self) -> bool:
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self) -> Tuple[int, ...]:
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self) -> int:
return len(self.axes)
def set_axis(self, axis: int, new_labels: Index) -> None:
# Caller is responsible for ensuring we have an Index object.
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
f"Length mismatch: Expected axis has {old_len} elements, new "
f"values have {new_len} elements"
)
self.axes[axis] = new_labels
@property
def _is_single_block(self) -> bool:
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(
0, len(self), 1
)
def _rebuild_blknos_and_blklocs(self) -> None:
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
# TODO: can we avoid this? it isn't cheap
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
@property
def items(self) -> Index:
return self.axes[0]
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self.blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = list(self.axes)
extra_state = {
"0.14.1": {
"axes": axes_array,
"blocks": [
dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks
],
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
return make_block(values, placement=mgr_locs)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
self.blocks = tuple(
unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
)
else:
raise NotImplementedError("pre-0.14.1 pickles are no longer supported")
self._post_setstate()
def _post_setstate(self) -> None:
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self) -> int:
return len(self.items)
def __repr__(self) -> str:
output = type(self).__name__
for i, ax in enumerate(self.axes):
if i == 0:
output += f"\nItems: {ax}"
else:
output += f"\nAxis {i}: {ax}"
for block in self.blocks:
output += f"\n{pprint_thing(block)}"
return output
def _verify_integrity(self) -> None:
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
"Number of manager items must equal union of "
f"block items\n# manager items: {len(self.items)}, # "
f"tot_items: {tot_items}"
)
def reduce(self, func, *args, **kwargs):
# If 2D, we assume that we're operating column-wise
if self.ndim == 1:
# we'll be returning a scalar
blk = self.blocks[0]
return func(blk.values, *args, **kwargs)
res = {}
for blk in self.blocks:
bres = func(blk.values, *args, **kwargs)
if np.ndim(bres) == 0:
# EA
assert blk.shape[0] == 1
new_res = zip(blk.mgr_locs.as_array, [bres])
else:
assert bres.ndim == 1, bres.shape
assert blk.shape[0] == len(bres), (blk.shape, bres.shape, args, kwargs)
new_res = zip(blk.mgr_locs.as_array, bres)
nr = dict(new_res)
assert not any(key in res for key in nr)
res.update(nr)
return res
def apply(self: T, f, filter=None, **kwargs) -> T:
"""
Iterate over the blocks, collect and create a new BlockManager.
Parameters
----------
f : str or callable
Name of the Block method to apply.
filter : list, optional
    If supplied, only apply to blocks whose items intersect the filter.
Returns
-------
BlockManager
"""
result_blocks = []
# fillna: Series/DataFrame is responsible for making sure value is aligned
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs["filter"] = filter_locs
self._consolidate_inplace()
if f == "where":
align_copy = True
if kwargs.get("align", True):
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
elif f == "putmask":
align_copy = False
if kwargs.get("align", True):
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
else:
align_keys = []
# TODO(EA): may interfere with ExtensionBlock.setitem for blocks
# with a .values attribute.
aligned_args = {
k: kwargs[k]
for k in align_keys
if not isinstance(kwargs[k], ABCExtensionArray)
and hasattr(kwargs[k], "values")
}
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = obj._info_axis_number
kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy)
if callable(f):
applied = b.apply(f, **kwargs)
else:
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(self.axes)
return type(self).from_blocks(result_blocks, self.axes)
def quantile(
self,
axis: int = 0,
consolidate: bool = True,
transposed: bool = False,
interpolation="linear",
qs=None,
numeric_only=None,
) -> "BlockManager":
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
axis: reduction axis, default 0
consolidate: bool, default True. Join together blocks having same
dtype
transposed: bool, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored
Returns
-------
BlockManager
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
assert self.ndim >= 2
if consolidate:
self._consolidate_inplace()
def get_axe(block, qs, axes):
# Because Series dispatches to DataFrame, we will always have
# block.ndim == 2
from pandas import Float64Index
if is_list_like(qs):
ax = Float64Index(qs)
else:
ax = axes[0]
return ax
axes, blocks = [], []
for b in self.blocks:
block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
axe = get_axe(b, qs, axes=self.axes)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = {b.ndim for b in blocks}
assert 0 not in ndim, ndim
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate([ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [
b.make_block(b.values.T, placement=np.arange(b.shape[1]))
for b in blocks
]
return type(self)(blocks, new_axes)
# single block, i.e. ndim == {1}
values = concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
make_block(values, ndim=1, placement=np.arange(len(values))),
axes[0],
fastpath=True,
)
def isna(self, func) -> "BlockManager":
return self.apply("apply", func=func)
def where(self, **kwargs) -> "BlockManager":
return self.apply("where", **kwargs)
def setitem(self, indexer, value) -> "BlockManager":
return self.apply("setitem", indexer=indexer, value=value)
def putmask(self, **kwargs):
return self.apply("putmask", **kwargs)
def diff(self, n: int, axis: int) -> "BlockManager":
return self.apply("diff", n=n, axis=axis)
def interpolate(self, **kwargs) -> "BlockManager":
return self.apply("interpolate", **kwargs)
def shift(self, **kwargs) -> "BlockManager":
return self.apply("shift", **kwargs)
def fillna(self, **kwargs) -> "BlockManager":
return self.apply("fillna", **kwargs)
def downcast(self) -> "BlockManager":
return self.apply("downcast")
def astype(
self, dtype, copy: bool = False, errors: str = "raise"
) -> "BlockManager":
return self.apply("astype", dtype=dtype, copy=copy, errors=errors)
def convert(
self,
copy: bool = True,
datetime: bool = True,
numeric: bool = True,
timedelta: bool = True,
coerce: bool = False,
) -> "BlockManager":
return self.apply(
"convert",
copy=copy,
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
)
def replace(self, value, **kwargs) -> "BlockManager":
assert np.ndim(value) == 0, value
return self.apply("replace", value=value, **kwargs)
def replace_list(
self, src_list, dest_list, inplace: bool = False, regex: bool = False
) -> "BlockManager":
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
# figure out our mask a-priori to avoid repeated replacements
values = self.as_array()
def comp(s, regex=False):
"""
Generate a bool array by perform an equality check, or perform
an element-wise regular expression matching
"""
if isna(s):
return isna(values)
if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None:
return _compare_or_regex_search(
maybe_convert_objects(values), s.asm8, regex
)
return _compare_or_regex_search(values, s, regex)
masks = [comp(s, regex) for s in src_list]
result_blocks = []
src_len = len(src_list) - 1
for blk in self.blocks:
# it's possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
# TODO: assert/validate that `d` is always a scalar?
new_rb: List[Block] = []
for b in rb:
m = masks[i][b.mgr_locs.indexer]
convert = i == src_len
result = b._replace_coerce(
mask=m,
to_replace=s,
value=d,
inplace=inplace,
convert=convert,
regex=regex,
)
if m.any() or convert:
new_rb = _extend_blocks(result, new_rb)
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = type(self).from_blocks(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def is_consolidated(self) -> bool:
"""
Return True if the blocks are consolidated, i.e. no two consolidatable blocks share a dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self) -> None:
dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate]
self._is_consolidated = len(dtypes) == len(set(dtypes))
self._known_consolidated = True
@property
def is_mixed_type(self) -> bool:
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self) -> bool:
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all(block.is_numeric for block in self.blocks)
@property
def any_extension_types(self) -> bool:
"""Whether any of the blocks in this manager are extension blocks"""
return any(block.is_extension for block in self.blocks)
@property
def is_view(self) -> bool:
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy: bool = False) -> "BlockManager":
"""
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy: bool = False) -> "BlockManager":
"""
Parameters
----------
copy : bool, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks: List[Block], copy: bool = True) -> "BlockManager":
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = algos.take_1d(
inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False
)
new_blocks.append(b)
axes = list(self.axes)
axes[0] = self.items.take(indexer)
return type(self).from_blocks(new_blocks, axes)
def get_slice(self, slobj: slice, axis: int = 0) -> "BlockManager":
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
elif axis == 1:
_slicer = [slice(None)] * (axis + 1)
_slicer[axis] = slobj
slicer = tuple(_slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
else:
raise IndexError("Requested axis not found in manager")
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = type(self)(new_blocks, new_axes, do_integrity_check=False)
return bm
def __contains__(self, item) -> bool:
return item in self.items
@property
def nblocks(self) -> int:
return len(self.blocks)
def copy(self: T, deep=True) -> T:
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : bool or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
# hit in e.g. tests.io.json.test_pandas
def copy_func(ax):
return ax.copy(deep=True) if deep == "all" else ax.view()
new_axes = [copy_func(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
res = self.apply("copy", deep=deep)
res.axes = new_axes
return res
def as_array(self, transpose: bool = False) -> np.ndarray:
"""
Convert the blockmanager data into an numpy array.
Parameters
----------
transpose : bool, default False
If True, transpose the return array,
Returns
-------
arr : ndarray
"""
if len(self.blocks) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose() if transpose else arr
if self._is_single_block and self.blocks[0].is_datetimetz:
# TODO(Block.get_values): Make DatetimeTZBlock.get_values
# always be object dtype. Some callers seem to want the
# DatetimeArray (previously DTI)
arr = self.blocks[0].get_values(dtype=object)
elif self._is_single_block or not self.is_mixed_type:
arr = np.asarray(self.blocks[0].get_values())
else:
arr = self._interleave()
return arr.transpose() if transpose else arr
def _interleave(self) -> np.ndarray:
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
# TODO: https://github.com/pandas-dev/pandas/issues/22791
# Give EAs some input on what happens here. Sparse needs this.
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError("Some items were not contained in blocks")
return result
def to_dict(self, copy: bool = True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
copy : bool, default True
Returns
-------
values : a dict of dtype -> BlockManager
Notes
-----
This consolidates based on str(dtype)
"""
self._consolidate_inplace()
bd: Dict[str, List[Block]] = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()}
def fast_xs(self, loc: int):
"""
Get a cross-section for a given location in the items; handle dups.
The result *could* be a view in the case of a single block.
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
if is_extension_array_dtype(dtype):
# we'll eventually construct an ExtensionArray.
result = np.empty(n, dtype=object)
else:
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk.iget((i, loc))
if isinstance(dtype, ExtensionDtype):
result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
return result
def consolidate(self) -> "BlockManager":
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = type(self)(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self) -> None:
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isna(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isna(self.items)]
# allow a single nan location indexer
if not is_scalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc)
else:
if isna(item):
raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(
new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True
)
def iget(self, i: int) -> "SingleBlockManager":
"""
Return the data as a SingleBlockManager.
"""
block = self.blocks[self.blknos[i]]
values = block.iget(self.blklocs[i])
# shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager(
block.make_block_same_class(
values, placement=slice(0, len(values)), ndim=1
),
self.axes[1],
fastpath=True,
)
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self.blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(
b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]
)
self._rebuild_blknos_and_blklocs()
def set(self, item: Label, value):
"""
Set new item in-place.
Notes
-----
Does not consolidate.
Adds new Block if not contained in the current items Index.
"""
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
self.iset(loc, value)
def iset(self, loc: Union[int, slice, np.ndarray], value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
if self._blklocs is None and self.ndim > 1:
self._rebuild_blknos_and_blklocs()
value_is_extension_type = is_extension_array_dtype(value)
# categorical/sparse/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = _safe_reshape(value, (1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError(
"Shape of new values must be compatible with manager shape"
)
if lib.is_integer(loc):
# We have 6 tests where loc is _not_ an int.
# In this case, get_blkno_placements will yield only one tuple,
# containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1)))
loc = [loc]
# Accessing public blknos ensures the public versions are initialized
blknos = self.blknos[loc]
blklocs = self.blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in libinternals.get_blkno_placements(blknos, group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs))
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
self._blknos = algos.take_1d(
new_blknos, self._blknos, axis=0, allow_fill=False
)
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks: List[Block] = []
if value_is_extension_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(
values=value,
ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1),
)
for mgr_loc in unfit_mgr_locs
)
self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(
values=value_getitem(unfit_val_items),
ndim=self.ndim,
placement=unfit_mgr_locs,
)
)
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc: int, item, value, allow_duplicates: bool = False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError(f"cannot insert {item}, already exists")
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
if value.ndim == self.ndim - 1 and not is_extension_array_dtype(value):
value = _safe_reshape(value, (1,) + value.shape)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self.blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
# Accessing public blklocs ensures the public versions are initialized
if loc == self.blklocs.shape[0]:
# np.append is a lot faster, let's use it if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(
self,
new_index,
axis: int,
method=None,
limit=None,
fill_value=None,
copy: bool = True,
):
"""
Conform block manager to new index.
"""
new_index = ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit
)
return self.reindex_indexer(
new_index, indexer, axis=axis, fill_value=fill_value, copy=copy
)
def reindex_indexer(
self: T,
new_axis,
indexer,
axis: int,
fill_value=None,
allow_dups: bool = False,
copy: bool = True,
) -> T:
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
    pandas indexer with -1's only
axis : int
fill_value : object, default None
allow_dups : bool, default False
copy : bool, default True
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(indexer, fill_tuple=(fill_value,))
else:
new_blocks = [
blk.take_nd(
indexer,
axis=axis,
fill_tuple=(
fill_value if fill_value is not None else blk.fill_value,
),
)
for blk in self.blocks
]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return type(self).from_blocks(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill
)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ("slice", "mask"):
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [
blk.take_nd(
slobj,
axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple,
)
]
if sl_type in ("slice", "mask"):
blknos = self.blknos[slobj]
blklocs = self.blklocs[slobj]
else:
blknos = algos.take_1d(
self.blknos, slobj, fill_value=-1, allow_fill=allow_fill
)
blklocs = algos.take_1d(
self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill
)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(
self._make_na_block(placement=mgr_locs, fill_value=fill_value)
)
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=False)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(
blk.take_nd(
blklocs[mgr_locs.indexer],
axis=0,
new_mgr_locs=mgr_locs,
fill_tuple=None,
)
)
return blocks
def _make_na_block(self, placement, fill_value=None):
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis: int = 1, verify: bool = True, convert: bool = True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (
np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype="int64")
)
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception("Indices must be nonzero and less than the axis length")
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(
new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
)
def equals(self, other) -> bool:
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
if len(self.blocks) != len(other.blocks):
return False
# canonicalize block order, using a tuple combining the mgr_locs
# then type name because there might be unconsolidated
# blocks (say, Categorical) which can only be distinguished by
# the iteration order
def canonicalize(block):
return (block.mgr_locs.as_array.tolist(), block.dtype.name)
self_blocks = sorted(self.blocks, key=canonicalize)
other_blocks = sorted(other.blocks, key=canonicalize)
return all(
block.equals(oblock) for block, oblock in zip(self_blocks, other_blocks)
)
def unstack(self, unstacker_func, fill_value) -> "BlockManager":
"""
Return a BlockManager with all blocks unstacked.
Parameters
----------
unstacker_func : callable
A (partially-applied) ``pd.core.reshape._Unstacker`` class.
fill_value : Any
fill_value for newly introduced missing values.
Returns
-------
unstacked : BlockManager
"""
n_rows = self.shape[-1]
dummy = unstacker_func(np.empty((0, 0)), value_columns=self.items)
new_columns = dummy.get_new_columns()
new_index = dummy.get_new_index()
new_blocks: List[Block] = []
columns_mask: List[np.ndarray] = []
for blk in self.blocks:
blocks, mask = blk._unstack(
partial(unstacker_func, value_columns=self.items[blk.mgr_locs.indexer]),
new_columns,
n_rows,
fill_value,
)
new_blocks.extend(blocks)
columns_mask.extend(mask)
new_columns = new_columns[columns_mask]
bm = BlockManager(new_blocks, [new_columns, new_index])
return bm
class SingleBlockManager(BlockManager):
""" manage a single block with """
ndim = 1
_is_consolidated = True
_known_consolidated = True
__slots__ = ()
def __init__(
self,
block: Block,
axis: Union[Index, List[Index]],
do_integrity_check: bool = False,
fastpath: bool = False,
):
assert isinstance(block, Block), type(block)
if isinstance(axis, list):
if len(axis) != 1:
raise ValueError(
"cannot create SingleBlockManager with more than 1 axis"
)
axis = axis[0]
# passed from constructor, single block, single axis
if fastpath:
self.axes = [axis]
else:
self.axes = [ensure_index(axis)]
self.blocks = tuple([block])
@classmethod
def from_blocks(
cls, blocks: List[Block], axes: List[Index]
) -> "SingleBlockManager":
"""
Constructor for BlockManager and SingleBlockManager with same signature.
"""
assert len(blocks) == 1
assert len(axes) == 1
return cls(blocks[0], axes[0], do_integrity_check=False, fastpath=True)
@classmethod
def from_array(cls, array: ArrayLike, index: Index) -> "SingleBlockManager":
"""
Constructor for if we have an array that is not yet a Block.
"""
block = make_block(array, placement=slice(0, len(index)), ndim=1)
return cls(block, index, fastpath=True)
def _post_setstate(self):
pass
@property
def _block(self) -> Block:
return self.blocks[0]
@property
def _blknos(self):
""" compat with BlockManager """
return None
@property
def _blklocs(self):
""" compat with BlockManager """
return None
def get_slice(self, slobj: slice, axis: int = 0) -> "SingleBlockManager":
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
blk = self._block
array = blk._slice(slobj)
block = blk.make_block_same_class(array, placement=range(len(array)))
return type(self)(block, self.index[slobj], fastpath=True)
@property
def index(self) -> Index:
return self.axes[0]
@property
def dtype(self) -> DtypeObj:
return self._block.dtype
def get_dtype_counts(self):
return {self.dtype.name: 1}
def get_dtypes(self) -> np.ndarray:
return np.array([self._block.dtype])
def external_values(self):
"""The array that Series.values returns"""
return self._block.external_values()
def internal_values(self):
"""The array that Series._values returns"""
return self._block.internal_values()
@property
def _can_hold_na(self) -> bool:
return self._block._can_hold_na
def is_consolidated(self) -> bool:
return True
def _consolidate_check(self):
pass
def _consolidate_inplace(self):
pass
def delete(self, item):
"""
Delete single item from SingleBlockManager.
Ensures that self.blocks doesn't become empty.
"""
loc = self.items.get_loc(item)
self._block.delete(loc)
self.axes[0] = self.axes[0].delete(loc)
def fast_xs(self, loc):
"""
fast path for getting a cross-section
return a view of the data
"""
raise NotImplementedError("Use series._values[loc] instead")
def concat(self, to_concat, new_axis: Index) -> "SingleBlockManager":
"""
Concatenate a list of SingleBlockManagers into a single
SingleBlockManager.
Used for pd.concat of Series objects with axis=0.
Parameters
----------
to_concat : list of SingleBlockManagers
new_axis : Index of the result
Returns
-------
SingleBlockManager
"""
non_empties = [x for x in to_concat if len(x) > 0]
# check if all series are of the same block type:
if len(non_empties) > 0:
blocks = [obj.blocks[0] for obj in non_empties]
if len({b.dtype for b in blocks}) == 1:
new_block = blocks[0].concat_same_type(blocks)
else:
values = [x.values for x in blocks]
values = concat_compat(values)
new_block = make_block(values, placement=slice(0, len(values), 1))
else:
values = [x._block.values for x in to_concat]
values = concat_compat(values)
new_block = make_block(values, placement=slice(0, len(values), 1))
mgr = SingleBlockManager(new_block, new_axis)
return mgr
# --------------------------------------------------------------------
# Constructor Helpers
def create_block_manager_from_blocks(blocks, axes):
try:
if len(blocks) == 1 and not isinstance(blocks[0], Block):
# if blocks[0] is of length 0, return empty blocks
if not len(blocks[0]):
blocks = []
else:
# It's OK if a single block is passed as values, its placement
# is basically "all items", but if there're many, don't bother
# converting, it's an error anyway.
blocks = [
make_block(values=blocks[0], placement=slice(0, len(axes[0])))
]
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
blocks = [getattr(b, "values", b) for b in blocks]
tot_items = sum(b.shape[0] for b in blocks)
construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
try:
blocks = form_blocks(arrays, names, axes)
mgr = BlockManager(blocks, axes)
mgr._consolidate_inplace()
return mgr
except ValueError as e:
construction_error(len(arrays), arrays[0].shape, axes, e)
def construction_error(tot_items, block_shape, axes, e=None):
""" raise a helpful message about our construction """
passed = tuple(map(int, [tot_items] + list(block_shape)))
# Correcting the user facing error message during dataframe construction
if len(passed) <= 2:
passed = passed[::-1]
implied = tuple(len(ax) for ax in axes)
# Correcting the user facing error message during dataframe construction
if len(implied) <= 2:
implied = implied[::-1]
if passed == implied and e is not None:
raise e
if block_shape[0] == 0:
raise ValueError("Empty data passed with indices specified.")
raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
# -----------------------------------------------------------------------
def form_blocks(arrays, names, axes):
# put "leftover" items in float bucket, where else?
# generalize?
items_dict = defaultdict(list)
extra_locs = []
names_idx = ensure_index(names)
if names_idx.equals(axes[0]):
names_indexer = np.arange(len(names_idx))
else:
assert names_idx.intersection(axes[0]).is_unique
names_indexer = names_idx.get_indexer_for(axes[0])
for i, name_idx in enumerate(names_indexer):
if name_idx == -1:
extra_locs.append(i)
continue
k = names[name_idx]
v = arrays[name_idx]
block_type = get_block_type(v)
items_dict[block_type.__name__].append((i, k, v))
blocks = []
if len(items_dict["FloatBlock"]):
float_blocks = _multi_blockify(items_dict["FloatBlock"])
blocks.extend(float_blocks)
if len(items_dict["ComplexBlock"]):
complex_blocks = _multi_blockify(items_dict["ComplexBlock"])
blocks.extend(complex_blocks)
if len(items_dict["TimeDeltaBlock"]):
timedelta_blocks = _multi_blockify(items_dict["TimeDeltaBlock"])
blocks.extend(timedelta_blocks)
if len(items_dict["IntBlock"]):
int_blocks = _multi_blockify(items_dict["IntBlock"])
blocks.extend(int_blocks)
if len(items_dict["DatetimeBlock"]):
datetime_blocks = _simple_blockify(items_dict["DatetimeBlock"], _NS_DTYPE)
blocks.extend(datetime_blocks)
if len(items_dict["DatetimeTZBlock"]):
dttz_blocks = [
make_block(array, klass=DatetimeTZBlock, placement=[i])
for i, _, array in items_dict["DatetimeTZBlock"]
]
blocks.extend(dttz_blocks)
if len(items_dict["BoolBlock"]):
bool_blocks = _simple_blockify(items_dict["BoolBlock"], np.bool_)
blocks.extend(bool_blocks)
if len(items_dict["ObjectBlock"]) > 0:
object_blocks = _simple_blockify(items_dict["ObjectBlock"], np.object_)
blocks.extend(object_blocks)
if len(items_dict["CategoricalBlock"]) > 0:
cat_blocks = [
make_block(array, klass=CategoricalBlock, placement=[i])
for i, _, array in items_dict["CategoricalBlock"]
]
blocks.extend(cat_blocks)
if len(items_dict["ExtensionBlock"]):
external_blocks = [
make_block(array, klass=ExtensionBlock, placement=[i])
for i, _, array in items_dict["ExtensionBlock"]
]
blocks.extend(external_blocks)
if len(items_dict["ObjectValuesExtensionBlock"]):
external_blocks = [
make_block(array, klass=ObjectValuesExtensionBlock, placement=[i])
for i, _, array in items_dict["ObjectValuesExtensionBlock"]
]
blocks.extend(external_blocks)
if len(extra_locs):
shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
# empty items -> dtype object
block_values = np.empty(shape, dtype=object)
block_values.fill(np.nan)
na_block = make_block(block_values, placement=extra_locs)
blocks.append(na_block)
return blocks
def _simple_blockify(tuples, dtype):
"""
return a list with a single block built from tuples that share one dtype; if dtype is
not None, coerce to this dtype
"""
values, placement = _stack_arrays(tuples, dtype)
# TODO: CHECK DTYPE?
if dtype is not None and values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
block = make_block(values, placement=placement)
return [block]
def _multi_blockify(tuples, dtype=None):
""" return an array of blocks that potentially have different dtypes """
# group by dtype
grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
new_blocks = []
for dtype, tup_block in grouper:
values, placement = _stack_arrays(list(tup_block), dtype)
block = make_block(values, placement=placement)
new_blocks.append(block)
return new_blocks
def _stack_arrays(tuples, dtype):
# fml
def _asarray_compat(x):
if isinstance(x, ABCSeries):
return x._values
else:
return np.asarray(x)
def _shape_compat(x):
if isinstance(x, ABCSeries):
return (len(x),)
else:
return x.shape
placement, names, arrays = zip(*tuples)
first = arrays[0]
shape = (len(arrays),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, arr in enumerate(arrays):
stacked[i] = _asarray_compat(arr)
return stacked, placement
def _interleaved_dtype(blocks: Sequence[Block]) -> Optional[DtypeObj]:
"""
Find the common dtype for `blocks`.
Parameters
----------
blocks : List[Block]
Returns
-------
dtype : np.dtype, ExtensionDtype, or None
None is returned when `blocks` is empty.
"""
if not len(blocks):
return None
return find_common_type([b.dtype for b in blocks])
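# For instance, find_common_type over an int64 block and a float64 block
# yields float64; when an extension dtype is involved, _interleave() above
# falls back to object (or the SparseDtype subtype) before allocating.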
def _consolidate(blocks):
"""
Merge blocks having same dtype, exclude non-consolidating blocks
"""
# sort by _can_consolidate, dtype
gkey = lambda x: x._consolidate_key
grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
new_blocks = []
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(
list(group_blocks), dtype=dtype, _can_consolidate=_can_consolidate
)
new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _compare_or_regex_search(a, b, regex=False):
"""
Compare two array_like inputs of the same shape or two scalar values
Calls operator.eq or re.search, depending on regex argument. If regex is
True, perform an element-wise regex matching.
Parameters
----------
a : array_like or scalar
b : array_like or scalar
regex : bool, default False
Returns
-------
mask : array_like of bool
"""
if not regex:
op = lambda x: operator.eq(x, b)
else:
op = np.vectorize(
lambda x: bool(re.search(b, x)) if isinstance(x, str) else False
)
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
if is_datetimelike_v_numeric(a, b) or is_numeric_v_string_like(a, b):
# GH#29553 avoid deprecation warnings from numpy
result = False
else:
result = op(a)
if is_scalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = f"ndarray(dtype={a.dtype})"
if is_b_array:
type_names[1] = f"ndarray(dtype={b.dtype})"
raise TypeError(
f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
)
return result
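# Rough usage sketch (array values are illustrative):
#   _compare_or_regex_search(np.array(["foo", "bar"]), "foo")             -> [True, False]
#   _compare_or_regex_search(np.array(["foo", "bar"]), "^b", regex=True)  -> [False, True]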
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
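# e.g. _fast_count_smallints(np.array([3, 1, 3, 3])) -> array([[1, 1], [3, 3]]),
# i.e. value 1 occurs once and value 3 occurs three times.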
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
if isinstance(slice_or_indexer, slice):
return (
"slice",
slice_or_indexer,
libinternals.slice_len(slice_or_indexer, length),
)
elif (
isinstance(slice_or_indexer, np.ndarray) and slice_or_indexer.dtype == np.bool_
):
return "mask", slice_or_indexer, slice_or_indexer.sum()
else:
indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
if not allow_fill:
indexer = maybe_convert_indices(indexer, length)
return "fancy", indexer, len(indexer)
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
"""
Concatenate block managers into one.
Parameters
----------
mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
axes : list of Index
concat_axis : int
copy : bool
"""
concat_plans = [
get_mgr_concatenation_plan(mgr, indexers) for mgr, indexers in mgrs_indexers
]
concat_plan = combine_concat_plans(concat_plans, concat_axis)
blocks = []
for placement, join_units in concat_plan:
if len(join_units) == 1 and not join_units[0].indexers:
b = join_units[0].block
values = b.values
if copy:
values = values.copy()
else:
values = values.view()
b = b.make_block_same_class(values, placement=placement)
elif is_uniform_join_units(join_units):
b = join_units[0].block.concat_same_type(
[ju.block for ju in join_units], placement=placement
)
else:
b = make_block(
concatenate_join_units(join_units, concat_axis, copy=copy),
placement=placement,
)
blocks.append(b)
return BlockManager(blocks, axes)
| 31.513086
| 88
| 0.572743
|
2fa76ec53e525e1b2bbdb7bf1a5c16531a789e89
| 106,194
|
py
|
Python
|
pytests/powertest.py
|
GeertBosch/mongo
|
884d232473dca72e0872f0e540d4c3108c1e0b3d
|
[
"Apache-2.0"
] | null | null | null |
pytests/powertest.py
|
GeertBosch/mongo
|
884d232473dca72e0872f0e540d4c3108c1e0b3d
|
[
"Apache-2.0"
] | null | null | null |
pytests/powertest.py
|
GeertBosch/mongo
|
884d232473dca72e0872f0e540d4c3108c1e0b3d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Powercycle test.
Tests robustness of mongod to survive multiple powercycle events.
Client & server side powercycle test script.
This script can be run against any host which is reachable via ssh.
Note - the remote hosts should be running bash shell (this script may fail otherwise).
No assumptions are made about the current MongoDB deployment on the server.
For Windows the assumption is that Cygwin is installed.
The server needs these utilities:
- python 2.7 or higher
- sshd
- rsync
This script will either download a MongoDB tarball or use an existing setup.
"""
from __future__ import print_function
import atexit
import collections
import copy
import datetime
import distutils.spawn # pylint: disable=no-name-in-module
import json
import importlib
import logging
import optparse
import os
import pipes
import posixpath
import random
import re
import shlex
import shutil
import signal
import stat
import string
import sys
import tarfile
import tempfile
import threading
import time
import traceback
import urlparse
import zipfile
import psutil
import pymongo
import requests
import yaml
# The subprocess32 module is untested on Windows and thus isn't recommended for use, even when it's
# installed. See https://github.com/google/python-subprocess32/blob/3.2.7/README.md#usage.
if os.name == "posix" and sys.version_info[0] == 2:
try:
import subprocess32 as subprocess
except ImportError:
import warnings
warnings.warn(("Falling back to using the subprocess module because subprocess32 isn't"
" available. When using the subprocess module, a child process may"
" trigger an invalid free(). See SERVER-22219 for more details."),
RuntimeWarning)
import subprocess # type: ignore
else:
import subprocess
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# See https://docs.python.org/2/library/sys.html#sys.platform
_IS_WINDOWS = sys.platform == "win32" or sys.platform == "cygwin"
_IS_LINUX = sys.platform.startswith("linux")
_IS_DARWIN = sys.platform == "darwin"
def _try_import(module, name=None):
"""Attempt to import a module and add it as a global variable.
If the import fails, then this function doesn't trigger an exception.
"""
try:
module_name = module if not name else name
globals()[module_name] = importlib.import_module(module)
except ImportError:
pass
# These modules are used on the 'client' side.
_try_import("buildscripts.aws_ec2", "aws_ec2")
_try_import("buildscripts.remote_operations", "remote_operations")
if _IS_WINDOWS:
# These modules are used on both sides for dumping python stacks.
import win32api
import win32event
# These modules are used on the 'server' side.
_try_import("ntsecuritycon")
_try_import("pywintypes")
_try_import("win32file")
_try_import("win32security")
_try_import("win32service")
_try_import("win32serviceutil")
# pylint: disable=too-many-lines
__version__ = "0.1"
LOGGER = logging.getLogger(__name__)
REPORT_JSON = {} # type: ignore
REPORT_JSON_FILE = ""
REPORT_JSON_SUCCESS = False
EXIT_YML = {"exit_code": 0} # type: ignore
EXIT_YML_FILE = None
def local_exit(code):
"""Capture exit code and invoke sys.exit."""
EXIT_YML["exit_code"] = code
sys.exit(code)
def exit_handler():
"""Exit handler to generate report.json, kill spawned processes, delete temporary files."""
if REPORT_JSON:
LOGGER.debug("Exit handler: Updating report file %s", REPORT_JSON_FILE)
try:
test_start = REPORT_JSON["results"][0]["start"]
test_end = int(time.time())
test_time = test_end - test_start
if REPORT_JSON_SUCCESS:
failures = 0
status = "pass"
exit_code = 0
else:
failures = 1
status = "fail"
exit_code = 1
REPORT_JSON["failures"] = failures
REPORT_JSON["results"][0]["status"] = status
REPORT_JSON["results"][0]["exit_code"] = exit_code
REPORT_JSON["results"][0]["end"] = test_end
REPORT_JSON["results"][0]["elapsed"] = test_time
with open(REPORT_JSON_FILE, "w") as jstream:
json.dump(REPORT_JSON, jstream)
LOGGER.debug("Exit handler: report file contents %s", REPORT_JSON)
except: # pylint: disable=bare-except
pass
if EXIT_YML_FILE:
LOGGER.debug("Exit handler: Saving exit file %s", EXIT_YML_FILE)
try:
with open(EXIT_YML_FILE, "w") as yaml_stream:
yaml.safe_dump(EXIT_YML, yaml_stream)
except: # pylint: disable=bare-except
pass
LOGGER.debug("Exit handler: Killing processes")
try:
Processes.kill_all()
except: # pylint: disable=bare-except
pass
LOGGER.debug("Exit handler: Cleaning up temporary files")
try:
NamedTempFile.delete_all()
except: # pylint: disable=bare-except
pass
def register_signal_handler(handler):
"""Register the signal handler."""
def _handle_set_event(event_handle, handler):
"""Event object handler that will dump the stacks of all threads for Windows."""
while True:
try:
# Wait for task time out to dump stacks.
ret = win32event.WaitForSingleObject(event_handle, win32event.INFINITE)
if ret != win32event.WAIT_OBJECT_0:
LOGGER.error("_handle_set_event WaitForSingleObject failed: %d", ret)
return
except win32event.error as err:
LOGGER.error("Exception from win32event.WaitForSingleObject with error: %s", err)
else:
handler(None, None)
if _IS_WINDOWS:
# Create unique event_name.
event_name = "Global\\Mongo_Python_{:d}".format(os.getpid())
LOGGER.debug("Registering event %s", event_name)
try:
security_attributes = None
manual_reset = False
initial_state = False
task_timeout_handle = win32event.CreateEvent(security_attributes, manual_reset,
initial_state, event_name)
except win32event.error as err:
LOGGER.error("Exception from win32event.CreateEvent with error: %s", err)
return
# Register to close event object handle on exit.
atexit.register(win32api.CloseHandle, task_timeout_handle)
# Create thread.
event_handler_thread = threading.Thread(target=_handle_set_event, kwargs={
"event_handle": task_timeout_handle, "handler": handler
}, name="windows_event_handler_thread")
event_handler_thread.daemon = True
event_handler_thread.start()
else:
# Otherwise register a signal handler for SIGUSR1.
signal_num = signal.SIGUSR1
signal.signal(signal_num, handler)
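# On POSIX hosts the registered handler can then be triggered externally to
# dump stacks, e.g. (illustrative): kill -USR1 <powertest pid>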
def dump_stacks_and_exit(signum, frame): # pylint: disable=unused-argument
"""Provide a handler that will dump the stacks of all threads."""
LOGGER.info("Dumping stacks!")
sb = []
frames = sys._current_frames() # pylint: disable=protected-access
sb.append("Total threads: {}\n".format(len(frames)))
sb.append("")
for thread_id in frames:
stack = frames[thread_id]
sb.append("Thread {}:".format(thread_id))
sb.append("".join(traceback.format_stack(stack)))
LOGGER.info("".join(sb))
if _IS_WINDOWS:
exit_handler()
os._exit(1) # pylint: disable=protected-access
else:
sys.exit(1)
def child_processes(parent_pid):
"""Return a list of all child processes for a pid."""
# The child processes cannot be obtained from the parent on Windows from psutil. See
# https://stackoverflow.com/questions/30220732/python-psutil-not-showing-all-child-processes
child_procs = []
while psutil.pid_exists(parent_pid):
try:
child_procs = [p for p in psutil.process_iter(attrs=["pid"]) if parent_pid == p.ppid()]
break
except psutil.NoSuchProcess:
pass
for proc in child_procs:
proc_children = child_processes(proc.pid)
if proc_children:
child_procs += proc_children
return list(set(child_procs))
def kill_process(pid, kill_children=True):
"""Kill a process, and optionally it's children, by it's pid. Returns 0 if successful."""
try:
parent = psutil.Process(pid)
except psutil.NoSuchProcess:
LOGGER.warn("Could not kill process %d, as it no longer exists", pid)
return 0
procs = [parent]
if kill_children:
procs += child_processes(pid)
for proc in procs:
try:
LOGGER.debug("Killing process '%s' pid %d", proc.name(), proc.pid)
proc.kill()
except psutil.NoSuchProcess:
LOGGER.warn("Could not kill process %d, as it no longer exists", pid)
_, alive = psutil.wait_procs(procs, timeout=30, callback=None)
if alive:
for proc in alive:
LOGGER.error("Process %d still alive!", proc.pid)
return 0
def kill_processes(procs, kill_children=True):
"""Kill a list of processes and optionally it's children."""
for proc in procs:
LOGGER.debug("Starting kill of parent process %d", proc.pid)
kill_process(proc.pid, kill_children=kill_children)
ret = proc.wait()
LOGGER.debug("Finished kill of parent process %d has return code of %d", proc.pid, ret)
def get_extension(filename):
"""Return the extension of a file."""
return os.path.splitext(filename)[-1]
def executable_extension():
"""Return executable file extension."""
if _IS_WINDOWS:
return ".exe"
return ""
def abs_path(path):
"""Return absolute path for 'path'. Raises an exception on failure."""
if _IS_WINDOWS:
# Get the Windows absolute path.
cmd = "cygpath -wa {}".format(path)
ret, output = execute_cmd(cmd, use_file=True)
if ret:
raise Exception("Command \"{}\" failed with code {} and output message: {}".format(
cmd, ret, output))
return output.rstrip()
return os.path.abspath(os.path.normpath(path))
def symlink_dir(source_dir, dest_dir):
"""Symlink the 'dest_dir' to 'source_dir'."""
if _IS_WINDOWS:
win32file.CreateSymbolicLink( # pylint: disable=undefined-variable
dest_dir, source_dir, win32file.SYMBOLIC_LINK_FLAG_DIRECTORY) # pylint: disable=undefined-variable
else:
os.symlink(source_dir, dest_dir)
def get_bin_dir(root_dir):
"""Locate the 'bin' directory within 'root_dir' tree."""
for root, dirs, _ in os.walk(root_dir):
if "bin" in dirs:
return os.path.join(root, "bin")
return None
def create_temp_executable_file(cmds):
"""Create an executable temporary file containing 'cmds'. Returns file name."""
temp_file_name = NamedTempFile.create(suffix=".sh", directory="tmp")
with NamedTempFile.get(temp_file_name) as temp_file:
temp_file.write(cmds)
os_st = os.stat(temp_file_name)
os.chmod(temp_file_name, os_st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
return temp_file_name
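# Usage sketch for create_temp_executable_file (illustrative only, not invoked here):
# the returned path names an executable shell script under the "tmp" directory, which
# start_cmd/execute_cmd later run via "bash -c <path>".
#   script_path = create_temp_executable_file("echo hello\necho world\n")
#   # e.g. "tmp/tmpa1b2c3.sh", with the u+x/g+x/o+x permission bits set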
def start_cmd(cmd, use_file=False):
"""Start command and returns proc instance from Popen."""
orig_cmd = ""
# Multi-commands need to be written to a temporary file to execute on Windows.
# This is due to complications with invoking Bash in Windows.
if use_file:
orig_cmd = cmd
temp_file = create_temp_executable_file(cmd)
# The temporary file name will have '\' on Windows and needs to be converted to '/'.
cmd = "bash -c {}".format(temp_file.replace("\\", "/"))
# If 'cmd' is specified as a string, convert it to a list of strings.
if isinstance(cmd, str):
cmd = shlex.split(cmd)
if use_file:
LOGGER.debug("Executing '%s', tempfile contains: %s", cmd, orig_cmd)
else:
LOGGER.debug("Executing '%s'", cmd)
proc = subprocess.Popen(cmd, close_fds=True)
LOGGER.debug("Spawned process %s pid %d", psutil.Process(proc.pid).name(), proc.pid)
return proc
def execute_cmd(cmd, use_file=False):
"""Execute command and returns return_code, output from command."""
orig_cmd = ""
# Multi-commands need to be written to a temporary file to execute on Windows.
# This is due to complications with invoking Bash in Windows.
if use_file:
orig_cmd = cmd
temp_file = create_temp_executable_file(cmd)
# The temporary file name will have '\' on Windows and needs to be converted to '/'.
cmd = "bash -c {}".format(temp_file.replace("\\", "/"))
# If 'cmd' is specified as a string, convert it to a list of strings.
if isinstance(cmd, str):
cmd = shlex.split(cmd)
if use_file:
LOGGER.debug("Executing '%s', tempfile contains: %s", cmd, orig_cmd)
else:
LOGGER.debug("Executing '%s'", cmd)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output, _ = proc.communicate()
error_code = proc.returncode
if error_code:
output = "Error executing cmd {}: {}".format(cmd, output)
finally:
if use_file:
os.remove(temp_file)
return error_code, output
def get_user_host(user_host):
"""Return a tuple (user, host) from the user_host string."""
if "@" in user_host:
return tuple(user_host.split("@"))
return None, user_host
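# For illustration (hypothetical inputs), get_user_host behaves like:
#   get_user_host("ec2-user@10.0.0.5")  ->  ("ec2-user", "10.0.0.5")
#   get_user_host("localhost")          ->  (None, "localhost")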
def parse_options(options):
"""Parse options and returns a dict.
Since there are options which can be specifed with a short('-') or long
('--') form, we preserve that in key map as {option_name: (value, form)}.
"""
options_map = collections.defaultdict(list)
opts = shlex.split(options)
for opt in opts:
# Handle options which could start with "-" or "--".
if opt.startswith("-"):
opt_idx = 2 if opt[1] == "-" else 1
opt_form = opt[:opt_idx]
eq_idx = opt.find("=")
if eq_idx == -1:
opt_name = opt[opt_idx:]
options_map[opt_name] = (None, opt_form)
else:
opt_name = opt[opt_idx:eq_idx]
options_map[opt_name] = (opt[eq_idx + 1:], opt_form)
opt_name = None
elif opt_name:
options_map[opt_name] = (opt, opt_form)
return options_map
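# Illustrative sketch of the resulting map (hypothetical input):
#   parse_options("--port=27017 --replSet power -v")
#   ->  {"port": ("27017", "--"), "replSet": ("power", "--"), "v": (None, "-")}
# A bare token following an option (e.g. "power" above) is attached to the preceding
# option name with that option's original form.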
def download_file(url, file_name, download_retries=5):
"""Return True if download was successful, raise error if download fails."""
LOGGER.info("Downloading %s to %s", url, file_name)
while download_retries > 0:
with requests.Session() as session:
adapter = requests.adapters.HTTPAdapter(max_retries=download_retries)
session.mount(url, adapter)
response = session.get(url, stream=True)
response.raise_for_status()
with open(file_name, "wb") as file_handle:
try:
for block in response.iter_content(1024 * 1000):
file_handle.write(block)
except requests.exceptions.ChunkedEncodingError as err:
download_retries -= 1
if download_retries == 0:
raise Exception("Incomplete download for URL {}: {}".format(url, err))
continue
# Check if file download was completed.
if "Content-length" in response.headers:
url_content_length = int(response.headers["Content-length"])
file_size = os.path.getsize(file_name)
# Retry download if file_size has an unexpected size.
if url_content_length != file_size:
download_retries -= 1
if download_retries == 0:
raise Exception("Downloaded file size ({} bytes) doesn't match content length"
"({} bytes) for URL {}".format(file_size, url_content_length,
url))
continue
return True
raise Exception("Unknown download problem for {} to file {}".format(url, file_name))
def install_tarball(tarball, root_dir):
"""Unzip and install 'tarball' into 'root_dir'."""
LOGGER.info("Installing %s to %s", tarball, root_dir)
output = ""
extensions = [".msi", ".tgz", ".zip"]
ext = get_extension(tarball)
if ext == ".tgz":
with tarfile.open(tarball, "r:gz") as tar_handle:
tar_handle.extractall(path=root_dir)
output = "Unzipped {} to {}: {}".format(tarball, root_dir, tar_handle.getnames())
ret = 0
elif ext == ".zip":
with zipfile.ZipFile(tarball, "r") as zip_handle:
zip_handle.extractall(root_dir)
output = "Unzipped {} to {}: {}".format(tarball, root_dir, zip_handle.namelist())
ret = 0
elif ext == ".msi":
if not _IS_WINDOWS:
raise Exception("Unsupported platform for MSI install")
tmp_dir = tempfile.mkdtemp(dir="c:\\")
# Change the ownership to $USER: as ssh over Cygwin does not preserve privileges
# (see https://cygwin.com/ml/cygwin/2004-09/msg00087.html).
cmds = """
msiexec /a {tarball} /qn TARGETDIR="{tmp_dir}" /l msi.log ;
if [ $? -ne 0 ]; then
echo "msiexec failed to extract from {tarball}" ;
echo See msi.log ;
exit 1 ;
fi ;
mv "{tmp_dir}"/* "{root_dir}" ;
chown -R $USER: "{root_dir}" ;
chmod -R 777 "{root_dir}" ;
winsysdir=c:/Windows/System32 ;
pushd "{root_dir}/System64" ;
for dll in * ;
do
if [ ! -f $winsysdir/$dll ]; then
echo "File $winsysdir/$dll does not exist, copying from $(pwd)" ;
cp $dll $winsysdir/ ;
else
echo "File $winsysdir/$dll already exists" ;
fi ;
done ;
popd ;
""".format( # pylint: disable=bad-continuation
tarball=tarball, tmp_dir=tmp_dir, root_dir=root_dir)
ret, output = execute_cmd(cmds, use_file=True)
shutil.rmtree(tmp_dir)
else:
raise Exception("Unsupported file extension to unzip {},"
" supported extensions are {}".format(tarball, extensions))
LOGGER.debug(output)
if ret:
raise Exception("Failed to install tarball {}, {}".format(tarball, output))
def chmod_x_binaries(bin_dir):
"""Change all file permissions in 'bin_dir' to executable for everyone."""
files = os.listdir(bin_dir)
LOGGER.debug("chmod +x %s %s", bin_dir, files)
for dir_file in files:
bin_file = os.path.join(bin_dir, dir_file)
os_st = os.stat(bin_file)
os.chmod(bin_file, os_st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
def chmod_w_file(chmod_file):
"""Change the permission for 'chmod_file' to '+w' for everyone."""
LOGGER.debug("chmod +w %s", chmod_file)
if _IS_WINDOWS:
# The os package cannot set the directory to '+w', so we use win32security.
# See https://stackoverflow.com/
# questions/12168110/setting-folder-permissions-in-windows-using-python
# pylint: disable=undefined-variable,unused-variable
user, domain, sec_type = win32security.LookupAccountName("", "Everyone")
file_sd = win32security.GetFileSecurity(chmod_file, win32security.DACL_SECURITY_INFORMATION)
dacl = file_sd.GetSecurityDescriptorDacl()
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, ntsecuritycon.FILE_GENERIC_WRITE, user)
file_sd.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(chmod_file, win32security.DACL_SECURITY_INFORMATION, file_sd)
# pylint: enable=undefined-variable,unused-variable
else:
        os.chmod(chmod_file,
                 os.stat(chmod_file).st_mode | stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH)
def set_windows_bootstatuspolicy():
"""For Windows hosts that are physical, this prevents boot to prompt after failure."""
LOGGER.info("Setting bootstatuspolicy to ignoreallfailures & boot timeout to 5 seconds")
cmds = """
echo 'Setting bootstatuspolicy to ignoreallfailures & boot timeout to 5 seconds' ;
bcdedit /set {default} bootstatuspolicy ignoreallfailures ;
bcdedit /set {current} bootstatuspolicy ignoreallfailures ;
bcdedit /timeout 5"""
ret, output = execute_cmd(cmds, use_file=True)
return ret, output
def install_mongod(bin_dir=None, tarball_url="latest", root_dir=None):
"""Set up 'root_dir'/bin to contain MongoDB binaries.
    If 'bin_dir' is specified, skip the download and symlink 'bin_dir' to
    'root_dir'/bin. Otherwise, download 'tarball_url' and symlink its bin
    directory to 'root_dir'/bin.
    """
LOGGER.debug("install_mongod: %s %s %s", bin_dir, tarball_url, root_dir)
# Create 'root_dir', if it does not exist.
root_bin_dir = os.path.join(root_dir, "bin")
if not os.path.isdir(root_dir):
LOGGER.info("install_mongod: creating %s", root_dir)
os.makedirs(root_dir)
# Symlink the 'bin_dir', if it's specified, to 'root_bin_dir'
if bin_dir and os.path.isdir(bin_dir):
symlink_dir(bin_dir, root_bin_dir)
return
if tarball_url == "latest":
# TODO SERVER-31021: Support all platforms.
if _IS_WINDOWS:
# MSI default:
# https://fastdl.mongodb.org/win32/mongodb-win32-x86_64-2008plus-ssl-latest-signed.msi
tarball_url = (
"https://fastdl.mongodb.org/win32/mongodb-win32-x86_64-2008plus-ssl-latest.zip")
elif _IS_LINUX:
tarball_url = "https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-latest.tgz"
tarball = os.path.split(urlparse.urlsplit(tarball_url).path)[-1]
download_file(tarball_url, tarball)
install_tarball(tarball, root_dir)
chmod_x_binaries(get_bin_dir(root_dir))
# Symlink the bin dir from the tarball to 'root_bin_dir'.
    # Since get_bin_dir returns an absolute path, we need to remove 'root_dir'
tarball_bin_dir = get_bin_dir(root_dir).replace("{}/".format(root_dir), "")
LOGGER.debug("Symlink %s to %s", tarball_bin_dir, root_bin_dir)
symlink_dir(tarball_bin_dir, root_bin_dir)
def print_uptime():
"""Print the last time the system was booted, and the uptime (in seconds)."""
boot_time_epoch = psutil.boot_time()
boot_time = datetime.datetime.fromtimestamp(boot_time_epoch).strftime('%Y-%m-%d %H:%M:%S.%f')
uptime = int(time.time() - boot_time_epoch)
LOGGER.info("System was last booted %s, up %d seconds", boot_time, uptime)
def call_remote_operation(local_ops, remote_python, script_name, client_args, operation):
"""Call the remote operation and returns tuple (ret, ouput)."""
client_call = "{} {} {} {}".format(remote_python, script_name, client_args, operation)
ret, output = local_ops.shell(client_call)
return ret, output
def is_instance_running(ret, aws_status):
"""Return true if instance is in a running state."""
return ret == 0 and aws_status.state["Name"] == "running"
class Processes(object):
"""Class to create and kill spawned processes."""
_PROC_LIST = [] # type: ignore
@classmethod
def create(cls, cmds):
"""Create a spawned process."""
proc = start_cmd(cmds, use_file=True)
cls._PROC_LIST.append(proc)
@classmethod
def kill(cls, proc):
"""Kill a spawned process and all it's children."""
kill_processes([proc], kill_children=True)
cls._PROC_LIST.remove(proc)
@classmethod
def kill_all(cls):
"""Kill all spawned processes."""
procs = copy.copy(cls._PROC_LIST)
for proc in procs:
cls.kill(proc)
class NamedTempFile(object):
"""Class to control temporary files."""
_FILE_MAP = {} # type: ignore
_DIR_LIST = [] # type: ignore
@classmethod
def create(cls, directory=None, suffix=""):
"""Create a temporary file, and optional directory, and returns the file name."""
if directory and not os.path.isdir(directory):
LOGGER.debug("Creating temporary directory %s", directory)
os.makedirs(directory)
cls._DIR_LIST.append(directory)
temp_file = tempfile.NamedTemporaryFile(suffix=suffix, dir=directory, delete=False)
cls._FILE_MAP[temp_file.name] = temp_file
return temp_file.name
@classmethod
def get(cls, name):
"""Get temporary file object. Raises an exception if the file is unknown."""
if name not in cls._FILE_MAP:
raise Exception("Unknown temporary file {}.".format(name))
return cls._FILE_MAP[name]
@classmethod
def delete(cls, name):
"""Delete temporary file. Raises an exception if the file is unknown."""
if name not in cls._FILE_MAP:
raise Exception("Unknown temporary file {}.".format(name))
if not os.path.exists(name):
LOGGER.debug("Temporary file %s no longer exists", name)
del cls._FILE_MAP[name]
return
try:
os.remove(name)
except (IOError, OSError) as err:
LOGGER.warn("Unable to delete temporary file %s with error %s", name, err)
if not os.path.exists(name):
del cls._FILE_MAP[name]
@classmethod
def delete_dir(cls, directory):
"""Delete temporary directory. Raises an exception if the directory is unknown."""
if directory not in cls._DIR_LIST:
raise Exception("Unknown temporary directory {}.".format(directory))
if not os.path.exists(directory):
LOGGER.debug("Temporary directory %s no longer exists", directory)
cls._DIR_LIST.remove(directory)
return
try:
shutil.rmtree(directory)
except (IOError, OSError) as err:
LOGGER.warn("Unable to delete temporary directory %s with error %s", directory, err)
if not os.path.exists(directory):
cls._DIR_LIST.remove(directory)
@classmethod
def delete_all(cls):
"""Delete all temporary files and directories."""
for name in list(cls._FILE_MAP):
cls.delete(name)
for directory in cls._DIR_LIST:
cls.delete_dir(directory)
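# Typical NamedTempFile lifecycle (sketch; the commands written are illustrative):
#   name = NamedTempFile.create(suffix=".sh", directory="tmp")
#   with NamedTempFile.get(name) as handle:
#       handle.write("echo hello\n")
#   NamedTempFile.delete(name)      # or rely on NamedTempFile.delete_all() at exit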
class ProcessControl(object):
"""Process control class.
Control processes either by name or a list of pids. If name is supplied, then
all matching pids are controlled.
"""
def __init__(self, name=None, pids=None):
"""Provide either 'name' or 'pids' to control the process."""
if not name and not pids:
raise Exception("Either 'process_name' or 'pids' must be specifed")
self.name = name
self.pids = []
if pids:
self.pids = pids
self.procs = []
def get_pids(self):
"""Return list of process ids for process 'self.name'."""
if not self.name:
return self.pids
self.pids = []
for proc in psutil.process_iter():
if proc.name() == self.name:
self.pids.append(proc.pid)
return self.pids
def get_name(self):
"""Return process name or name of first running process from pids."""
if not self.name:
for pid in self.get_pids():
proc = psutil.Process(pid)
if psutil.pid_exists(pid):
self.name = proc.name()
break
return self.name
def get_procs(self):
"""Return a list of 'proc' for the associated pids."""
procs = []
for pid in self.get_pids():
procs.append(psutil.Process(pid))
return procs
def is_running(self):
"""Return true if any process is running that either matches on name or pids."""
for pid in self.get_pids():
if psutil.pid_exists(pid):
return True
return False
def kill(self):
"""Kill all running processes that match the list of pids."""
if self.is_running():
for proc in self.get_procs():
try:
proc.kill()
except psutil.NoSuchProcess:
LOGGER.info("Could not kill process with pid %d, as it no longer exists",
proc.pid)
# pylint: disable=undefined-variable,unused-variable
class WindowsService(object):
"""Windows service control class."""
def __init__(self, name, bin_path, bin_options, start_type=None):
"""Initialize WindowsService."""
self.name = name
self.bin_name = os.path.basename(bin_path)
self.bin_path = bin_path
self.bin_options = bin_options
if start_type is not None:
self.start_type = start_type
else:
self.start_type = win32service.SERVICE_DEMAND_START
self.pids = []
self._states = {
win32service.SERVICE_CONTINUE_PENDING: "continue pending",
win32service.SERVICE_PAUSE_PENDING: "pause pending",
win32service.SERVICE_PAUSED: "paused",
win32service.SERVICE_RUNNING: "running",
win32service.SERVICE_START_PENDING: "start pending",
win32service.SERVICE_STOPPED: "stopped",
win32service.SERVICE_STOP_PENDING: "stop pending",
}
def create(self):
"""Create service, if not installed. Return (code, output) tuple."""
if self.status() in self._states.values():
return 1, "Service '{}' already installed, status: {}".format(self.name, self.status())
try:
win32serviceutil.InstallService(pythonClassString="Service.{}".format(
self.name), serviceName=self.name, displayName=self.name, startType=self.start_type,
exeName=self.bin_path, exeArgs=self.bin_options)
ret = 0
output = "Service '{}' created".format(self.name)
except pywintypes.error as err:
ret = err.winerror
output = "{}: {}".format(err[1], err[2])
return ret, output
def update(self):
"""Update installed service. Return (code, output) tuple."""
if self.status() not in self._states.values():
return 1, "Service update '{}' status: {}".format(self.name, self.status())
try:
win32serviceutil.ChangeServiceConfig(pythonClassString="Service.{}".format(
self.name), serviceName=self.name, displayName=self.name, startType=self.start_type,
exeName=self.bin_path, exeArgs=self.bin_options)
ret = 0
output = "Service '{}' updated".format(self.name)
except pywintypes.error as err:
ret = err.winerror
output = "{}: {}".format(err[1], err[2])
return ret, output
def delete(self):
"""Delete service. Return (code, output) tuple."""
if self.status() not in self._states.values():
return 1, "Service delete '{}' status: {}".format(self.name, self.status())
try:
win32serviceutil.RemoveService(serviceName=self.name)
ret = 0
output = "Service '{}' deleted".format(self.name)
except pywintypes.error as err:
ret = err.winerror
output = "{}: {}".format(err[1], err[2])
return ret, output
def start(self):
"""Start service. Return (code, output) tuple."""
if self.status() not in self._states.values():
return 1, "Service start '{}' status: {}".format(self.name, self.status())
try:
win32serviceutil.StartService(serviceName=self.name)
ret = 0
output = "Service '{}' started".format(self.name)
except pywintypes.error as err:
ret = err.winerror
output = "{}: {}".format(err[1], err[2])
proc = ProcessControl(name=self.bin_name)
self.pids = proc.get_pids()
return ret, output
def stop(self):
"""Stop service. Return (code, output) tuple."""
self.pids = []
if self.status() not in self._states.values():
return 1, "Service '{}' status: {}".format(self.name, self.status())
try:
win32serviceutil.StopService(serviceName=self.name)
ret = 0
output = "Service '{}' stopped".format(self.name)
except pywintypes.error as err:
ret = err.winerror
output = "{}: {}".format(err[1], err[2])
return ret, output
def status(self):
"""Return state of the service as a string."""
try:
# QueryServiceStatus returns a tuple:
            # (svcType, svcState, svcControls, err, svcErr, svcCP, svcWH)
            # See https://msdn.microsoft.com/en-us/library/windows/desktop/ms685996(v=vs.85).aspx
            svc_type, svc_state, svc_controls, err, svc_err, svc_cp, svc_wh = (
                win32serviceutil.QueryServiceStatus(serviceName=self.name))
if svc_state in self._states:
return self._states[svc_state]
return "unknown"
except pywintypes.error:
return "not installed"
def get_pids(self):
"""Return list of pids for service."""
return self.pids
# pylint: enable=undefined-variable,unused-variable
class PosixService(object):
"""Service control on POSIX systems.
Simulates service control for background processes which fork themselves,
i.e., mongod with '--fork'.
"""
def __init__(self, name, bin_path, bin_options):
"""Initialize PosixService."""
self.name = name
self.bin_path = bin_path
self.bin_name = os.path.basename(bin_path)
self.bin_options = bin_options
self.pids = []
def create(self): # pylint: disable=no-self-use
"""Simulate create service. Returns (code, output) tuple."""
return 0, None
def update(self): # pylint: disable=no-self-use
"""Simulate update service. Returns (code, output) tuple."""
return 0, None
def delete(self): # pylint: disable=no-self-use
"""Simulate delete service. Returns (code, output) tuple."""
return 0, None
def start(self):
"""Start process. Returns (code, output) tuple."""
cmd = "{} {}".format(self.bin_path, self.bin_options)
ret, output = execute_cmd(cmd)
if not ret:
proc = ProcessControl(name=self.bin_name)
self.pids = proc.get_pids()
return ret, output
def stop(self):
"""Stop process. Returns (code, output) tuple."""
proc = ProcessControl(name=self.bin_name)
proc.kill()
self.pids = []
return 0, None
def status(self):
"""Return status of service."""
if self.get_pids():
return "running"
return "stopped"
def get_pids(self):
"""Return list of pids for process."""
return self.pids
class MongodControl(object): # pylint: disable=too-many-instance-attributes
"""Control mongod process."""
def __init__( # pylint: disable=too-many-arguments
self, bin_dir, db_path, log_path, port, options=None):
"""Initialize MongodControl."""
self.process_name = "mongod{}".format(executable_extension())
self.bin_dir = bin_dir
if self.bin_dir:
self.bin_path = os.path.join(self.bin_dir, self.process_name)
if not os.path.isfile(self.bin_path):
LOGGER.error("File %s does not exist.", self.bin_path)
else:
self.bin_path = None
self.options_map = parse_options(options)
self.db_path = db_path
self.set_mongod_option("dbpath", db_path)
self.log_path = log_path
self.set_mongod_option("logpath", log_path)
self.set_mongod_option("logappend")
self.port = port
self.set_mongod_option("port", port)
self.set_mongod_option("bind_ip", "0.0.0.0")
if _IS_WINDOWS:
self.set_mongod_option("service")
self._service = WindowsService
else:
self.set_mongod_option("fork")
self._service = PosixService
# After mongod has been installed, self.bin_path is defined.
if self.bin_path:
self.service = self._service("mongod-powertest", self.bin_path, self.mongod_options())
def set_mongod_option(self, option, option_value=None, option_form="--"):
"""Set mongod command line option."""
self.options_map[option] = (option_value, option_form)
def get_mongod_option(self, option):
"""Return tuple of (value, form)."""
return self.options_map[option]
def get_mongod_service(self):
"""Return the service object used to control mongod."""
return self.service
def mongod_options(self):
"""Return string of mongod options, which can be used when invoking mongod."""
opt_string = ""
for opt_name in self.options_map:
opt_val, opt_form = self.options_map[opt_name]
opt_string += " {}{}".format(opt_form, opt_name)
if opt_val:
opt_string += " {}".format(opt_val)
return opt_string
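    # For illustration, with the options set in __init__ on a non-Windows host,
    # mongod_options() yields a string roughly like (option order follows dict iteration):
    #   " --dbpath <db_path> --logpath <log_path> --logappend --port <port> --bind_ip 0.0.0.0 --fork"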
def install(self, root_dir, tarball_url):
"""Return tuple (ret, ouput)."""
# Install mongod, if 'root_dir' does not exist.
if os.path.isdir(root_dir):
LOGGER.warning("Root dir %s already exists", root_dir)
else:
install_mongod(bin_dir=self.bin_dir, tarball_url=tarball_url, root_dir=root_dir)
self.bin_dir = get_bin_dir(root_dir)
if not self.bin_dir:
ret, output = execute_cmd("ls -lR '{}'".format(root_dir), use_file=True)
LOGGER.debug(output)
return 1, "No bin dir can be found under {}".format(root_dir)
self.bin_path = os.path.join(self.bin_dir, self.process_name)
# We need to instantiate the Service when installing, since the bin_path
# is only known after install_mongod runs.
self.service = self._service("mongod-powertest", self.bin_path, self.mongod_options())
ret, output = self.service.create()
return ret, output
def uninstall(self):
"""Return tuple (ret, ouput)."""
return self.service.delete()
@staticmethod
def cleanup(root_dir):
"""Return tuple (ret, ouput)."""
shutil.rmtree(root_dir, ignore_errors=True)
return 0, None
def start(self):
"""Return tuple (ret, ouput)."""
return self.service.start()
def update(self):
"""Return tuple (ret, ouput)."""
return self.service.update()
def stop(self):
"""Return tuple (ret, ouput)."""
return self.service.stop()
def status(self):
"""Return status of the process."""
return self.service.status()
def get_pids(self):
"""Return list of pids for process."""
return self.service.get_pids()
def verify_remote_access(remote_op):
"""Exit if the remote host is not accessible and save result to YML file."""
if not remote_op.access_established():
code, output = remote_op.access_info()
LOGGER.error("Exiting, unable to establish access (%d): %s", code, output)
EXIT_YML["ec2_ssh_failure"] = output
local_exit(code)
class LocalToRemoteOperations(object):
"""Local operations handler class for sending commands to the remote host.
Return (return code, output).
"""
def __init__( # pylint: disable=too-many-arguments
self, user_host, retries=2, retry_sleep=30, ssh_connection_options=None,
ssh_options=None, shell_binary="/bin/bash", use_shell=False):
"""Initialize LocalToRemoteOperations."""
self.remote_op = remote_operations.RemoteOperations( # pylint: disable=undefined-variable
user_host=user_host, ssh_connection_options=ssh_connection_options,
ssh_options=ssh_options, retries=retries, retry_sleep=retry_sleep, debug=True,
shell_binary=shell_binary, use_shell=use_shell)
def shell(self, cmds, remote_dir=None):
"""Return tuple (ret, output) from performing remote shell operation."""
return self.remote_op.shell(cmds, remote_dir)
def copy_from(self, files, remote_dir=None):
"""Return tuple (ret, output) from performing remote copy_to operation."""
return self.remote_op.copy_from(files, remote_dir)
def copy_to(self, files, remote_dir=None):
"""Return tuple (ret, output) from performing remote copy_from operation."""
return self.remote_op.copy_to(files, remote_dir)
def access_established(self):
"""Return True if remote access has been established."""
return self.remote_op.access_established()
def access_info(self):
"""Return the return code and output buffer from initial access attempt(s)."""
return self.remote_op.access_info()
def remote_handler(options, operations): # pylint: disable=too-many-branches,too-many-locals,too-many-statements
"""Remote operations handler executes all remote operations on the remote host.
These operations are invoked on the remote host's copy of this script.
Only one operation can be performed at a time.
"""
# Set 'root_dir' to absolute path.
root_dir = abs_path(options.root_dir)
if not operations:
raise ValueError("No remote operation specified.")
print_uptime()
LOGGER.info("Operations to perform %s", operations)
host = options.host if options.host else "localhost"
host_port = "{}:{}".format(host, options.port)
if options.repl_set:
options.mongod_options = "{} --replSet {}".format(options.mongod_options, options.repl_set)
# For MongodControl, the file references should be fully specified.
if options.mongodb_bin_dir:
bin_dir = abs_path(options.mongodb_bin_dir)
else:
bin_dir = get_bin_dir(root_dir)
db_path = abs_path(options.db_path)
log_path = abs_path(options.log_path)
mongod = MongodControl(bin_dir=bin_dir, db_path=db_path, log_path=log_path, port=options.port,
options=options.mongod_options)
mongo_client_opts = get_mongo_client_args(host=host, port=options.port, options=options)
# Perform the sequence of operations specified. If any operation fails
# then return immediately.
for operation in operations:
# This is the internal "crash" mechanism, which is executed on the remote host.
if operation == "crash_server":
ret, output = internal_crash(options.remote_sudo, options.crash_option)
# An internal crash on Windows is not immediate
try:
LOGGER.info("Waiting after issuing internal crash!")
time.sleep(30)
except IOError:
pass
elif operation == "kill_mongod":
# Unconditional kill of mongod
ret, output = kill_mongod()
elif operation == "install_mongod":
ret, output = mongod.install(root_dir, options.tarball_url)
LOGGER.info(output)
# Create mongod's dbpath, if it does not exist.
if not os.path.isdir(db_path):
os.makedirs(db_path)
# Create mongod's logpath directory, if it does not exist.
log_dir = os.path.dirname(log_path)
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
# Windows special handling.
if _IS_WINDOWS:
# The os package cannot set the directory to '+w'
# See https://docs.python.org/2/library/os.html#os.chmod
chmod_w_file(db_path)
chmod_w_file(log_dir)
# Disable boot prompt after system crash.
ret, output = set_windows_bootstatuspolicy()
LOGGER.info(output)
elif operation == "start_mongod":
# Always update the service before starting, as options might have changed.
ret, output = mongod.update()
LOGGER.info(output)
ret, output = mongod.start()
LOGGER.info(output)
if ret:
LOGGER.error("Failed to start mongod on port %d: %s", options.port, output)
return ret
LOGGER.info("Started mongod running on port %d pid %s", options.port, mongod.get_pids())
mongo = pymongo.MongoClient(**mongo_client_opts)
LOGGER.info("Server buildinfo: %s", mongo.admin.command("buildinfo"))
LOGGER.info("Server serverStatus: %s", mongo.admin.command("serverStatus"))
if options.repl_set:
ret = mongo_reconfig_replication(mongo, host_port, options.repl_set)
ret = 0 if not ret else 1
elif operation == "stop_mongod":
ret, output = mongod.stop()
LOGGER.info(output)
ret = wait_for_mongod_shutdown(mongod)
elif operation == "shutdown_mongod":
mongo = pymongo.MongoClient(**mongo_client_opts)
try:
mongo.admin.command("shutdown", force=True)
except pymongo.errors.AutoReconnect:
pass
ret = wait_for_mongod_shutdown(mongod)
elif operation == "rsync_data":
ret, output = rsync(options.db_path, options.rsync_dest, options.rsync_exclude_files)
LOGGER.info(output)
elif operation == "seed_docs":
mongo = pymongo.MongoClient(**mongo_client_opts)
ret = mongo_seed_docs(mongo, options.db_name, options.collection_name,
options.seed_doc_num)
elif operation == "validate_collections":
mongo = pymongo.MongoClient(**mongo_client_opts)
ret = mongo_validate_collections(mongo)
elif operation == "insert_canary":
mongo = pymongo.MongoClient(**mongo_client_opts)
ret = mongo_insert_canary(mongo, options.db_name, options.collection_name,
options.canary_doc)
elif operation == "validate_canary":
mongo = pymongo.MongoClient(**mongo_client_opts)
ret = mongo_validate_canary(mongo, options.db_name, options.collection_name,
options.canary_doc)
elif operation == "set_fcv":
mongo = pymongo.MongoClient(**mongo_client_opts)
try:
ret = mongo.admin.command("setFeatureCompatibilityVersion", options.fcv_version)
ret = 0 if ret["ok"] == 1 else 1
except pymongo.errors.OperationFailure as err:
LOGGER.error(err.message)
ret = err.code
elif operation == "remove_lock_file":
lock_file = os.path.join(options.db_path, "mongod.lock")
ret = 0
if os.path.exists(lock_file):
LOGGER.debug("Deleting mongod lockfile %s", lock_file)
try:
os.remove(lock_file)
except (IOError, OSError) as err:
LOGGER.warn("Unable to delete mongod lockfile %s with error %s", lock_file, err)
                    ret = err.errno
else:
LOGGER.error("Unsupported remote option specified '%s'", operation)
ret = 1
if ret:
return ret
return 0
def rsync(src_dir, dest_dir, exclude_files=None):
"""Rsync 'src_dir' to 'dest_dir'."""
# Note rsync on Windows requires a Unix-style directory.
exclude_options = ""
exclude_str = ""
if exclude_files:
exclude_str = " (excluding {})".format(exclude_files)
if isinstance(exclude_files, str):
exclude_files = [exclude_files]
for exclude_file in exclude_files:
exclude_options = "{} --exclude '{}'".format(exclude_options, exclude_file)
LOGGER.info("Rsync'ing %s to %s%s", src_dir, dest_dir, exclude_str)
if not distutils.spawn.find_executable("rsync"):
return 1, "No rsync exists on the host, not rsync'ing"
cmds = "rsync -va --delete --quiet {} {} {}".format(exclude_options, src_dir, dest_dir)
ret, output = execute_cmd(cmds)
return ret, output
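# Example of the generated command (a sketch, assuming a single exclude file):
#   rsync("data/db", "data-beforerecovery", "diagnostic.data")
#   runs roughly: rsync -va --delete --quiet --exclude 'diagnostic.data' data/db data-beforerecovery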
def kill_mongod():
"""Kill all mongod processes uncondtionally."""
if _IS_WINDOWS:
cmds = "taskkill /f /im mongod.exe"
else:
cmds = "pkill -9 mongod"
ret, output = execute_cmd(cmds, use_file=True)
return ret, output
def internal_crash(use_sudo=False, crash_option=None):
"""Internally crash the host this excutes on."""
# Windows can use NotMyFault to immediately crash itself, if it's been installed.
# See https://docs.microsoft.com/en-us/sysinternals/downloads/notmyfault
# Otherwise it's better to use an external mechanism instead.
if _IS_WINDOWS:
cmds = crash_option if crash_option else "shutdown /r /f /t 0"
ret, output = execute_cmd(cmds, use_file=True)
return ret, output
else:
# These operations simulate a console boot and require root privileges, see:
# - http://www.linuxjournal.com/content/rebooting-magic-way
# - https://www.mjmwired.net/kernel/Documentation/sysrq.txt
# These file operations could be performed natively,
# however since they require root (or sudo), we prefer to do them
# in a subprocess call to isolate them and not require the invocation
# of this script to be with sudo.
# Code to perform natively:
# with open("/proc/sys/kernel/sysrq", "w") as f:
# f.write("1\n")
# with open("/proc/sysrq-trigger", "w") as f:
# f.write("b\n")
sudo = "/usr/bin/sudo" if use_sudo else ""
cmds = """
echo "Server crashing now" | {sudo} wall ;
echo 1 | {sudo} tee /proc/sys/kernel/sysrq ;
echo b | {sudo} tee /proc/sysrq-trigger""".format(sudo=sudo)
ret, output = execute_cmd(cmds, use_file=True)
LOGGER.debug(output)
return 1, "Crash did not occur"
def crash_server_or_kill_mongod(  # pylint: disable=too-many-arguments,too-many-locals
options, crash_canary, canary_port, local_ops, script_name, client_args):
"""Crash server or kill mongod and optionally write canary doc. Return tuple (ret, output)."""
crash_wait_time = options.crash_wait_time + random.randint(0, options.crash_wait_time_jitter)
message_prefix = "Killing mongod" if options.crash_method == "kill" else "Crashing server"
LOGGER.info("%s in %d seconds", message_prefix, crash_wait_time)
time.sleep(crash_wait_time)
if options.crash_method == "mpower":
# Provide time for power to dissipate by sleeping 10 seconds before turning it back on.
crash_func = local_ops.shell
crash_args = [
"""
echo 0 > /dev/{crash_option} ;
sleep 10 ;
echo 1 > /dev/{crash_option}""".format(crash_option=options.crash_option)
]
local_ops = LocalToRemoteOperations(user_host=options.ssh_crash_user_host,
ssh_connection_options=options.ssh_crash_option,
shell_binary="/bin/sh")
verify_remote_access(local_ops)
elif options.crash_method == "internal" or options.crash_method == "kill":
crash_cmd = "crash_server" if options.crash_method == "internal" else "kill_mongod"
if options.canary == "remote":
# The crash canary function executes remotely, only if the
# crash_method is 'internal'.
canary = "--mongodPort {} --docForCanary \"{}\"".format(canary_port,
crash_canary["args"][3])
canary_cmd = "insert_canary"
else:
canary = ""
canary_cmd = ""
crash_func = local_ops.shell
crash_args = [
"{} {} --remoteOperation {} {} {} {}".format(options.remote_python, script_name,
client_args, canary, canary_cmd, crash_cmd)
]
elif options.crash_method == "aws_ec2":
ec2 = aws_ec2.AwsEc2() # pylint: disable=undefined-variable
crash_func = ec2.control_instance
crash_args = ["force-stop", options.instance_id, 600, True]
else:
message = "Unsupported crash method '{}' provided".format(options.crash_method)
LOGGER.error(message)
return 1, message
# Invoke the crash canary function, right before crashing the server.
if crash_canary and options.canary == "local":
crash_canary["function"](*crash_canary["args"])
ret, output = crash_func(*crash_args)
LOGGER.info(output)
return ret, output
def wait_for_mongod_shutdown(mongod_control, timeout=120):
"""Wait for for mongod to shutdown; return 0 if shutdown occurs within 'timeout', else 1."""
start = time.time()
status = mongod_control.status()
while status != "stopped":
if time.time() - start >= timeout:
LOGGER.error("The mongod process has not stopped, current status is %s", status)
return 1
LOGGER.info("Waiting for mongod process to stop, current status is %s ", status)
time.sleep(3)
status = mongod_control.status()
LOGGER.info("The mongod process has stopped")
# We wait a bit, since files could still be flushed to disk, which was causing
# rsync "file has vanished" errors.
time.sleep(5)
return 0
def get_mongo_client_args(host=None, port=None, options=None, server_selection_timeout_ms=600000,
socket_timeout_ms=600000):
"""Return keyword arg dict used in PyMongo client."""
# Set the default serverSelectionTimeoutMS & socketTimeoutMS to 10 minutes.
mongo_args = {
"serverSelectionTimeoutMS": server_selection_timeout_ms,
"socketTimeoutMS": socket_timeout_ms
}
if host:
mongo_args["host"] = host
if port:
mongo_args["port"] = port
# Set the writeConcern
if hasattr(options, "write_concern"):
mongo_args.update(yaml.safe_load(options.write_concern))
# Set the readConcernLevel
if hasattr(options, "read_concern_level") and options.read_concern_level:
mongo_args["readConcernLevel"] = options.read_concern_level
return mongo_args
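# Illustrative result (a sketch, assuming options.write_concern == '{"w": "majority"}'):
#   get_mongo_client_args(host="localhost", port=27017, options=options)
#   ->  {"serverSelectionTimeoutMS": 600000, "socketTimeoutMS": 600000,
#        "host": "localhost", "port": 27017, "w": "majority"}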
def mongo_shell( # pylint: disable=too-many-arguments
mongo_path, work_dir, host_port, mongo_cmds, retries=5, retry_sleep=5):
"""Start mongo_path from work_dir, connecting to host_port and executes mongo_cmds."""
cmds = "cd {}; echo {} | {} {}".format(
pipes.quote(work_dir), pipes.quote(mongo_cmds), pipes.quote(mongo_path), host_port)
attempt_num = 0
while True:
ret, output = execute_cmd(cmds, use_file=True)
if not ret:
break
attempt_num += 1
if attempt_num > retries:
break
time.sleep(retry_sleep)
return ret, output
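# The constructed command is roughly (sketch):
#   cd <work_dir>; echo <mongo_cmds> | <mongo_path> <host_port>
# with the directory, commands and mongo path shell-quoted via pipes.quote, retried up
# to 'retries' times with 'retry_sleep' seconds between attempts.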
def mongod_wait_for_primary(mongo, timeout=60, sleep_interval=3):
"""Return True if mongod primary is available in replica set, within the specified timeout."""
start = time.time()
while not mongo.admin.command("isMaster")["ismaster"]:
time.sleep(sleep_interval)
if time.time() - start >= timeout:
return False
return True
def mongo_reconfig_replication(mongo, host_port, repl_set):
"""Reconfigure the mongod replica set. Return 0 if successful."""
# TODO: Rework reconfig logic as follows:
# 1. Start up mongod in standalone
# 2. Delete the config doc
# 3. Stop mongod
# 4. Start mongod
    # When reconfiguring the replica set, due to a switch in ports
    # it can only be done using force=True, as the node will not come up as Primary.
    # The side effect of using force=True is large jumps in the config
    # version, which after many reconfigs may exceed the 'int' range.
LOGGER.info("Reconfiguring replication %s %s", host_port, repl_set)
database = pymongo.database.Database(mongo, "local")
system_replset = database.get_collection("system.replset")
# Check if replica set has already been initialized
if not system_replset or not system_replset.find_one():
rs_config = {"_id": repl_set, "members": [{"_id": 0, "host": host_port}]}
ret = mongo.admin.command("replSetInitiate", rs_config)
LOGGER.info("Replication initialized: %s %s", ret, rs_config)
else:
# Wait until replication is initialized.
while True:
try:
ret = mongo.admin.command("replSetGetConfig")
if ret["ok"] != 1:
LOGGER.error("Failed replSetGetConfig: %s", ret)
return 1
rs_config = ret["config"]
# We only reconfig if there is a change to 'host'.
if rs_config["members"][0]["host"] != host_port:
# With force=True, version is ignored.
# rs_config["version"] = rs_config["version"] + 1
rs_config["members"][0]["host"] = host_port
ret = mongo.admin.command("replSetReconfig", rs_config, force=True)
if ret["ok"] != 1:
LOGGER.error("Failed replSetReconfig: %s", ret)
return 1
LOGGER.info("Replication reconfigured: %s", ret)
break
except pymongo.errors.AutoReconnect:
pass
except pymongo.errors.OperationFailure as err:
# src/mongo/base/error_codes.err: error_code("NotYetInitialized", 94)
if err.code != 94:
LOGGER.error("Replication failed to initialize: %s", ret)
return 1
primary_available = mongod_wait_for_primary(mongo)
LOGGER.debug("isMaster: %s", mongo.admin.command("isMaster"))
LOGGER.debug("replSetGetStatus: %s", mongo.admin.command("replSetGetStatus"))
return 0 if ret["ok"] == 1 and primary_available else 1
def mongo_seed_docs(mongo, db_name, coll_name, num_docs):
"""Seed a collection with random document values."""
def rand_string(max_length=1024):
"""Return random string of random length."""
return ''.join(random.choice(string.letters) for _ in range(random.randint(1, max_length)))
LOGGER.info("Seeding DB '%s' collection '%s' with %d documents, %d already exist", db_name,
coll_name, num_docs, mongo[db_name][coll_name].count())
random.seed()
base_num = 100000
bulk_num = min(num_docs, 10000)
bulk_loops = num_docs / bulk_num
for _ in xrange(bulk_loops):
num_coll_docs = mongo[db_name][coll_name].count()
if num_coll_docs >= num_docs:
break
mongo[db_name][coll_name].insert_many(
[{"x": random.randint(0, base_num), "doc": rand_string(1024)}
for _ in xrange(bulk_num)])
LOGGER.info("After seeding there are %d documents in the collection",
mongo[db_name][coll_name].count())
return 0
def mongo_validate_collections(mongo):
"""Validate the mongo collections, return 0 if all are valid."""
LOGGER.info("Validating all collections")
invalid_colls = []
ebusy_colls = []
for db_name in mongo.database_names():
for coll in mongo[db_name].list_collections(filter={"type": "collection"}):
coll_name = coll["name"]
res = mongo[db_name].command({"validate": coll_name, "full": True})
LOGGER.info("Validating %s %s: %s", db_name, coll_name, res)
ebusy = "EBUSY" in res["errors"] or "EBUSY" in res["warnings"]
if not res["valid"]:
invalid_colls.append(coll_name)
elif ebusy:
ebusy_colls.append(coll_name)
if ebusy_colls:
LOGGER.warning("EBUSY collections: %s", ebusy_colls)
if invalid_colls:
LOGGER.error("Invalid collections: %s", ebusy_colls)
return 0 if not invalid_colls else 1
def mongo_validate_canary(mongo, db_name, coll_name, doc):
"""Validate a canary document, return 0 if the document exists."""
if not doc:
return 0
LOGGER.info("Validating canary document using %s.%s.find_one(%s)", db_name, coll_name, doc)
return 0 if mongo[db_name][coll_name].find_one(doc) else 1
def mongo_insert_canary(mongo, db_name, coll_name, doc):
"""Insert a canary document with 'j' True, return 0 if successful."""
LOGGER.info("Inserting canary document using %s.%s.insert_one(%s)", db_name, coll_name, doc)
coll = mongo[db_name][coll_name].with_options(
write_concern=pymongo.write_concern.WriteConcern(j=True))
res = coll.insert_one(doc)
return 0 if res.inserted_id else 1
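# Example call (hypothetical document): mongo_insert_canary(mongo, "power", "cycle", {"x": 1})
# inserts the document with journaled write concern (j=True), so it is expected to survive
# an immediate crash and be found later by mongo_validate_canary.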
def new_resmoke_config(config_file, new_config_file, test_data, eval_str=""):
"""Create 'new_config_file', from 'config_file', with an update from 'test_data'."""
new_config = {
"executor": {
"config": {"shell_options": {"eval": eval_str, "global_vars": {"TestData": test_data}}}
}
}
with open(config_file, "r") as yaml_stream:
config = yaml.load(yaml_stream)
config.update(new_config)
with open(new_config_file, "w") as yaml_stream:
yaml.safe_dump(config, yaml_stream)
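# Sketch of the overlay written to 'new_config_file' (the top-level 'executor' section
# of 'config_file' is replaced wholesale by dict.update):
#   executor:
#     config:
#       shell_options:
#         eval: <eval_str>
#         global_vars:
#           TestData: <test_data>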
def resmoke_client( # pylint: disable=too-many-arguments
work_dir, mongo_path, host_port, js_test, resmoke_suite, repeat_num=1, no_wait=False,
log_file=None):
"""Start resmoke client from work_dir, connecting to host_port and executes js_test."""
log_output = ">> {} 2>&1".format(log_file) if log_file else ""
cmds = ("cd {}; "
"python buildscripts/resmoke.py"
" --mongo {}"
" --suites {}"
" --shellConnString mongodb://{}"
" --continueOnFailure"
" --repeat {}"
" {}"
" {}".format(
pipes.quote(work_dir), pipes.quote(mongo_path), pipes.quote(resmoke_suite),
host_port, repeat_num, pipes.quote(js_test), log_output))
ret, output = None, None
if no_wait:
Processes.create(cmds)
else:
ret, output = execute_cmd(cmds, use_file=True)
return ret, output
def main(): # pylint: disable=too-many-branches,too-many-locals,too-many-statements
"""Execute Main program."""
# pylint: disable=global-statement
global REPORT_JSON
global REPORT_JSON_FILE
global REPORT_JSON_SUCCESS
global EXIT_YML_FILE
global EXIT_YML
# pylint: enable=global-statement
atexit.register(exit_handler)
register_signal_handler(dump_stacks_and_exit)
parser = optparse.OptionParser(usage="""
%prog [options]
MongoDB Powercycle test
Examples:
Server is running as single node replica set connected to mFi mPower, outlet1:
python powertest.py
--sshUserHost 10.4.1.54
--rootDir pt-mmap
--replSet power
--crashMethod mpower
--crashOption output1
--sshCrashUserHost admin@10.4.100.2
--sshCrashOption "-oKexAlgorithms=+diffie-hellman-group1-sha1 -i /Users/jonathan/.ssh/mFi.pem"
--mongodOptions "--storageEngine mmapv1"
Linux server running in AWS, testing nojournal:
python powertest.py
--sshUserHost ec2-user@52.4.173.196
--sshConnection "-i $HOME/.ssh/JAkey.pem"
--rootDir pt-nojournal
--mongodOptions "--nojournal"
""")
test_options = optparse.OptionGroup(parser, "Test Options")
crash_options = optparse.OptionGroup(parser, "Crash Options")
mongodb_options = optparse.OptionGroup(parser, "MongoDB Options")
mongod_options = optparse.OptionGroup(parser, "mongod Options")
client_options = optparse.OptionGroup(parser, "Client Options")
program_options = optparse.OptionGroup(parser, "Program Options")
# Test options
test_options.add_option("--sshUserHost", dest="ssh_user_host",
help="Server ssh user/host, i.e., user@host (REQUIRED)", default=None)
default_ssh_connection_options = ("-o ServerAliveCountMax=10"
" -o ServerAliveInterval=6"
" -o StrictHostKeyChecking=no"
" -o ConnectTimeout=10"
" -o ConnectionAttempts=20")
test_options.add_option("--sshConnection", dest="ssh_connection_options",
help="Server ssh additional connection options, i.e., '-i ident.pem'"
" which are added to '{}'".format(default_ssh_connection_options),
default=None)
test_options.add_option("--testLoops", dest="num_loops",
help="Number of powercycle loops to run [default: %default]",
type="int", default=10)
test_options.add_option("--testTime", dest="test_time",
help="Time to run test (in seconds), overrides --testLoops", type="int",
default=0)
test_options.add_option("--rsync", dest="rsync_data",
help="Rsync data directory between mongod stop and start",
action="store_true", default=False)
test_options.add_option("--rsyncExcludeFiles", dest="rsync_exclude_files",
help="Files excluded from rsync of the data directory", action="append",
default=None)
test_options.add_option("--backupPathBefore", dest="backup_path_before",
help="Path where the db_path is backed up before crash recovery,"
" defaults to '<rootDir>/data-beforerecovery'", default=None)
test_options.add_option("--backupPathAfter", dest="backup_path_after",
help="Path where the db_path is backed up after crash recovery,"
" defaults to '<rootDir>/data-afterrecovery'", default=None)
validate_locations = ["local", "remote"]
test_options.add_option("--validate", dest="validate_collections",
help="Run validate on all collections after mongod restart after"
" a powercycle. Choose from {} to specify where the"
" validate runs.".format(validate_locations),
choices=validate_locations, default=None)
canary_locations = ["local", "remote"]
test_options.add_option("--canary", dest="canary",
help="Generate and validate canary document between powercycle"
" events. Choose from {} to specify where the canary is"
" generated from. If the 'crashMethod' is not 'internal"
" then this option must be 'local'.".format(canary_locations),
choices=canary_locations, default=None)
test_options.add_option("--docForCanary", dest="canary_doc", help=optparse.SUPPRESS_HELP,
default="")
test_options.add_option("--seedDocNum", dest="seed_doc_num",
help="Number of documents to seed the default collection [default:"
" %default]", type="int", default=0)
test_options.add_option("--dbName", dest="db_name", help=optparse.SUPPRESS_HELP,
default="power")
test_options.add_option("--collectionName", dest="collection_name", help=optparse.SUPPRESS_HELP,
default="cycle")
test_options.add_option("--writeConcern", dest="write_concern",
help="mongo (shell) CRUD client writeConcern, i.e.,"
" '{\"w\": \"majority\"}' [default: '%default']", default="{}")
test_options.add_option("--readConcernLevel", dest="read_concern_level",
help="mongo (shell) CRUD client readConcernLevel, i.e.,"
"'majority'", default=None)
# Crash options
crash_methods = ["aws_ec2", "internal", "kill", "mpower"]
crash_options.add_option("--crashMethod", dest="crash_method", choices=crash_methods,
help="Crash methods: {} [default: '%default']."
" Select 'aws_ec2' to force-stop/start an AWS instance."
" Select 'internal' to crash the remote server through an"
" internal command, i.e., sys boot (Linux) or notmyfault (Windows)."
" Select 'kill' to perform an unconditional kill of mongod,"
" which will keep the remote server running."
" Select 'mpower' to use the mFi mPower to cutoff power to"
" the remote server.".format(crash_methods), default="internal")
aws_address_types = [
"private_ip_address", "public_ip_address", "private_dns_name", "public_dns_name"
]
crash_options.add_option("--crashOption", dest="crash_option",
help="Secondary argument for the following --crashMethod:"
" 'aws_ec2': specify EC2 'address_type', which is one of {} and"
" defaults to 'public_ip_address'."
" 'mpower': specify output<num> to turn"
" off/on, i.e., 'output1' (REQUIRED)."
" 'internal': for Windows, optionally specify a crash method,"
" i.e., 'notmyfault/notmyfaultc64.exe"
" -accepteula crash 1'".format(aws_address_types), default=None)
crash_options.add_option("--instanceId", dest="instance_id",
help="The instance ID of an AWS EC2 host. If specified, this instance"
" will be started after a crash, if it is not in a running state."
" This is required if --crashOption is 'aws_ec2'.", default=None)
crash_options.add_option("--crashWaitTime", dest="crash_wait_time",
help="Time, in seconds, to wait before issuing crash [default:"
" %default]", type="int", default=30)
crash_options.add_option("--jitterForCrashWaitTime", dest="crash_wait_time_jitter",
help="The maximum time, in seconds, to be added to --crashWaitTime,"
" as a uniform distributed random value, [default: %default]",
type="int", default=10)
crash_options.add_option("--sshCrashUserHost", dest="ssh_crash_user_host",
help="The crash host's user@host for performing the crash.",
default=None)
crash_options.add_option("--sshCrashOption", dest="ssh_crash_option",
help="The crash host's ssh connection options, i.e., '-i ident.pem'",
default=None)
# MongoDB options
mongodb_options.add_option("--downloadUrl", dest="tarball_url",
help="URL of tarball to test, if unspecifed latest tarball will be"
" used", default="latest")
mongodb_options.add_option("--rootDir", dest="root_dir",
help="Root directory, on remote host, to install tarball and data"
" directory [default: 'mongodb-powertest-<epochSecs>']",
default=None)
mongodb_options.add_option("--mongodbBinDir", dest="mongodb_bin_dir",
help="Directory, on remote host, containing mongoDB binaries,"
" overrides bin from tarball in --downloadUrl", default=None)
mongodb_options.add_option("--dbPath", dest="db_path",
help="Data directory to use, on remote host, if unspecified"
" it will be '<rootDir>/data/db'", default=None)
mongodb_options.add_option("--logPath", dest="log_path",
help="Log path, on remote host, if unspecified"
" it will be '<rootDir>/log/mongod.log'", default=None)
# mongod options
mongod_options.add_option("--replSet", dest="repl_set",
help="Name of mongod single node replica set, if unpsecified mongod"
" defaults to standalone node", default=None)
# The current host used to start and connect to mongod. Not meant to be specified
# by the user.
mongod_options.add_option("--mongodHost", dest="host", help=optparse.SUPPRESS_HELP,
default=None)
# The current port used to start and connect to mongod. Not meant to be specified
# by the user.
mongod_options.add_option("--mongodPort", dest="port", help=optparse.SUPPRESS_HELP, type="int",
default=None)
# The ports used on the 'server' side when in standard or secret mode.
mongod_options.add_option("--mongodUsablePorts", dest="usable_ports", nargs=2,
help="List of usable ports to be used by mongod for"
" standard and secret modes, [default: %default]", type="int",
default=[27017, 37017])
mongod_options.add_option("--mongodOptions", dest="mongod_options",
help="Additional mongod options", default="")
mongod_options.add_option("--fcv", dest="fcv_version",
help="Set the FeatureCompatibilityVersion of mongod.", default=None)
mongod_options.add_option("--removeLockFile", dest="remove_lock_file",
help="If specified, the mongod.lock file will be deleted after a"
" powercycle event, before mongod is started. This is a"
" workaround for mongod failing start with MMAPV1 (See"
" SERVER-15109).", action="store_true", default=False)
# Client options
mongo_path = distutils.spawn.find_executable("mongo",
os.getcwd() + os.pathsep + os.environ["PATH"])
client_options.add_option("--mongoPath", dest="mongo_path",
help="Path to mongo (shell) executable, if unspecifed, mongo client"
" is launched from the current directory.", default=mongo_path)
client_options.add_option("--mongoRepoRootDir", dest="mongo_repo_root_dir",
help="Root directory of mongoDB repository, defaults to current"
" directory.", default=None)
client_options.add_option("--crudClient", dest="crud_client",
help="The path to the CRUD client script on the local host"
" [default: '%default'].", default="jstests/hooks/crud_client.js")
with_external_server = "buildscripts/resmokeconfig/suites/with_external_server.yml"
client_options.add_option("--configCrudClient", dest="config_crud_client",
help="The path to the CRUD client configuration YML file on the"
" local host. This is the resmoke.py suite file. If unspecified,"
" a default configuration YML file (%default) will be used that"
" provides a mongo (shell) DB connection to a running mongod.",
default=with_external_server)
client_options.add_option("--numCrudClients", dest="num_crud_clients",
help="The number of concurrent CRUD clients to run"
" [default: '%default'].", type="int", default=1)
client_options.add_option("--numFsmClients", dest="num_fsm_clients",
help="The number of concurrent FSM clients to run"
" [default: '%default'].", type="int", default=0)
client_options.add_option("--fsmWorkloadFiles", dest="fsm_workload_files",
help="A list of the FSM workload files to execute. More than one"
" file can be specified either in a comma-delimited string,"
" or by specifying this option more than once. If unspecified,"
" then all FSM workload files are executed.", action="append",
default=[])
client_options.add_option("--fsmWorkloadBlacklistFiles", dest="fsm_workload_blacklist_files",
help="A list of the FSM workload files to blacklist. More than one"
" file can be specified either in a comma-delimited string,"
" or by specifying this option more than once. Note the"
" file name is the basename, i.e., 'distinct.js'.", action="append",
default=[])
# Program options
program_options.add_option("--configFile", dest="config_file",
help="YAML configuration file of program options."
" Option values are mapped to command line option names."
" The command line option overrides any specified options"
" from this file.", default=None)
program_options.add_option("--saveConfigOptions", dest="save_config_options",
help="Save the program options to a YAML configuration file."
" If this options is specified the program only saves"
" the configuration file and exits.", default=None)
program_options.add_option("--reportJsonFile", dest="report_json_file",
help="Create or update the specified report file upon program"
" exit.", default=None)
program_options.add_option("--exitYamlFile", dest="exit_yml_file",
help="If specified, create a YAML file on exit containing"
" exit code.", default=None)
program_options.add_option("--remotePython", dest="remote_python",
help="The python intepreter to use on the remote host"
" [default: '%default']."
" To be able to use a python virtual environment,"
" which has already been provisioned on the remote"
" host, specify something similar to this:"
" 'source venv/bin/activate; python'", default="python")
program_options.add_option("--remoteSudo", dest="remote_sudo",
help="Use sudo on the remote host for priveleged operations."
" [default: %default]."
" For non-Windows systems, in order to perform privileged"
" operations on the remote host, specify this, if the"
" remote user is not able to perform root operations.",
action="store_true", default=False)
log_levels = ["debug", "info", "warning", "error"]
program_options.add_option("--logLevel", dest="log_level", choices=log_levels,
help="The log level. Accepted values are: {}."
" [default: '%default'].".format(log_levels), default="info")
program_options.add_option("--logFile", dest="log_file",
help="The destination file for the log output. Defaults to stdout.",
default=None)
program_options.add_option("--version", dest="version", help="Display this program's version",
action="store_true", default=False)
# Remote options, include commands and options sent from client to server under test.
    # These are 'internal' options, not meant to be directly specified.
# More than one remote operation can be provided and they are specified in the program args.
program_options.add_option("--remoteOperation", dest="remote_operation",
help=optparse.SUPPRESS_HELP, action="store_true", default=False)
program_options.add_option("--rsyncDest", dest="rsync_dest", help=optparse.SUPPRESS_HELP,
default=None)
parser.add_option_group(test_options)
parser.add_option_group(crash_options)
parser.add_option_group(client_options)
parser.add_option_group(mongodb_options)
parser.add_option_group(mongod_options)
parser.add_option_group(program_options)
options, args = parser.parse_args()
logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s", level=logging.ERROR,
filename=options.log_file)
logging.getLogger(__name__).setLevel(options.log_level.upper())
logging.Formatter.converter = time.gmtime
LOGGER.info("powertest.py invocation: %s", " ".join(sys.argv))
# Command line options override the config file options.
config_options = None
if options.config_file:
with open(options.config_file) as ystream:
config_options = yaml.safe_load(ystream)
LOGGER.info("Loading config file %s with options %s", options.config_file, config_options)
# Load the options specified in the config_file
parser.set_defaults(**config_options)
options, args = parser.parse_args()
# Disable this option such that the remote side does not load a config_file.
options.config_file = None
config_options["config_file"] = None
if options.save_config_options:
# Disable this option such that the remote side does not save the config options.
save_config_options = options.save_config_options
options.save_config_options = None
save_options = {}
for opt_group in parser.option_groups:
for opt in opt_group.option_list:
if getattr(options, opt.dest, None) != opt.default:
save_options[opt.dest] = getattr(options, opt.dest, None)
LOGGER.info("Config options being saved %s", save_options)
with open(save_config_options, "w") as ystream:
yaml.safe_dump(save_options, ystream, default_flow_style=False)
sys.exit(0)
script_name = os.path.basename(__file__)
# Print script name and version.
if options.version:
print("{}:{}".format(script_name, __version__))
sys.exit(0)
if options.exit_yml_file:
EXIT_YML_FILE = options.exit_yml_file
# Disable this option such that the remote side does not generate exit_yml_file
options.exit_yml_file = None
if options.report_json_file:
REPORT_JSON_FILE = options.report_json_file
if REPORT_JSON_FILE and os.path.exists(REPORT_JSON_FILE):
with open(REPORT_JSON_FILE) as jstream:
REPORT_JSON = json.load(jstream)
else:
            REPORT_JSON = {
                "failures": 0,
                "results": [{
                    "status": "fail", "test_file": __name__, "exit_code": 0, "elapsed": 0,
                    "start": int(time.time()), "end": int(time.time())
                }]
            }
LOGGER.debug("Updating/creating report JSON %s", REPORT_JSON)
# Disable this option such that the remote side does not generate report.json
options.report_json_file = None
# Setup the crash options
if options.crash_method == "mpower" and options.crash_option is None:
parser.error("Missing required argument --crashOption for crashMethod '{}'".format(
options.crash_method))
if options.crash_method == "aws_ec2":
if not options.instance_id:
parser.error("Missing required argument --instanceId for crashMethod '{}'".format(
options.crash_method))
address_type = "public_ip_address"
if options.crash_option:
address_type = options.crash_option
if address_type not in aws_address_types:
parser.error("Invalid crashOption address_type '{}' specified for crashMethod"
" 'aws_ec2', specify one of {}".format(address_type, aws_address_types))
# Initialize the mongod options
# Note - We use posixpath for Windows client to Linux server scenarios.
if not options.root_dir:
options.root_dir = "mongodb-powertest-{}".format(int(time.time()))
if not options.db_path:
options.db_path = posixpath.join(options.root_dir, "data", "db")
if not options.log_path:
options.log_path = posixpath.join(options.root_dir, "log", "mongod.log")
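    # For example (timestamp made up), a generated root_dir of
    # "mongodb-powertest-1500000000" yields a remote db_path of
    # "mongodb-powertest-1500000000/data/db" and a log_path of
    # "mongodb-powertest-1500000000/log/mongod.log".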
mongod_options_map = parse_options(options.mongod_options)
set_fcv_cmd = "set_fcv" if options.fcv_version is not None else ""
remove_lock_file_cmd = "remove_lock_file" if options.remove_lock_file else ""
# Error out earlier if these options are not properly specified
write_concern = yaml.safe_load(options.write_concern)
options.canary_doc = yaml.safe_load(options.canary_doc)
# Invoke remote_handler if remote_operation is specified.
# The remote commands are program args.
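    # For example, the local side later re-invokes this script on the remote host with
    # "--remoteOperation install_mongod" (see the install step below); the trailing
    # words after the option are treated as the remote commands to run.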
if options.remote_operation:
ret = remote_handler(options, args)
# Exit here since the local operations are performed after this.
local_exit(ret)
# Required option for non-remote commands.
if options.ssh_user_host is None and not options.remote_operation:
parser.error("Missing required argument --sshUserHost")
secret_port = options.usable_ports[1]
standard_port = options.usable_ports[0]
seed_docs = "seed_docs" if options.seed_doc_num else ""
if options.rsync_data:
rsync_cmd = "rsync_data"
backup_path_before = options.backup_path_before
if not backup_path_before:
backup_path_before = "{}/data-beforerecovery".format(options.root_dir)
backup_path_after = options.backup_path_after
if not backup_path_after:
backup_path_after = "{}/data-afterrecovery".format(options.root_dir)
else:
rsync_cmd = ""
rsync_opt = ""
# Setup the mongo client, mongo_path is required if there are local clients.
if (options.num_crud_clients > 0 or options.num_fsm_clients > 0
or options.validate_collections == "local"):
if not options.mongo_path:
LOGGER.error("mongoPath must be specified")
local_exit(1)
if not os.path.isfile(options.mongo_path):
LOGGER.error("mongoPath %s does not exist", options.mongo_path)
local_exit(1)
mongo_path = os.path.abspath(os.path.normpath(options.mongo_path))
# Setup the CRUD & FSM clients.
if not os.path.isfile(options.config_crud_client):
LOGGER.error("configCrudClient %s does not exist", options.config_crud_client)
local_exit(1)
with_external_server = "buildscripts/resmokeconfig/suites/with_external_server.yml"
fsm_client = "jstests/libs/fsm_serial_client.js"
fsm_workload_files = []
for fsm_workload_file in options.fsm_workload_files:
fsm_workload_files += fsm_workload_file.replace(" ", "").split(",")
fsm_workload_blacklist_files = []
for fsm_workload_blacklist_file in options.fsm_workload_blacklist_files:
fsm_workload_blacklist_files += fsm_workload_blacklist_file.replace(" ", "").split(",")
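    # Illustration (file names made up): each workload-file option may be given as a
    # comma-delimited string and/or repeated, so passing "a.js,b.js" once and "c.js"
    # a second time yields ["a.js", "b.js", "c.js"] after the splitting above.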
read_concern_level = options.read_concern_level
if write_concern and not read_concern_level:
read_concern_level = "local"
crud_test_data = {}
if read_concern_level:
crud_test_data["defaultReadConcernLevel"] = read_concern_level
if write_concern:
crud_test_data["defaultWriteConcern"] = write_concern
if read_concern_level or write_concern:
eval_str = "load('jstests/libs/override_methods/set_read_and_write_concerns.js');"
else:
eval_str = ""
fsm_test_data = copy.deepcopy(crud_test_data)
fsm_test_data["fsmDbBlacklist"] = [options.db_name]
if fsm_workload_files:
fsm_test_data["workloadFiles"] = fsm_workload_files
if fsm_workload_blacklist_files:
fsm_test_data["workloadBlacklistFiles"] = fsm_workload_blacklist_files
crud_test_data["dbName"] = options.db_name
# Setup the mongo_repo_root.
if options.mongo_repo_root_dir:
mongo_repo_root_dir = options.mongo_repo_root_dir
else:
mongo_repo_root_dir = os.getcwd()
if not os.path.isdir(mongo_repo_root_dir):
LOGGER.error("mongoRepoRoot %s does not exist", mongo_repo_root_dir)
local_exit(1)
# Setup the validate_collections option.
if options.validate_collections == "remote":
validate_collections_cmd = "validate_collections"
else:
validate_collections_cmd = ""
# Setup the validate_canary option.
if options.canary and "nojournal" in mongod_options_map:
LOGGER.error("Cannot create and validate canary documents if the mongod option"
" '--nojournal' is used.")
local_exit(1)
internal_crash_options = ["internal", "kill"]
if options.canary == "remote" and options.crash_method not in internal_crash_options:
parser.error("The option --canary can only be specified as 'remote' if --crashMethod"
" is one of {}".format(internal_crash_options))
orig_canary_doc = canary_doc = ""
validate_canary_cmd = ""
# Set the Pymongo connection timeout to 1 hour for canary insert & validation.
one_hour_ms = 60 * 60 * 1000
# The remote mongod host comes from the ssh_user_host,
# which may be specified as user@host.
ssh_user_host = options.ssh_user_host
ssh_user, ssh_host = get_user_host(ssh_user_host)
mongod_host = ssh_host
ssh_connection_options = "{} {}".format(default_ssh_connection_options,
options.ssh_connection_options
if options.ssh_connection_options else "")
# For remote operations requiring sudo, force pseudo-tty allocation,
# see https://stackoverflow.com/questions/10310299/proper-way-to-sudo-over-ssh.
# Note - the ssh option RequestTTY was added in OpenSSH 5.9, so we use '-tt'.
ssh_options = "-tt" if options.remote_sudo else None
# Establish EC2 connection if an instance_id is specified.
if options.instance_id:
ec2 = aws_ec2.AwsEc2() # pylint: disable=undefined-variable
# Determine address_type if not using 'aws_ec2' crash_method.
if options.crash_method != "aws_ec2":
address_type = "public_ip_address"
ret, aws_status = ec2.control_instance(mode="status", image_id=options.instance_id)
if not is_instance_running(ret, aws_status):
LOGGER.error("AWS instance is not running: %d %s", ret, aws_status)
local_exit(1)
if (ssh_host == aws_status.private_ip_address
or ssh_host == aws_status.private_dns_name):
address_type = "private_ip_address"
# Instantiate the local handler object.
local_ops = LocalToRemoteOperations(user_host=ssh_user_host,
ssh_connection_options=ssh_connection_options,
ssh_options=ssh_options, use_shell=True)
verify_remote_access(local_ops)
# Bootstrap the remote host with this script.
ret, output = local_ops.copy_to(__file__)
if ret:
LOGGER.error("Cannot access remote system %s", output)
local_exit(1)
# Pass client_args to the remote script invocation.
client_args = ""
for option in parser._get_all_options(): # pylint: disable=protected-access
if option.dest:
option_value = getattr(options, option.dest, None)
if option_value != option.default:
# The boolean options do not require the option_value.
if isinstance(option_value, bool):
option_value = ""
# Quote the non-default option values from the invocation of this script,
# if they have spaces, or quotes, such that they can be safely passed to the
# remote host's invocation of this script.
elif isinstance(option_value, str) and re.search("\"|'| ", option_value):
option_value = "'{}'".format(option_value)
# The tuple, list or set options need to be changed to a string.
elif isinstance(option_value, (tuple, list, set)):
option_value = " ".join(map(str, option_value))
client_args = "{} {} {}".format(client_args, option.get_opt_string(), option_value)
LOGGER.info("%s %s", __file__, client_args)
# Remote install of MongoDB.
ret, output = call_remote_operation(local_ops, options.remote_python, script_name, client_args,
"--remoteOperation install_mongod")
LOGGER.info("****install_mongod: %d %s****", ret, output)
if ret:
local_exit(ret)
# test_time option overrides num_loops.
if options.test_time:
options.num_loops = 999999
else:
options.test_time = 999999
loop_num = 0
start_time = int(time.time())
test_time = 0
    # ======== Main loop for running the powercycle test ========
# 1. Rsync the database (optional, post-crash, pre-recovery)
# 2. Start mongod on the secret port and wait for it to recover
    # 3. Validate collections (optional)
# 4. Validate canary (optional)
# 5. Stop mongod
# 6. Rsync the database (optional, post-recovery)
# 7. Start mongod on the standard port
# 8. Start mongo (shell) & FSM clients
# 9. Generate canary document (optional)
# 10. Crash the server
# 11. Exit loop if one of these occurs:
# a. Loop time or loop number exceeded
# b. Any step fails
# =========
while True:
loop_num += 1
LOGGER.info("****Starting test loop %d test time %d seconds****", loop_num, test_time)
temp_client_files = []
validate_canary_local = False
if options.canary and loop_num > 1:
if options.canary == "remote":
canary_opt = "--docForCanary \"{}\"".format(canary_doc)
validate_canary_cmd = "validate_canary" if options.canary else ""
else:
validate_canary_local = True
else:
canary_opt = ""
# Since rsync requires Posix style paths, we do not use os.path.join to
# construct the rsync destination directory.
if rsync_cmd:
rsync_opt = "--rsyncDest {}".format(backup_path_before)
# Optionally, rsync the pre-recovery database.
        # Start mongod on the secret port.
# Optionally validate collections, validate the canary and seed the collection.
remote_operation = ("--remoteOperation"
" {rsync_opt}"
" {canary_opt}"
" --mongodHost {host}"
" --mongodPort {port}"
" {rsync_cmd}"
" {remove_lock_file_cmd}"
" start_mongod"
" {set_fcv_cmd}"
" {validate_collections_cmd}"
" {validate_canary_cmd}"
" {seed_docs}").format(
rsync_opt=rsync_opt, canary_opt=canary_opt, host=mongod_host,
port=secret_port, rsync_cmd=rsync_cmd,
remove_lock_file_cmd=remove_lock_file_cmd, set_fcv_cmd=set_fcv_cmd
if loop_num == 1 else "",
validate_collections_cmd=validate_collections_cmd,
validate_canary_cmd=validate_canary_cmd, seed_docs=seed_docs
if loop_num == 1 else "")
ret, output = call_remote_operation(local_ops, options.remote_python, script_name,
client_args, remote_operation)
rsync_text = "rsync_data beforerecovery & " if options.rsync_data else ""
LOGGER.info("****%sstart mongod: %d %s****", rsync_text, ret, output)
if ret:
local_exit(ret)
# Optionally validate canary document locally.
if validate_canary_local:
mongo = pymongo.MongoClient(**get_mongo_client_args(
host=mongod_host, port=secret_port, server_selection_timeout_ms=one_hour_ms,
socket_timeout_ms=one_hour_ms))
ret = mongo_validate_canary(mongo, options.db_name, options.collection_name, canary_doc)
LOGGER.info("Local canary validation: %d", ret)
if ret:
local_exit(ret)
# Optionally, run local validation of collections.
if options.validate_collections == "local":
host_port = "{}:{}".format(mongod_host, secret_port)
new_config_file = NamedTempFile.create(suffix=".yml", directory="tmp")
temp_client_files.append(new_config_file)
validation_test_data = {"skipValidationOnNamespaceNotFound": True}
new_resmoke_config(with_external_server, new_config_file, validation_test_data)
ret, output = resmoke_client(mongo_repo_root_dir, mongo_path, host_port,
"jstests/hooks/run_validate_collections.js",
new_config_file)
LOGGER.info("Local collection validation: %d %s", ret, output)
if ret:
local_exit(ret)
# Shutdown mongod on secret port.
remote_op = ("--remoteOperation" " --mongodPort {}" " shutdown_mongod").format(secret_port)
ret, output = call_remote_operation(local_ops, options.remote_python, script_name,
client_args, remote_op)
LOGGER.info("****shutdown_mongod: %d %s****", ret, output)
if ret:
local_exit(ret)
# Since rsync requires Posix style paths, we do not use os.path.join to
# construct the rsync destination directory.
if rsync_cmd:
rsync_opt = "--rsyncDest {}".format(backup_path_after)
# Optionally, rsync the post-recovery database.
        # Start mongod on the standard port.
remote_op = ("--remoteOperation"
" {}"
" --mongodHost {}"
" --mongodPort {}"
" {}"
" start_mongod").format(rsync_opt, mongod_host, standard_port, rsync_cmd)
ret, output = call_remote_operation(local_ops, options.remote_python, script_name,
client_args, remote_op)
rsync_text = "rsync_data afterrecovery & " if options.rsync_data else ""
LOGGER.info("****%s start mongod: %d %s****", rsync_text, ret, output)
if ret:
local_exit(ret)
# Start CRUD clients
host_port = "{}:{}".format(mongod_host, standard_port)
for i in xrange(options.num_crud_clients):
if options.config_crud_client == with_external_server:
crud_config_file = NamedTempFile.create(suffix=".yml", directory="tmp")
crud_test_data["collectionName"] = "{}-{}".format(options.collection_name, i)
new_resmoke_config(with_external_server, crud_config_file, crud_test_data, eval_str)
else:
crud_config_file = options.config_crud_client
_, _ = resmoke_client(work_dir=mongo_repo_root_dir, mongo_path=mongo_path,
host_port=host_port, js_test=options.crud_client,
resmoke_suite=crud_config_file, repeat_num=100, no_wait=True,
log_file="crud_{}.log".format(i))
if options.num_crud_clients:
LOGGER.info("****Started %d CRUD client(s)****", options.num_crud_clients)
# Start FSM clients
for i in xrange(options.num_fsm_clients):
fsm_config_file = NamedTempFile.create(suffix=".yml", directory="tmp")
fsm_test_data["dbNamePrefix"] = "fsm-{}".format(i)
# Do collection validation only for the first FSM client.
fsm_test_data["validateCollections"] = True if i == 0 else False
new_resmoke_config(with_external_server, fsm_config_file, fsm_test_data, eval_str)
_, _ = resmoke_client(work_dir=mongo_repo_root_dir, mongo_path=mongo_path,
host_port=host_port, js_test=fsm_client,
resmoke_suite=fsm_config_file, repeat_num=100, no_wait=True,
log_file="fsm_{}.log".format(i))
if options.num_fsm_clients:
LOGGER.info("****Started %d FSM client(s)****", options.num_fsm_clients)
# Crash the server. A pre-crash canary document is optionally written to the DB.
crash_canary = {}
if options.canary:
canary_doc = {"x": time.time()}
orig_canary_doc = copy.deepcopy(canary_doc)
mongo = pymongo.MongoClient(**get_mongo_client_args(
host=mongod_host, port=standard_port, server_selection_timeout_ms=one_hour_ms,
socket_timeout_ms=one_hour_ms))
crash_canary["function"] = mongo_insert_canary
crash_canary["args"] = [mongo, options.db_name, options.collection_name, canary_doc]
ret, output = crash_server_or_kill_mongod(options, crash_canary, standard_port, local_ops,
script_name, client_args)
# For internal crashes 'ret' is non-zero, because the ssh session unexpectedly terminates.
if options.crash_method != "internal" and ret:
raise Exception("Crash of server failed: {}".format(output))
if options.crash_method != "kill":
# Wait a bit after sending command to crash the server to avoid connecting to the
# server before the actual crash occurs.
time.sleep(10)
# Kill any running clients and cleanup temporary files.
Processes.kill_all()
for temp_file in temp_client_files:
NamedTempFile.delete(temp_file)
instance_running = True
if options.instance_id:
ret, aws_status = ec2.control_instance(mode="status", image_id=options.instance_id)
LOGGER.info("AWS EC2 instance status: %d %s****", ret, aws_status)
instance_running = is_instance_running(ret, aws_status)
# The EC2 instance address changes if the instance is restarted.
if options.crash_method == "aws_ec2" or not instance_running:
ret, aws_status = ec2.control_instance(mode="start", image_id=options.instance_id,
wait_time_secs=600, show_progress=True)
LOGGER.info("Start instance: %d %s****", ret, aws_status)
if ret:
raise Exception("Start instance failed: {}".format(aws_status))
if not hasattr(aws_status, address_type):
raise Exception("Cannot determine address_type {} from AWS EC2 status {}".format(
address_type, aws_status))
ssh_host = getattr(aws_status, address_type)
if ssh_user is None:
ssh_user_host = ssh_host
else:
ssh_user_host = "{}@{}".format(ssh_user, ssh_host)
mongod_host = ssh_host
# Reestablish remote access after crash.
local_ops = LocalToRemoteOperations(user_host=ssh_user_host,
ssh_connection_options=ssh_connection_options,
ssh_options=ssh_options, use_shell=True)
verify_remote_access(local_ops)
canary_doc = copy.deepcopy(orig_canary_doc)
test_time = int(time.time()) - start_time
LOGGER.info("****Completed test loop %d test time %d seconds****", loop_num, test_time)
if loop_num == options.num_loops or test_time >= options.test_time:
break
REPORT_JSON_SUCCESS = True
local_exit(0)
if __name__ == "__main__":
main()
| 43.010936 | 113 | 0.613782 |
61b09e993c2d7f01b552752e9be78daa17510132 | 1,825 | py | Python | Command Line Interface Version/generatorCore.py | mmble/password_generator | 54c9c6b82ca29ad5d5840b3bcf44e638a1581198 | ["Apache-2.0"] | 1 | 2019-09-25T13:33:22.000Z | 2019-09-25T13:33:22.000Z | Command Line Interface Version/generatorCore.py | mmacq/password_generator | 54c9c6b82ca29ad5d5840b3bcf44e638a1581198 | ["Apache-2.0"] | null | null | null | Command Line Interface Version/generatorCore.py | mmacq/password_generator | 54c9c6b82ca29ad5d5840b3bcf44e638a1581198 | ["Apache-2.0"] | null | null | null |
import random
import string
debugmode = False
def debug(str):
if debugmode:
for i in str:
print(i)
setsOfCharacters = {
3: string.ascii_lowercase,
2: string.digits,
1: string.punctuation,
0: string.ascii_uppercase,
}
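# The keys above act as bit positions of the "options" bitmask accepted by
# generateRandomCharacters below: bit 3 (value 8) enables lowercase letters,
# bit 2 (4) digits, bit 1 (2) punctuation and bit 0 (1) uppercase letters,
# so e.g. options=15 selects all four sets.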
def printWarning(infoString=""):
print("[Warning] "+infoString)
def generateRandomString(str, len):
return "".join(random.SystemRandom().choice(str) for _ in range(len))
# Function for generating a string of random characters
def generateRandomCharacters(stringLength=0, options=0):
def generateFormula(stringLength, sets=[]):
numberOfSets = len(sets)
setsTogether = "".join(sets)
eachSetsCharacter = []
if stringLength < numberOfSets:
printWarning(
"Length of the password is to short to include all types of the characters.")
return generateRandomString(setsTogether, stringLength)
else:
stringLength -= len(sets)
tempStringLength = stringLength
for i in range(0, numberOfSets):
debug([i,random.randint(0, len(sets[i])), sets[i][0], tempStringLength])
randomCharacter = random.SystemRandom().choice(sets[i])
eachSetsCharacter.append([randomCharacter, random.randint(0,tempStringLength)])
tempStringLength += 1
debug([eachSetsCharacter])
result = generateRandomString(setsTogether,stringLength)
for i in eachSetsCharacter:
result = result[:i[1]] + i[0] + result[i[1]:]
debug([setsTogether, result])
return result
chosenStrings = []
tempOptions = options
for i in range(3, 0-1, -1):
if tempOptions >= 2 ** i:
tempOptions = tempOptions-2**i
chosenStrings.append(setsOfCharacters[i])
if len(chosenStrings) == 0:
print("You did not choose any set of characters.")
return generateFormula(stringLength, chosenStrings)
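# Example usage (illustrative, not part of the original module):
#   generateRandomCharacters(16, 15)
# returns a 16-character string drawn from all four sets, with at least one
# character from each set inserted at a random position (see generateFormula).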
| 28.968254 | 87 | 0.688767 |
2a75bf3aba95545cee02082e6b02f77d45865b6d | 155,320 | py | Python | apps/rss_feeds/models.py | sictiru/NewsBlur | 3fb3bd1962439d14ea725b34a3d8c7cedc569876 | ["MIT"] | null | null | null | apps/rss_feeds/models.py | sictiru/NewsBlur | 3fb3bd1962439d14ea725b34a3d8c7cedc569876 | ["MIT"] | null | null | null | apps/rss_feeds/models.py | sictiru/NewsBlur | 3fb3bd1962439d14ea725b34a3d8c7cedc569876 | ["MIT"] | null | null | null |
import difflib
import requests
import datetime
import time
import random
import re
import math
import mongoengine as mongo
import zlib
import hashlib
import redis
import pymongo
import HTMLParser
import urlparse
from collections import defaultdict
from operator import itemgetter
from bson.objectid import ObjectId
from BeautifulSoup import BeautifulSoup
from pyes.exceptions import NotFoundException
# from nltk.collocations import TrigramCollocationFinder, BigramCollocationFinder, TrigramAssocMeasures, BigramAssocMeasures
from django.db import models
from django.db import IntegrityError
from django.conf import settings
from django.db.models.query import QuerySet
from django.db.utils import DatabaseError
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.template.defaultfilters import slugify
from django.utils.encoding import smart_str, smart_unicode
from mongoengine.queryset import OperationError, Q, NotUniqueError
from mongoengine.base import ValidationError
from vendor.timezones.utilities import localtime_for_timezone
from apps.rss_feeds.tasks import UpdateFeeds, PushFeeds, ScheduleCountTagsForUser
from apps.rss_feeds.text_importer import TextImporter
from apps.search.models import SearchStory, SearchFeed
from apps.statistics.rstats import RStats
from utils import json_functions as json
from utils import feedfinder_forman
from utils import feedfinder_pilgrim
from utils import urlnorm
from utils import log as logging
from utils.fields import AutoOneToOneField
from utils.feed_functions import levenshtein_distance
from utils.feed_functions import timelimit, TimeoutError
from utils.feed_functions import relative_timesince
from utils.feed_functions import seconds_timesince
from utils.story_functions import strip_tags, htmldiff, strip_comments, strip_comments__lxml
from utils.story_functions import prep_for_search
from utils.story_functions import create_imageproxy_signed_url
ENTRY_NEW, ENTRY_UPDATED, ENTRY_SAME, ENTRY_ERR = range(4)
class Feed(models.Model):
feed_address = models.URLField(max_length=764, db_index=True)
feed_address_locked = models.NullBooleanField(default=False, blank=True, null=True)
feed_link = models.URLField(max_length=1000, default="", blank=True, null=True)
feed_link_locked = models.BooleanField(default=False)
hash_address_and_link = models.CharField(max_length=64, unique=True)
feed_title = models.CharField(max_length=255, default="[Untitled]", blank=True, null=True)
is_push = models.NullBooleanField(default=False, blank=True, null=True)
active = models.BooleanField(default=True, db_index=True)
num_subscribers = models.IntegerField(default=-1)
active_subscribers = models.IntegerField(default=-1, db_index=True)
premium_subscribers = models.IntegerField(default=-1)
active_premium_subscribers = models.IntegerField(default=-1)
branch_from_feed = models.ForeignKey('Feed', blank=True, null=True, db_index=True)
last_update = models.DateTimeField(db_index=True)
next_scheduled_update = models.DateTimeField()
last_story_date = models.DateTimeField(null=True, blank=True)
fetched_once = models.BooleanField(default=False)
known_good = models.BooleanField(default=False)
has_feed_exception = models.BooleanField(default=False, db_index=True)
has_page_exception = models.BooleanField(default=False, db_index=True)
has_page = models.BooleanField(default=True)
exception_code = models.IntegerField(default=0)
errors_since_good = models.IntegerField(default=0)
min_to_decay = models.IntegerField(default=0)
days_to_trim = models.IntegerField(default=90)
creation = models.DateField(auto_now_add=True)
etag = models.CharField(max_length=255, blank=True, null=True)
last_modified = models.DateTimeField(null=True, blank=True)
stories_last_month = models.IntegerField(default=0)
average_stories_per_month = models.IntegerField(default=0)
last_load_time = models.IntegerField(default=0)
favicon_color = models.CharField(max_length=6, null=True, blank=True)
favicon_not_found = models.BooleanField(default=False)
s3_page = models.NullBooleanField(default=False, blank=True, null=True)
s3_icon = models.NullBooleanField(default=False, blank=True, null=True)
search_indexed = models.NullBooleanField(default=None, null=True, blank=True)
class Meta:
db_table="feeds"
ordering=["feed_title"]
# unique_together=[('feed_address', 'feed_link')]
def __unicode__(self):
if not self.feed_title:
self.feed_title = "[Untitled]"
self.save()
return "%s%s: %s - %s/%s/%s" % (
self.pk,
(" [B: %s]" % self.branch_from_feed.pk if self.branch_from_feed else ""),
self.feed_title,
self.num_subscribers,
self.active_subscribers,
self.active_premium_subscribers,
)
@property
def title(self):
title = self.feed_title or "[Untitled]"
if self.active_premium_subscribers >= 1:
title = "%s*" % title[:29]
return title
@property
def log_title(self):
return self.__unicode__()
@property
def permalink(self):
return "%s/site/%s/%s" % (settings.NEWSBLUR_URL, self.pk, slugify(self.feed_title.lower()[:50]))
@property
def favicon_url(self):
if settings.BACKED_BY_AWS['icons_on_s3'] and self.s3_icon:
return "https://s3.amazonaws.com/%s/%s.png" % (settings.S3_ICONS_BUCKET_NAME, self.pk)
return reverse('feed-favicon', kwargs={'feed_id': self.pk})
@property
def favicon_url_fqdn(self):
if settings.BACKED_BY_AWS['icons_on_s3'] and self.s3_icon:
return self.favicon_url
return "http://%s%s" % (
Site.objects.get_current().domain,
self.favicon_url
)
@property
def s3_pages_key(self):
return "%s.gz.html" % self.pk
@property
def s3_icons_key(self):
return "%s.png" % self.pk
@property
def unread_cutoff(self):
if self.active_premium_subscribers > 0:
return datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD)
return datetime.datetime.utcnow() - datetime.timedelta(days=settings.DAYS_OF_UNREAD_FREE)
@property
def story_hashes_in_unread_cutoff(self):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
current_time = int(time.time() + 60*60*24)
unread_cutoff = self.unread_cutoff.strftime('%s')
story_hashes = r.zrevrangebyscore('zF:%s' % self.pk, current_time, unread_cutoff)
return story_hashes
@classmethod
def generate_hash_address_and_link(cls, feed_address, feed_link):
if not feed_address: feed_address = ""
if not feed_link: feed_link = ""
return hashlib.sha1(feed_address+feed_link).hexdigest()
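    # Illustration (example URLs made up): the dedupe hash is simply SHA-1 over the two
    # concatenated strings, i.e. generate_hash_address_and_link("http://a.com/rss", "http://a.com/")
    # equals hashlib.sha1("http://a.com/rss" + "http://a.com/").hexdigest().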
@property
def is_newsletter(self):
return self.feed_address.startswith('newsletter:') or self.feed_address.startswith('http://newsletter:')
def canonical(self, full=False, include_favicon=True):
feed = {
'id': self.pk,
'feed_title': self.feed_title,
'feed_address': self.feed_address,
'feed_link': self.feed_link,
'num_subscribers': self.num_subscribers,
'updated': relative_timesince(self.last_update),
'updated_seconds_ago': seconds_timesince(self.last_update),
'last_story_date': self.last_story_date,
'last_story_seconds_ago': seconds_timesince(self.last_story_date),
'stories_last_month': self.stories_last_month,
'average_stories_per_month': self.average_stories_per_month,
'min_to_decay': self.min_to_decay,
'subs': self.num_subscribers,
'is_push': self.is_push,
'is_newsletter': self.is_newsletter,
'fetched_once': self.fetched_once,
'search_indexed': self.search_indexed,
'not_yet_fetched': not self.fetched_once, # Legacy. Doh.
'favicon_color': self.favicon_color,
'favicon_fade': self.favicon_fade(),
'favicon_border': self.favicon_border(),
'favicon_text_color': self.favicon_text_color(),
'favicon_fetching': self.favicon_fetching,
'favicon_url': self.favicon_url,
's3_page': self.s3_page,
's3_icon': self.s3_icon,
}
if include_favicon:
try:
feed_icon = MFeedIcon.objects.get(feed_id=self.pk)
feed['favicon'] = feed_icon.data
except MFeedIcon.DoesNotExist:
pass
if self.has_page_exception or self.has_feed_exception:
feed['has_exception'] = True
feed['exception_type'] = 'feed' if self.has_feed_exception else 'page'
feed['exception_code'] = self.exception_code
elif full:
feed['has_exception'] = False
feed['exception_type'] = None
feed['exception_code'] = self.exception_code
if not self.has_page:
feed['disabled_page'] = True
if full:
feed['average_stories_per_month'] = self.average_stories_per_month
feed['tagline'] = self.data.feed_tagline
feed['feed_tags'] = json.decode(self.data.popular_tags) if self.data.popular_tags else []
feed['feed_authors'] = json.decode(self.data.popular_authors) if self.data.popular_authors else []
return feed
def save(self, *args, **kwargs):
if not self.last_update:
self.last_update = datetime.datetime.utcnow()
if not self.next_scheduled_update:
self.next_scheduled_update = datetime.datetime.utcnow()
self.fix_google_alerts_urls()
feed_address = self.feed_address or ""
feed_link = self.feed_link or ""
self.hash_address_and_link = self.generate_hash_address_and_link(feed_address, feed_link)
max_feed_title = Feed._meta.get_field('feed_title').max_length
if len(self.feed_title) > max_feed_title:
self.feed_title = self.feed_title[:max_feed_title]
max_feed_address = Feed._meta.get_field('feed_address').max_length
if len(feed_address) > max_feed_address:
self.feed_address = feed_address[:max_feed_address]
max_feed_link = Feed._meta.get_field('feed_link').max_length
if len(feed_link) > max_feed_link:
self.feed_link = feed_link[:max_feed_link]
try:
super(Feed, self).save(*args, **kwargs)
except IntegrityError, e:
logging.debug(" ---> ~FRFeed save collision (%s), checking dupe hash..." % e)
feed_address = self.feed_address or ""
feed_link = self.feed_link or ""
hash_address_and_link = self.generate_hash_address_and_link(feed_address, feed_link)
logging.debug(" ---> ~FRNo dupes, checking hash collision: %s" % hash_address_and_link)
duplicate_feeds = Feed.objects.filter(hash_address_and_link=hash_address_and_link)
if not duplicate_feeds:
duplicate_feeds = Feed.objects.filter(feed_address=self.feed_address,
feed_link=self.feed_link)
if not duplicate_feeds:
# Feed has been deleted. Just ignore it.
logging.debug(" ***> Changed to: %s - %s: %s" % (self.feed_address, self.feed_link, duplicate_feeds))
logging.debug(' ***> [%-30s] Feed deleted (%s).' % (self.log_title[:30], self.pk))
return
for duplicate_feed in duplicate_feeds:
if duplicate_feed.pk != self.pk:
logging.debug(" ---> ~FRFound different feed (%s), merging %s in..." % (duplicate_feeds[0], self.pk))
feed = Feed.get_by_id(merge_feeds(duplicate_feeds[0].pk, self.pk))
return feed
else:
logging.debug(" ---> ~FRFeed is its own dupe? %s == %s" % (self, duplicate_feeds))
except DatabaseError, e:
logging.debug(" ---> ~FBFeed update failed, no change: %s / %s..." % (kwargs.get('update_fields', None), e))
pass
return self
@classmethod
def index_all_for_search(cls, offset=0, subscribers=2):
if not offset:
SearchFeed.create_elasticsearch_mapping(delete=True)
last_pk = cls.objects.latest('pk').pk
for f in xrange(offset, last_pk, 1000):
print " ---> %s / %s (%.2s%%)" % (f, last_pk, float(f)/last_pk*100)
feeds = Feed.objects.filter(pk__in=range(f, f+1000),
active=True,
active_subscribers__gte=subscribers)\
.values_list('pk')
for feed_id, in feeds:
Feed.objects.get(pk=feed_id).index_feed_for_search()
def index_feed_for_search(self):
if self.num_subscribers > 1 and not self.branch_from_feed and not self.is_newsletter:
SearchFeed.index(feed_id=self.pk,
title=self.feed_title,
address=self.feed_address,
link=self.feed_link,
num_subscribers=self.num_subscribers)
def index_stories_for_search(self):
if self.search_indexed: return
self.search_indexed = True
self.save()
stories = MStory.objects(story_feed_id=self.pk)
for story in stories:
story.index_story_for_search()
def sync_redis(self):
return MStory.sync_feed_redis(self.pk)
def expire_redis(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
r.expire('F:%s' % self.pk, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire('F:%s' % self.pk, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.expire('zF:%s' % self.pk, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.expire('zF:%s' % self.pk, settings.DAYS_OF_STORY_HASHES*24*60*60)
@classmethod
def low_volume_feeds(cls, feed_ids, stories_per_month=30):
try:
stories_per_month = int(stories_per_month)
except ValueError:
stories_per_month = 30
feeds = Feed.objects.filter(pk__in=feed_ids, average_stories_per_month__lte=stories_per_month).only('pk')
return [f.pk for f in feeds]
@classmethod
def autocomplete(self, prefix, limit=5):
results = SearchFeed.query(prefix)
feed_ids = [result.feed_id for result in results[:5]]
# results = SearchQuerySet().autocomplete(address=prefix).order_by('-num_subscribers')[:limit]
#
# if len(results) < limit:
# results += SearchQuerySet().autocomplete(title=prefix).order_by('-num_subscribers')[:limit-len(results)]
#
return feed_ids
@classmethod
def find_or_create(cls, feed_address, feed_link, defaults=None, **kwargs):
feeds = cls.objects.filter(feed_address=feed_address, feed_link=feed_link)
if feeds:
return feeds[0], False
if feed_link and feed_link.endswith('/'):
feeds = cls.objects.filter(feed_address=feed_address, feed_link=feed_link[:-1])
if feeds:
return feeds[0], False
try:
feed = cls.objects.get(feed_address=feed_address, feed_link=feed_link)
return feed, False
except cls.DoesNotExist:
feed = cls(**defaults)
feed.save()
return feed, True
@classmethod
def merge_feeds(cls, *args, **kwargs):
return merge_feeds(*args, **kwargs)
def fix_google_alerts_urls(self):
if (self.feed_address.startswith('http://user/') and
'/state/com.google/alerts/' in self.feed_address):
match = re.match(r"http://user/(\d+)/state/com.google/alerts/(\d+)", self.feed_address)
if match:
user_id, alert_id = match.groups()
self.feed_address = "http://www.google.com/alerts/feeds/%s/%s" % (user_id, alert_id)
@classmethod
def schedule_feed_fetches_immediately(cls, feed_ids, user_id=None):
if settings.DEBUG:
logging.info(" ---> ~SN~FMSkipping the scheduling immediate fetch of ~SB%s~SN feeds (in DEBUG)..." %
len(feed_ids))
return
if user_id:
user = User.objects.get(pk=user_id)
logging.user(user, "~SN~FMScheduling immediate fetch of ~SB%s~SN feeds..." %
len(feed_ids))
else:
logging.debug(" ---> ~SN~FMScheduling immediate fetch of ~SB%s~SN feeds..." %
len(feed_ids))
if len(feed_ids) > 100:
logging.debug(" ---> ~SN~FMFeeds scheduled: %s" % feed_ids)
day_ago = datetime.datetime.now() - datetime.timedelta(days=1)
feeds = Feed.objects.filter(pk__in=feed_ids)
for feed in feeds:
if feed.active_subscribers <= 0:
feed.count_subscribers()
if not feed.active or feed.next_scheduled_update < day_ago:
feed.schedule_feed_fetch_immediately(verbose=False)
@property
def favicon_fetching(self):
return bool(not (self.favicon_not_found or self.favicon_color))
@classmethod
def get_feed_from_url(cls, url, create=True, aggressive=False, fetch=True, offset=0, user=None, interactive=False):
feed = None
without_rss = False
original_url = url
if url and url.startswith('newsletter:'):
return cls.objects.get(feed_address=url)
if url and re.match('(https?://)?twitter.com/\w+/?', url):
without_rss = True
if url and re.match(r'(https?://)?(www\.)?facebook.com/\w+/?$', url):
without_rss = True
if url and 'youtube.com/user/' in url:
username = re.search('youtube.com/user/(\w+)', url).group(1)
url = "http://gdata.youtube.com/feeds/base/users/%s/uploads" % username
without_rss = True
if url and 'youtube.com/channel/' in url:
channel_id = re.search('youtube.com/channel/([-_\w]+)', url).group(1)
url = "https://www.youtube.com/feeds/videos.xml?channel_id=%s" % channel_id
without_rss = True
if url and 'youtube.com/feeds' in url:
without_rss = True
if url and 'youtube.com/playlist' in url:
without_rss = True
def criteria(key, value):
if aggressive:
return {'%s__icontains' % key: value}
else:
return {'%s' % key: value}
def by_url(address):
feed = cls.objects.filter(
branch_from_feed=None
).filter(**criteria('feed_address', address)).order_by('-num_subscribers')
if not feed:
duplicate_feed = DuplicateFeed.objects.filter(**criteria('duplicate_address', address))
if duplicate_feed and len(duplicate_feed) > offset:
feed = [duplicate_feed[offset].feed]
if not feed and aggressive:
feed = cls.objects.filter(
branch_from_feed=None
).filter(**criteria('feed_link', address)).order_by('-num_subscribers')
return feed
@timelimit(10)
def _feedfinder_forman(url):
found_feed_urls = feedfinder_forman.find_feeds(url)
return found_feed_urls
@timelimit(10)
def _feedfinder_pilgrim(url):
found_feed_urls = feedfinder_pilgrim.feeds(url)
return found_feed_urls
# Normalize and check for feed_address, dupes, and feed_link
url = urlnorm.normalize(url)
if not url:
logging.debug(" ---> ~FRCouldn't normalize url: ~SB%s" % url)
return
feed = by_url(url)
found_feed_urls = []
if interactive:
import pdb; pdb.set_trace()
# Create if it looks good
if feed and len(feed) > offset:
feed = feed[offset]
else:
try:
found_feed_urls = _feedfinder_forman(url)
except TimeoutError:
logging.debug(' ---> Feed finder timed out...')
found_feed_urls = []
if not found_feed_urls:
try:
found_feed_urls = _feedfinder_pilgrim(url)
except TimeoutError:
logging.debug(' ---> Feed finder old timed out...')
found_feed_urls = []
if len(found_feed_urls):
feed_finder_url = found_feed_urls[0]
logging.debug(" ---> Found feed URLs for %s: %s" % (url, found_feed_urls))
feed = by_url(feed_finder_url)
if feed and len(feed) > offset:
feed = feed[offset]
logging.debug(" ---> Feed exists (%s), updating..." % (feed))
feed = feed.update()
elif create:
logging.debug(" ---> Feed doesn't exist, creating: %s" % (feed_finder_url))
feed = cls.objects.create(feed_address=feed_finder_url)
feed = feed.update()
elif without_rss:
logging.debug(" ---> Found without_rss feed: %s / %s" % (url, original_url))
feed = cls.objects.create(feed_address=url, feed_link=original_url)
feed = feed.update(requesting_user_id=user.pk if user else None)
# Check for JSON feed
if not feed and fetch and create:
try:
r = requests.get(url)
except (requests.ConnectionError, requests.models.InvalidURL):
r = None
if r and 'application/json' in r.headers.get('Content-Type'):
feed = cls.objects.create(feed_address=url)
feed = feed.update()
# Still nothing? Maybe the URL has some clues.
if not feed and fetch and len(found_feed_urls):
feed_finder_url = found_feed_urls[0]
feed = by_url(feed_finder_url)
if not feed and create:
feed = cls.objects.create(feed_address=feed_finder_url)
feed = feed.update()
elif feed and len(feed) > offset:
feed = feed[offset]
# Not created and not within bounds, so toss results.
if isinstance(feed, QuerySet):
logging.debug(" ---> ~FRNot created and not within bounds, tossing: ~SB%s" % feed)
return
return feed
@classmethod
def task_feeds(cls, feeds, queue_size=12, verbose=True):
if not feeds: return
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
if isinstance(feeds, Feed):
if verbose:
logging.debug(" ---> ~SN~FBTasking feed: ~SB%s" % feeds)
feeds = [feeds.pk]
elif verbose:
logging.debug(" ---> ~SN~FBTasking ~SB~FC%s~FB~SN feeds..." % len(feeds))
if isinstance(feeds, QuerySet):
feeds = [f.pk for f in feeds]
r.srem('queued_feeds', *feeds)
now = datetime.datetime.now().strftime("%s")
p = r.pipeline()
for feed_id in feeds:
p.zadd('tasked_feeds', feed_id, now)
p.execute()
# for feed_ids in (feeds[pos:pos + queue_size] for pos in xrange(0, len(feeds), queue_size)):
for feed_id in feeds:
UpdateFeeds.apply_async(args=(feed_id,), queue='update_feeds')
@classmethod
def drain_task_feeds(cls):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
tasked_feeds = r.zrange('tasked_feeds', 0, -1)
logging.debug(" ---> ~FRDraining %s tasked feeds..." % len(tasked_feeds))
r.sadd('queued_feeds', *tasked_feeds)
r.zremrangebyrank('tasked_feeds', 0, -1)
errored_feeds = r.zrange('error_feeds', 0, -1)
logging.debug(" ---> ~FRDraining %s errored feeds..." % len(errored_feeds))
r.sadd('queued_feeds', *errored_feeds)
r.zremrangebyrank('error_feeds', 0, -1)
def update_all_statistics(self, has_new_stories=False, force=False):
recount = not self.counts_converted_to_redis
count_extra = False
if random.random() < 0.01 or not self.data.popular_tags or not self.data.popular_authors:
count_extra = True
self.count_subscribers(recount=recount)
self.calculate_last_story_date()
if force or has_new_stories or count_extra:
self.save_feed_stories_last_month()
if force or (has_new_stories and count_extra):
self.save_popular_authors()
self.save_popular_tags()
self.save_feed_story_history_statistics()
def calculate_last_story_date(self):
last_story_date = None
try:
latest_story = MStory.objects(
story_feed_id=self.pk
).limit(1).order_by('-story_date').only('story_date').first()
if latest_story:
last_story_date = latest_story.story_date
except MStory.DoesNotExist:
pass
if not last_story_date or seconds_timesince(last_story_date) < 0:
last_story_date = datetime.datetime.now()
if last_story_date != self.last_story_date:
self.last_story_date = last_story_date
self.save(update_fields=['last_story_date'])
@classmethod
def setup_feeds_for_premium_subscribers(cls, feed_ids):
logging.info(" ---> ~SN~FMScheduling immediate premium setup of ~SB%s~SN feeds..." %
len(feed_ids))
feeds = Feed.objects.filter(pk__in=feed_ids)
for feed in feeds:
feed.setup_feed_for_premium_subscribers()
def setup_feed_for_premium_subscribers(self):
self.count_subscribers()
self.set_next_scheduled_update()
def check_feed_link_for_feed_address(self):
@timelimit(10)
def _1():
feed_address = None
feed = self
found_feed_urls = []
try:
logging.debug(" ---> Checking: %s" % self.feed_address)
found_feed_urls = feedfinder_forman.find_feeds(self.feed_address)
if found_feed_urls:
feed_address = found_feed_urls[0]
except KeyError:
pass
if not len(found_feed_urls) and self.feed_link:
found_feed_urls = feedfinder_forman.find_feeds(self.feed_link)
if len(found_feed_urls) and found_feed_urls[0] != self.feed_address:
feed_address = found_feed_urls[0]
if feed_address:
if any(ignored_domain in feed_address for ignored_domain in [
'feedburner.com/atom.xml',
'feedburner.com/feed/',
'feedsportal.com',
]):
logging.debug(" ---> Feed points to 'Wierdo' or 'feedsportal', ignoring.")
return False, self
try:
self.feed_address = feed_address
feed = self.save()
feed.count_subscribers()
feed.schedule_feed_fetch_immediately()
feed.has_feed_exception = False
feed.active = True
feed = feed.save()
except IntegrityError:
original_feed = Feed.objects.get(feed_address=feed_address, feed_link=self.feed_link)
original_feed.has_feed_exception = False
original_feed.active = True
original_feed.save()
merge_feeds(original_feed.pk, self.pk)
return feed_address, feed
if self.feed_address_locked:
return False, self
try:
feed_address, feed = _1()
except TimeoutError, e:
logging.debug(' ---> [%-30s] Feed address check timed out...' % (self.log_title[:30]))
self.save_feed_history(505, 'Timeout', e)
feed = self
feed_address = None
return bool(feed_address), feed
def save_feed_history(self, status_code, message, exception=None, date=None):
fetch_history = MFetchHistory.add(feed_id=self.pk,
fetch_type='feed',
code=int(status_code),
date=date,
message=message,
exception=exception)
if status_code not in (200, 304):
self.errors_since_good += 1
self.count_errors_in_history('feed', status_code, fetch_history=fetch_history)
self.set_next_scheduled_update()
elif self.has_feed_exception or self.errors_since_good:
self.errors_since_good = 0
self.has_feed_exception = False
self.active = True
self.save()
def save_page_history(self, status_code, message, exception=None, date=None):
fetch_history = MFetchHistory.add(feed_id=self.pk,
fetch_type='page',
code=int(status_code),
date=date,
message=message,
exception=exception)
if status_code not in (200, 304):
self.count_errors_in_history('page', status_code, fetch_history=fetch_history)
elif self.has_page_exception or not self.has_page:
self.has_page_exception = False
self.has_page = True
self.active = True
self.save()
def save_raw_feed(self, raw_feed, fetch_date):
MFetchHistory.add(feed_id=self.pk,
fetch_type='raw_feed',
code=200,
message=raw_feed,
date=fetch_date)
def count_errors_in_history(self, exception_type='feed', status_code=None, fetch_history=None):
if not fetch_history:
fetch_history = MFetchHistory.feed(self.pk)
fh = fetch_history[exception_type + '_fetch_history']
non_errors = [h for h in fh if h['status_code'] and int(h['status_code']) in (200, 304)]
errors = [h for h in fh if h['status_code'] and int(h['status_code']) not in (200, 304)]
if len(non_errors) == 0 and len(errors) > 1:
self.active = True
if exception_type == 'feed':
self.has_feed_exception = True
# self.active = False # No longer, just geometrically fetch
elif exception_type == 'page':
self.has_page_exception = True
self.exception_code = status_code or int(errors[0])
self.save()
elif self.exception_code > 0:
self.active = True
self.exception_code = 0
if exception_type == 'feed':
self.has_feed_exception = False
elif exception_type == 'page':
self.has_page_exception = False
self.save()
logging.debug(' ---> [%-30s] ~FBCounting any errors in history: %s (%s non errors)' %
(self.log_title[:30], len(errors), len(non_errors)))
return errors, non_errors
def count_redirects_in_history(self, fetch_type='feed', fetch_history=None):
logging.debug(' ---> [%-30s] Counting redirects in history...' % (self.log_title[:30]))
if not fetch_history:
fetch_history = MFetchHistory.feed(self.pk)
fh = fetch_history[fetch_type+'_fetch_history']
redirects = [h for h in fh if h['status_code'] and int(h['status_code']) in (301, 302)]
non_redirects = [h for h in fh if h['status_code'] and int(h['status_code']) not in (301, 302)]
return redirects, non_redirects
@property
def original_feed_id(self):
if self.branch_from_feed:
return self.branch_from_feed.pk
else:
return self.pk
@property
def counts_converted_to_redis(self):
SUBSCRIBER_EXPIRE_DATE = datetime.datetime.now() - datetime.timedelta(days=settings.SUBSCRIBER_EXPIRE)
subscriber_expire = int(SUBSCRIBER_EXPIRE_DATE.strftime('%s'))
r = redis.Redis(connection_pool=settings.REDIS_FEED_SUB_POOL)
total_key = "s:%s" % self.original_feed_id
premium_key = "sp:%s" % self.original_feed_id
last_recount = r.zscore(total_key, -1) # Need to subtract this extra when counting subs
last_recount = r.zscore(premium_key, -1) # Need to subtract this extra when counting subs
# Check for expired feeds with no active users who would have triggered a cleanup
if last_recount and last_recount > subscriber_expire:
return True
elif last_recount:
logging.info(" ---> [%-30s] ~SN~FBFeed has expired redis subscriber counts (%s < %s), clearing..." % (
self.log_title[:30], last_recount, subscriber_expire))
r.delete(total_key, -1)
r.delete(premium_key, -1)
return False
def count_subscribers(self, recount=True, verbose=False):
if recount or not self.counts_converted_to_redis:
from apps.profile.models import Profile
Profile.count_feed_subscribers(feed_id=self.pk)
SUBSCRIBER_EXPIRE_DATE = datetime.datetime.now() - datetime.timedelta(days=settings.SUBSCRIBER_EXPIRE)
subscriber_expire = int(SUBSCRIBER_EXPIRE_DATE.strftime('%s'))
now = int(datetime.datetime.now().strftime('%s'))
r = redis.Redis(connection_pool=settings.REDIS_FEED_SUB_POOL)
total = 0
active = 0
premium = 0
active_premium = 0
# Include all branched feeds in counts
feed_ids = [f['id'] for f in Feed.objects.filter(branch_from_feed=self.original_feed_id).values('id')]
feed_ids.append(self.original_feed_id)
feed_ids = list(set(feed_ids))
if self.counts_converted_to_redis:
# For each branched feed, count different subscribers
for feed_id in feed_ids:
pipeline = r.pipeline()
# now+1 ensures `-1` flag will be corrected for later with - 1
total_key = "s:%s" % feed_id
premium_key = "sp:%s" % feed_id
pipeline.zcard(total_key)
pipeline.zcount(total_key, subscriber_expire, now+1)
pipeline.zcard(premium_key)
pipeline.zcount(premium_key, subscriber_expire, now+1)
results = pipeline.execute()
# -1 due to counts_converted_to_redis using key=-1 for last_recount date
total += max(0, results[0] - 1)
active += max(0, results[1] - 1)
premium += max(0, results[2] - 1)
active_premium += max(0, results[3] - 1)
original_num_subscribers = self.num_subscribers
original_active_subs = self.active_subscribers
original_premium_subscribers = self.premium_subscribers
original_active_premium_subscribers = self.active_premium_subscribers
logging.info(" ---> [%-30s] ~SN~FBCounting subscribers from ~FCredis~FB: ~FMt:~SB~FM%s~SN a:~SB%s~SN p:~SB%s~SN ap:~SB%s ~SN~FC%s" %
(self.log_title[:30], total, active, premium, active_premium, "(%s branches)" % (len(feed_ids)-1) if len(feed_ids)>1 else ""))
else:
from apps.reader.models import UserSubscription
subs = UserSubscription.objects.filter(feed__in=feed_ids)
original_num_subscribers = self.num_subscribers
total = subs.count()
active_subs = UserSubscription.objects.filter(
feed__in=feed_ids,
active=True,
user__profile__last_seen_on__gte=SUBSCRIBER_EXPIRE_DATE
)
original_active_subs = self.active_subscribers
active = active_subs.count()
premium_subs = UserSubscription.objects.filter(
feed__in=feed_ids,
active=True,
user__profile__is_premium=True
)
original_premium_subscribers = self.premium_subscribers
premium = premium_subs.count()
active_premium_subscribers = UserSubscription.objects.filter(
feed__in=feed_ids,
active=True,
user__profile__is_premium=True,
user__profile__last_seen_on__gte=SUBSCRIBER_EXPIRE_DATE
)
original_active_premium_subscribers = self.active_premium_subscribers
active_premium = active_premium_subscribers.count()
logging.debug(" ---> [%-30s] ~SN~FBCounting subscribers from ~FYpostgres~FB: ~FMt:~SB~FM%s~SN a:~SB%s~SN p:~SB%s~SN ap:~SB%s" %
(self.log_title[:30], total, active, premium, active_premium))
# If any counts have changed, save them
self.num_subscribers = total
self.active_subscribers = active
self.premium_subscribers = premium
self.active_premium_subscribers = active_premium
if (self.num_subscribers != original_num_subscribers or
self.active_subscribers != original_active_subs or
self.premium_subscribers != original_premium_subscribers or
self.active_premium_subscribers != original_active_premium_subscribers):
if original_premium_subscribers == -1 or original_active_premium_subscribers == -1:
self.save()
else:
self.save(update_fields=['num_subscribers', 'active_subscribers',
'premium_subscribers', 'active_premium_subscribers'])
if verbose:
if self.num_subscribers <= 1:
print '.',
else:
print "\n %s> %s subscriber%s: %s" % (
'-' * min(self.num_subscribers, 20),
self.num_subscribers,
'' if self.num_subscribers == 1 else 's',
self.feed_title,
),
def _split_favicon_color(self):
color = self.favicon_color
if color:
splitter = lambda s, p: [s[i:i+p] for i in range(0, len(s), p)]
red, green, blue = splitter(color[:6], 2)
return red, green, blue
return None, None, None
def favicon_fade(self):
red, green, blue = self._split_favicon_color()
if red and green and blue:
fade_red = hex(min(int(red, 16) + 35, 255))[2:].zfill(2)
fade_green = hex(min(int(green, 16) + 35, 255))[2:].zfill(2)
fade_blue = hex(min(int(blue, 16) + 35, 255))[2:].zfill(2)
return "%s%s%s" % (fade_red, fade_green, fade_blue)
def favicon_border(self):
red, green, blue = self._split_favicon_color()
if red and green and blue:
fade_red = hex(min(int(int(red, 16) * .75), 255))[2:].zfill(2)
fade_green = hex(min(int(int(green, 16) * .75), 255))[2:].zfill(2)
fade_blue = hex(min(int(int(blue, 16) * .75), 255))[2:].zfill(2)
return "%s%s%s" % (fade_red, fade_green, fade_blue)
def favicon_text_color(self):
        # Color format: {'red': 1, 'green': 0.5, 'blue': 0}
def contrast(color1, color2):
lum1 = luminosity(color1)
lum2 = luminosity(color2)
if lum1 > lum2:
return (lum1 + 0.05) / (lum2 + 0.05)
else:
return (lum2 + 0.05) / (lum1 + 0.05)
def luminosity(color):
r = color['red']
g = color['green']
b = color['blue']
val = lambda c: c/12.92 if c <= 0.02928 else math.pow(((c + 0.055)/1.055), 2.4)
red = val(r)
green = val(g)
blue = val(b)
return 0.2126 * red + 0.7152 * green + 0.0722 * blue
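        # These helpers follow the standard relative-luminance / contrast-ratio formulas
        # (luminance = 0.2126*R + 0.7152*G + 0.0722*B after gamma expansion). For example,
        # white has luminance 1.0 and mid-grey ~0.214, giving a contrast of
        # (1.0 + 0.05) / (0.214 + 0.05) ~= 4.0.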
red, green, blue = self._split_favicon_color()
if red and green and blue:
color = {
'red': int(red, 16) / 256.0,
'green': int(green, 16) / 256.0,
'blue': int(blue, 16) / 256.0,
}
white = {
'red': 1,
'green': 1,
'blue': 1,
}
grey = {
'red': 0.5,
'green': 0.5,
'blue': 0.5,
}
if contrast(color, white) > contrast(color, grey):
return 'white'
else:
return 'black'
def save_feed_stories_last_month(self, verbose=False):
month_ago = datetime.datetime.utcnow() - datetime.timedelta(days=30)
stories_last_month = MStory.objects(story_feed_id=self.pk,
story_date__gte=month_ago).count()
if self.stories_last_month != stories_last_month:
self.stories_last_month = stories_last_month
self.save(update_fields=['stories_last_month'])
if verbose:
print " ---> %s [%s]: %s stories last month" % (self.feed_title, self.pk,
self.stories_last_month)
def save_feed_story_history_statistics(self, current_counts=None):
"""
        Fills in missing months between earlier occurrences and now.
        Save format: [('YYYY-MM', #), ...]
        Example output: [('2010-12', 123), ('2011-01', 146)]
"""
now = datetime.datetime.utcnow()
min_year = now.year
total = 0
month_count = 0
if not current_counts:
current_counts = self.data.story_count_history and json.decode(self.data.story_count_history)
if isinstance(current_counts, dict):
current_counts = current_counts['months']
if not current_counts:
current_counts = []
# Count stories, aggregate by year and month. Map Reduce!
map_f = """
function() {
var date = (this.story_date.getFullYear()) + "-" + (this.story_date.getMonth()+1);
var hour = this.story_date.getUTCHours();
var day = this.story_date.getDay();
emit(this.story_hash, {'month': date, 'hour': hour, 'day': day});
}
"""
reduce_f = """
function(key, values) {
return values;
}
"""
dates = defaultdict(int)
hours = defaultdict(int)
days = defaultdict(int)
results = MStory.objects(story_feed_id=self.pk).map_reduce(map_f, reduce_f, output='inline')
for result in results:
dates[result.value['month']] += 1
hours[int(result.value['hour'])] += 1
days[int(result.value['day'])] += 1
year = int(re.findall(r"(\d{4})-\d{1,2}", result.value['month'])[0])
if year < min_year and year > 2000:
min_year = year
# Add on to existing months, always amending up, never down. (Current month
# is guaranteed to be accurate, since trim_feeds won't delete it until after
# a month. Hacker News can have 1,000+ and still be counted.)
for current_month, current_count in current_counts:
year = int(re.findall(r"(\d{4})-\d{1,2}", current_month)[0])
if current_month not in dates or dates[current_month] < current_count:
dates[current_month] = current_count
if year < min_year and year > 2000:
min_year = year
# Assemble a list with 0's filled in for missing months,
# trimming left and right 0's.
months = []
start = False
for year in range(min_year, now.year+1):
for month in range(1, 12+1):
if datetime.datetime(year, month, 1) < now:
key = u'%s-%s' % (year, month)
if dates.get(key) or start:
start = True
months.append((key, dates.get(key, 0)))
total += dates.get(key, 0)
if dates.get(key, 0) > 0:
month_count += 1 # Only count months that have stories for the average
original_story_count_history = self.data.story_count_history
self.data.story_count_history = json.encode({'months': months, 'hours': hours, 'days': days})
if self.data.story_count_history != original_story_count_history:
self.data.save(update_fields=['story_count_history'])
original_average_stories_per_month = self.average_stories_per_month
if not total or not month_count:
self.average_stories_per_month = 0
else:
self.average_stories_per_month = int(round(total / float(month_count)))
if self.average_stories_per_month != original_average_stories_per_month:
self.save(update_fields=['average_stories_per_month'])
def save_classifier_counts(self):
from apps.analyzer.models import MClassifierTitle, MClassifierAuthor, MClassifierFeed, MClassifierTag
def calculate_scores(cls, facet):
map_f = """
function() {
emit(this["%s"], {
pos: this.score>0 ? this.score : 0,
neg: this.score<0 ? Math.abs(this.score) : 0
});
}
""" % (facet)
reduce_f = """
function(key, values) {
var result = {pos: 0, neg: 0};
values.forEach(function(value) {
result.pos += value.pos;
result.neg += value.neg;
});
return result;
}
"""
scores = []
res = cls.objects(feed_id=self.pk).map_reduce(map_f, reduce_f, output='inline')
for r in res:
facet_values = dict([(k, int(v)) for k,v in r.value.iteritems()])
facet_values[facet] = r.key
if facet_values['pos'] + facet_values['neg'] >= 1:
scores.append(facet_values)
scores = sorted(scores, key=lambda v: v['neg'] - v['pos'])
return scores
scores = {}
for cls, facet in [(MClassifierTitle, 'title'),
(MClassifierAuthor, 'author'),
(MClassifierTag, 'tag'),
(MClassifierFeed, 'feed_id')]:
scores[facet] = calculate_scores(cls, facet)
if facet == 'feed_id' and scores[facet]:
scores['feed'] = scores[facet]
del scores['feed_id']
elif not scores[facet]:
del scores[facet]
if scores:
self.data.feed_classifier_counts = json.encode(scores)
self.data.save()
return scores
@property
def user_agent(self):
feed_parts = urlparse.urlparse(self.feed_address)
if feed_parts.netloc.find('.tumblr.com') != -1:
# Certain tumblr feeds will redirect to tumblr's login page when fetching.
# A known workaround is using facebook's user agent.
return 'facebookexternalhit/1.0 (+http://www.facebook.com/externalhit_uatext.php)'
ua = ('NewsBlur Feed Fetcher - %s subscriber%s - %s '
'(Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/56.0.2924.87 Safari/537.36)' % (
self.num_subscribers,
's' if self.num_subscribers != 1 else '',
self.permalink,
))
return ua
@property
def fake_user_agent(self):
ua = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:49.0) Gecko/20100101 Firefox/49.0"
return ua
def fetch_headers(self, fake=False):
headers = {
'User-Agent': self.user_agent if not fake else self.fake_user_agent,
'Accept': 'application/atom+xml, application/rss+xml, application/xml;q=0.8, text/xml;q=0.6, */*;q=0.2',
'Accept-Encoding': 'gzip, deflate',
}
return headers
def update(self, **kwargs):
try:
from utils import feed_fetcher
except ImportError, e:
logging.info(" ***> ~BR~FRImportError: %s" % e)
return
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
original_feed_id = int(self.pk)
options = {
'verbose': kwargs.get('verbose'),
'timeout': 10,
'single_threaded': kwargs.get('single_threaded', True),
'force': kwargs.get('force'),
'force_fp': kwargs.get('force_fp'),
'compute_scores': kwargs.get('compute_scores', True),
'mongodb_replication_lag': kwargs.get('mongodb_replication_lag', None),
'fake': kwargs.get('fake'),
'quick': kwargs.get('quick'),
'updates_off': kwargs.get('updates_off'),
'debug': kwargs.get('debug'),
'fpf': kwargs.get('fpf'),
'feed_xml': kwargs.get('feed_xml'),
'requesting_user_id': kwargs.get('requesting_user_id', None)
}
if getattr(settings, 'TEST_DEBUG', False):
print " ---> Testing feed fetch: %s" % self.log_title
# options['force_fp'] = True # No, why would this be needed?
original_feed_address = self.feed_address
original_feed_link = self.feed_link
self.feed_address = self.feed_address.replace("%(NEWSBLUR_DIR)s", settings.NEWSBLUR_DIR)
if self.feed_link:
self.feed_link = self.feed_link.replace("%(NEWSBLUR_DIR)s", settings.NEWSBLUR_DIR)
if self.feed_address != original_feed_address or self.feed_link != original_feed_link:
self.save(update_fields=['feed_address', 'feed_link'])
if self.is_newsletter:
feed = self.update_newsletter_icon()
else:
disp = feed_fetcher.Dispatcher(options, 1)
disp.add_jobs([[self.pk]])
feed = disp.run_jobs()
if feed:
feed = Feed.get_by_id(feed.pk)
if feed:
feed.last_update = datetime.datetime.utcnow()
feed.set_next_scheduled_update()
r.zadd('fetched_feeds_last_hour', feed.pk, int(datetime.datetime.now().strftime('%s')))
if not feed or original_feed_id != feed.pk:
logging.info(" ---> ~FRFeed changed id, removing %s from tasked_feeds queue..." % original_feed_id)
r.zrem('tasked_feeds', original_feed_id)
r.zrem('error_feeds', original_feed_id)
if feed:
r.zrem('tasked_feeds', feed.pk)
r.zrem('error_feeds', feed.pk)
return feed
def update_newsletter_icon(self):
from apps.rss_feeds.icon_importer import IconImporter
icon_importer = IconImporter(self)
icon_importer.save()
return self
@classmethod
def get_by_id(cls, feed_id, feed_address=None):
try:
feed = Feed.objects.get(pk=feed_id)
return feed
except Feed.DoesNotExist:
# Feed has been merged after updating. Find the right feed.
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if duplicate_feeds:
return duplicate_feeds[0].feed
if feed_address:
duplicate_feeds = DuplicateFeed.objects.filter(duplicate_address=feed_address)
if duplicate_feeds:
return duplicate_feeds[0].feed
@classmethod
def get_by_name(cls, query, limit=1):
results = SearchFeed.query(query)
feed_ids = [result.feed_id for result in results]
if limit == 1:
return Feed.get_by_id(feed_ids[0])
else:
return [Feed.get_by_id(f) for f in feed_ids][:limit]
def add_update_stories(self, stories, existing_stories, verbose=False, updates_off=False):
ret_values = dict(new=0, updated=0, same=0, error=0)
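        # ret_values tallies what happened to each incoming story:
        # 'new' = saved for the first time, 'updated' = an existing story was
        # amended, 'same' = matched an existing story and left as-is,
        # 'error' = the save or lookup failed.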
error_count = self.error_count
new_story_hashes = [s.get('story_hash') for s in stories]
if settings.DEBUG or verbose:
logging.debug(" ---> [%-30s] ~FBChecking ~SB%s~SN new/updated against ~SB%s~SN stories" % (
self.log_title[:30],
len(stories),
len(existing_stories.keys())))
@timelimit(2)
def _1(story, story_content, existing_stories, new_story_hashes):
existing_story, story_has_changed = self._exists_story(story, story_content,
existing_stories, new_story_hashes)
return existing_story, story_has_changed
for story in stories:
if verbose:
logging.debug(" ---> [%-30s] ~FBChecking ~SB%s~SN / ~SB%s" % (
self.log_title[:30],
story.get('title'),
story.get('guid')))
story_content = story.get('story_content')
if error_count:
story_content = strip_comments__lxml(story_content)
else:
story_content = strip_comments(story_content)
story_tags = self.get_tags(story)
story_link = self.get_permalink(story)
replace_story_date = False
try:
existing_story, story_has_changed = _1(story, story_content,
existing_stories, new_story_hashes)
except TimeoutError, e:
logging.debug(' ---> [%-30s] ~SB~FRExisting story check timed out...' % (self.log_title[:30]))
existing_story = None
story_has_changed = False
if existing_story is None:
if settings.DEBUG and False:
logging.debug(' ---> New story in feed (%s - %s): %s' % (self.feed_title, story.get('title'), len(story_content)))
s = MStory(story_feed_id = self.pk,
story_date = story.get('published'),
story_title = story.get('title'),
story_content = story_content,
story_author_name = story.get('author'),
story_permalink = story_link,
story_guid = story.get('guid'),
story_tags = story_tags
)
try:
s.save()
ret_values['new'] += 1
s.publish_to_subscribers()
except (IntegrityError, OperationError), e:
ret_values['error'] += 1
if settings.DEBUG:
logging.info(' ---> [%-30s] ~SN~FRIntegrityError on new story: %s - %s' % (self.feed_title[:30], story.get('guid'), e))
if self.search_indexed:
s.index_story_for_search()
elif existing_story and story_has_changed and not updates_off and ret_values['updated'] < 3:
# update story
original_content = None
try:
if existing_story and existing_story.id:
try:
existing_story = MStory.objects.get(id=existing_story.id)
except ValidationError:
existing_story, _ = MStory.find_story(existing_story.story_feed_id,
existing_story.id,
original_only=True)
elif existing_story and existing_story.story_hash:
existing_story, _ = MStory.find_story(existing_story.story_feed_id,
existing_story.story_hash,
original_only=True)
else:
raise MStory.DoesNotExist
except (MStory.DoesNotExist, OperationError), e:
ret_values['error'] += 1
if verbose:
logging.info(' ---> [%-30s] ~SN~FROperation on existing story: %s - %s' % (self.feed_title[:30], story.get('guid'), e))
continue
if existing_story.story_original_content_z:
original_content = zlib.decompress(existing_story.story_original_content_z)
elif existing_story.story_content_z:
original_content = zlib.decompress(existing_story.story_content_z)
# print 'Type: %s %s' % (type(original_content), type(story_content))
if story_content and len(story_content) > 10:
if "<code" in story_content:
# Don't mangle stories with code, just use new
story_content_diff = story_content
else:
story_content_diff = htmldiff(smart_unicode(original_content), smart_unicode(story_content))
else:
story_content_diff = original_content
# logging.debug("\t\tDiff: %s %s %s" % diff.getStats())
# logging.debug("\t\tDiff content: %s" % diff.getDiff())
# if existing_story.story_title != story.get('title'):
# logging.debug('\tExisting title / New: : \n\t\t- %s\n\t\t- %s' % (existing_story.story_title, story.get('title')))
if existing_story.story_hash != story.get('story_hash'):
self.update_story_with_new_guid(existing_story, story.get('guid'))
if verbose:
logging.debug('- Updated story in feed (%s - %s): %s / %s' % (self.feed_title, story.get('title'), len(story_content_diff), len(story_content)))
existing_story.story_feed = self.pk
existing_story.story_title = story.get('title')
existing_story.story_content = story_content_diff
existing_story.story_latest_content = story_content
existing_story.story_original_content = original_content
existing_story.story_author_name = story.get('author')
existing_story.story_permalink = story_link
existing_story.story_guid = story.get('guid')
existing_story.story_tags = story_tags
existing_story.original_text_z = None # Reset Text view cache
# Do not allow publishers to change the story date once a story is published.
# Leads to incorrect unread story counts.
if replace_story_date:
existing_story.story_date = story.get('published') # Really shouldn't do this.
existing_story.extract_image_urls(force=True)
try:
existing_story.save()
ret_values['updated'] += 1
except (IntegrityError, OperationError):
ret_values['error'] += 1
if verbose:
logging.info(' ---> [%-30s] ~SN~FRIntegrityError on updated story: %s' % (self.feed_title[:30], story.get('title')[:30]))
except ValidationError:
ret_values['error'] += 1
if verbose:
logging.info(' ---> [%-30s] ~SN~FRValidationError on updated story: %s' % (self.feed_title[:30], story.get('title')[:30]))
if self.search_indexed:
existing_story.index_story_for_search()
else:
ret_values['same'] += 1
if verbose:
logging.debug("Unchanged story (%s): %s / %s " % (story.get('story_hash'), story.get('guid'), story.get('title')))
return ret_values
def update_story_with_new_guid(self, existing_story, new_story_guid):
from apps.reader.models import RUserStory
from apps.social.models import MSharedStory
existing_story.remove_from_redis()
existing_story.remove_from_search_index()
old_hash = existing_story.story_hash
new_hash = MStory.ensure_story_hash(new_story_guid, self.pk)
RUserStory.switch_hash(feed=self, old_hash=old_hash, new_hash=new_hash)
shared_stories = MSharedStory.objects.filter(story_feed_id=self.pk,
story_hash=old_hash)
for story in shared_stories:
story.story_guid = new_story_guid
story.story_hash = new_hash
try:
story.save()
except NotUniqueError:
# Story is already shared, skip.
pass
def save_popular_tags(self, feed_tags=None, verbose=False):
if not feed_tags:
all_tags = MStory.objects(story_feed_id=self.pk,
story_tags__exists=True).item_frequencies('story_tags')
feed_tags = sorted([(k, v) for k, v in all_tags.items() if int(v) > 0],
key=itemgetter(1),
reverse=True)[:25]
popular_tags = json.encode(feed_tags)
if verbose:
print "Found %s tags: %s" % (len(feed_tags), popular_tags)
# TODO: This len() bullshit will be gone when feeds move to mongo
# On second thought, it might stay, because we don't want
# popular tags the size of a small planet. I'm looking at you
# Tumblr writers.
if len(popular_tags) < 1024:
if self.data.popular_tags != popular_tags:
self.data.popular_tags = popular_tags
self.data.save(update_fields=['popular_tags'])
return
tags_list = []
if feed_tags and isinstance(feed_tags, unicode):
tags_list = json.decode(feed_tags)
if len(tags_list) >= 1:
self.save_popular_tags(tags_list[:-1])
def save_popular_authors(self, feed_authors=None):
if not feed_authors:
authors = defaultdict(int)
for story in MStory.objects(story_feed_id=self.pk).only('story_author_name'):
authors[story.story_author_name] += 1
feed_authors = sorted([(k, v) for k, v in authors.items() if k],
key=itemgetter(1),
reverse=True)[:20]
popular_authors = json.encode(feed_authors)
if len(popular_authors) < 1023:
if self.data.popular_authors != popular_authors:
self.data.popular_authors = popular_authors
self.data.save(update_fields=['popular_authors'])
return
if len(feed_authors) > 1:
self.save_popular_authors(feed_authors=feed_authors[:-1])
@classmethod
def trim_old_stories(cls, start=0, verbose=True, dryrun=False, total=0):
now = datetime.datetime.now()
month_ago = now - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
feed_count = Feed.objects.latest('pk').pk
for feed_id in xrange(start, feed_count):
if feed_id % 1000 == 0:
print "\n\n -------------------------- %s (%s deleted so far) --------------------------\n\n" % (feed_id, total)
try:
feed = Feed.objects.get(pk=feed_id)
except Feed.DoesNotExist:
continue
if feed.active_subscribers <= 0 and (not feed.last_story_date or feed.last_story_date < month_ago):
months_ago = 6
if feed.last_story_date:
months_ago = int((now - feed.last_story_date).days / 30.0)
cutoff = max(1, 6 - months_ago)
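                # Rough example: a feed whose last story is ~4 months old gets
                # cutoff = max(1, 6 - 4) = 2, so only its 2 newest stories are kept.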
if dryrun:
print " DRYRUN: %s cutoff - %s" % (cutoff, feed)
else:
total += MStory.trim_feed(feed=feed, cutoff=cutoff, verbose=verbose)
else:
if dryrun:
print " DRYRUN: %s/%s cutoff - %s" % (cutoff, feed.story_cutoff, feed)
else:
total += feed.trim_feed(verbose=verbose)
print " ---> Deleted %s stories in total." % total
@property
def story_cutoff(self):
cutoff = 500
if self.active_subscribers <= 0:
cutoff = 25
elif self.active_premium_subscribers < 1:
cutoff = 100
elif self.active_premium_subscribers <= 2:
cutoff = 200
elif self.active_premium_subscribers <= 5:
cutoff = 300
elif self.active_premium_subscribers <= 10:
cutoff = 350
elif self.active_premium_subscribers <= 15:
cutoff = 400
elif self.active_premium_subscribers <= 20:
cutoff = 450
if self.active_subscribers and self.average_stories_per_month < 5 and self.stories_last_month < 5:
cutoff /= 2
if self.active_premium_subscribers <= 1 and self.average_stories_per_month <= 1 and self.stories_last_month <= 1:
cutoff /= 2
r = redis.Redis(connection_pool=settings.REDIS_FEED_READ_POOL)
pipeline = r.pipeline()
read_stories_per_week = []
now = datetime.datetime.now()
for weeks_back in range(2*int(math.floor(settings.DAYS_OF_STORY_HASHES/7))):
weeks_ago = now - datetime.timedelta(days=7*weeks_back)
week_of_year = weeks_ago.strftime('%Y-%U')
feed_read_key = "fR:%s:%s" % (self.pk, week_of_year)
pipeline.get(feed_read_key)
read_stories_per_week = pipeline.execute()
read_stories_last_month = sum([int(rs) for rs in read_stories_per_week if rs])
if read_stories_last_month == 0:
original_cutoff = cutoff
cutoff = min(cutoff, 10)
try:
logging.debug(" ---> [%-30s] ~FBTrimming down to ~SB%s (instead of %s)~SN stories (~FM%s~FB)" % (self.log_title[:30], cutoff, original_cutoff, self.last_story_date.strftime("%Y-%m-%d") if self.last_story_date else "No last story date"))
except ValueError, e:
logging.debug(" ***> [%-30s] Error trimming: %s" % (self.log_title[:30], e))
pass
if getattr(settings, 'OVERRIDE_STORY_COUNT_MAX', None):
cutoff = settings.OVERRIDE_STORY_COUNT_MAX
return cutoff
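    # Worked example for story_cutoff (hypothetical feed): 4 active premium
    # subscribers -> 300; if it also averages fewer than 5 stories/month the
    # cutoff halves to 150; and if no subscriber read a story from it in the
    # recent weekly counters, it is clamped down to min(cutoff, 10).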
def trim_feed(self, verbose=False, cutoff=None):
if not cutoff:
cutoff = self.story_cutoff
return MStory.trim_feed(feed=self, cutoff=cutoff, verbose=verbose)
def purge_feed_stories(self, update=True):
MStory.purge_feed_stories(feed=self, cutoff=self.story_cutoff)
if update:
self.update()
def purge_author(self, author):
all_stories = MStory.objects.filter(story_feed_id=self.pk)
author_stories = MStory.objects.filter(story_feed_id=self.pk, story_author_name__iexact=author)
logging.debug(" ---> Deleting %s of %s stories in %s by '%s'." % (author_stories.count(), all_stories.count(), self, author))
author_stories.delete()
def purge_tag(self, tag):
all_stories = MStory.objects.filter(story_feed_id=self.pk)
tagged_stories = MStory.objects.filter(story_feed_id=self.pk, story_tags__icontains=tag)
logging.debug(" ---> Deleting %s of %s stories in %s by '%s'." % (tagged_stories.count(), all_stories.count(), self, tag))
tagged_stories.delete()
# @staticmethod
# def clean_invalid_ids():
# history = MFeedFetchHistory.objects(status_code=500, exception__contains='InvalidId:')
# urls = set()
# for h in history:
# u = re.split('InvalidId: (.*?) is not a valid ObjectId\\n$', h.exception)[1]
# urls.add((h.feed_id, u))
#
# for f, u in urls:
# print "db.stories.remove({\"story_feed_id\": %s, \"_id\": \"%s\"})" % (f, u)
def get_stories(self, offset=0, limit=25, force=False):
stories_db = MStory.objects(story_feed_id=self.pk)[offset:offset+limit]
stories = self.format_stories(stories_db, self.pk)
return stories
@classmethod
def find_feed_stories(cls, feed_ids, query, order="newest", offset=0, limit=25):
story_ids = SearchStory.query(feed_ids=feed_ids, query=query, order=order,
offset=offset, limit=limit)
stories_db = MStory.objects(
story_hash__in=story_ids
).order_by('-story_date' if order == "newest" else 'story_date')
stories = cls.format_stories(stories_db)
return stories
@classmethod
def query_popularity(cls, query, limit, order='newest'):
popularity = {}
seen_feeds = set()
feed_title_to_id = dict()
# Collect stories, sort by feed
story_ids = SearchStory.global_query(query, order=order, offset=0, limit=limit)
for story_hash in story_ids:
feed_id, story_id = MStory.split_story_hash(story_hash)
feed = Feed.get_by_id(feed_id)
if not feed: continue
if feed.feed_title in seen_feeds:
feed_id = feed_title_to_id[feed.feed_title]
else:
feed_title_to_id[feed.feed_title] = feed_id
seen_feeds.add(feed.feed_title)
if feed_id not in popularity:
# feed.update_all_statistics()
# classifiers = feed.save_classifier_counts()
well_read_score = feed.well_read_score()
popularity[feed_id] = {
'feed_title': feed.feed_title,
'feed_url': feed.feed_link,
'num_subscribers': feed.num_subscribers,
'feed_id': feed.pk,
'story_ids': [],
'authors': {},
'read_pct': well_read_score['read_pct'],
'reader_count': well_read_score['reader_count'],
'story_count': well_read_score['story_count'],
'reach_score': well_read_score['reach_score'],
'share_count': well_read_score['share_count'],
'ps': 0,
'ng': 0,
'classifiers': json.decode(feed.data.feed_classifier_counts),
}
if popularity[feed_id]['classifiers']:
for classifier in popularity[feed_id]['classifiers'].get('feed', []):
if int(classifier['feed_id']) == int(feed_id):
popularity[feed_id]['ps'] = classifier['pos']
popularity[feed_id]['ng'] = -1 * classifier['neg']
popularity[feed_id]['story_ids'].append(story_hash)
sorted_popularity = sorted(popularity.values(), key=lambda x: x['reach_score'],
reverse=True)
# Extract story authors from feeds
for feed in sorted_popularity:
story_ids = feed['story_ids']
stories_db = MStory.objects(story_hash__in=story_ids)
stories = cls.format_stories(stories_db)
for story in stories:
story['story_permalink'] = story['story_permalink'][:250]
if story['story_authors'] not in feed['authors']:
feed['authors'][story['story_authors']] = {
'name': story['story_authors'],
'count': 0,
'ps': 0,
'ng': 0,
'tags': {},
'stories': [],
}
author = feed['authors'][story['story_authors']]
seen = False
for seen_story in author['stories']:
if seen_story['url'] == story['story_permalink']:
seen = True
break
else:
author['stories'].append({
'title': story['story_title'],
'url': story['story_permalink'],
'date': story['story_date'],
})
author['count'] += 1
if seen: continue # Don't recount tags
if feed['classifiers']:
for classifier in feed['classifiers'].get('author', []):
if classifier['author'] == author['name']:
author['ps'] = classifier['pos']
author['ng'] = -1 * classifier['neg']
for tag in story['story_tags']:
if tag not in author['tags']:
author['tags'][tag] = {'name': tag, 'count': 0, 'ps': 0, 'ng': 0}
author['tags'][tag]['count'] += 1
if feed['classifiers']:
for classifier in feed['classifiers'].get('tag', []):
if classifier['tag'] == tag:
author['tags'][tag]['ps'] = classifier['pos']
author['tags'][tag]['ng'] = -1 * classifier['neg']
sorted_authors = sorted(feed['authors'].values(), key=lambda x: x['count'])
feed['authors'] = sorted_authors
# pprint(sorted_popularity)
return sorted_popularity
def well_read_score(self):
from apps.reader.models import UserSubscription
from apps.social.models import MSharedStory
# Average percentage of stories read vs published across recently active subscribers
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
p = r.pipeline()
shared_stories = MSharedStory.objects(story_feed_id=self.pk).count()
subscribing_users = UserSubscription.objects.filter(feed_id=self.pk).values('user_id')
subscribing_user_ids = [sub['user_id'] for sub in subscribing_users]
for user_id in subscribing_user_ids:
user_rs = "RS:%s:%s" % (user_id, self.pk)
p.scard(user_rs)
counts = p.execute()
counts = [c for c in counts if c > 0]
reader_count = len(counts)
story_count = MStory.objects(story_feed_id=self.pk,
story_date__gte=self.unread_cutoff).count()
if reader_count and story_count:
average_pct = (sum(counts) / float(reader_count)) / float(story_count)
else:
average_pct = 0
reach_score = average_pct * reader_count * story_count
return {'read_pct': average_pct, 'reader_count': reader_count,
'reach_score': reach_score, 'story_count': story_count,
'share_count': shared_stories}
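    # Worked example (assumed numbers): 20 readers who each read 5 of the
    # feed's 50 recent stories give read_pct = (100/20.0)/50 = 0.1 and
    # reach_score = 0.1 * 20 * 50 = 100.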
@classmethod
def xls_query_popularity(cls, queries, limit):
import xlsxwriter
from xlsxwriter.utility import xl_rowcol_to_cell
if isinstance(queries, unicode):
queries = [q.strip() for q in queries.split(',')]
title = 'NewsBlur-%s.xlsx' % slugify('-'.join(queries))
workbook = xlsxwriter.Workbook(title)
bold = workbook.add_format({'bold': 1})
date_format = workbook.add_format({'num_format': 'mmm d yyyy'})
unread_format = workbook.add_format({'font_color': '#E0E0E0'})
for query in queries:
worksheet = workbook.add_worksheet(query)
row = 1
col = 0
worksheet.write(0, col, 'Publisher', bold)
worksheet.set_column(col, col, 15); col += 1
worksheet.write(0, col, 'Feed URL', bold)
worksheet.set_column(col, col, 20); col += 1
worksheet.write(0, col, 'Reach score', bold)
worksheet.write_comment(0, col, 'Feeds are sorted based on this score. It\'s simply the # of readers * # of stories in the past 30 days * the percentage of stories that are actually read.')
worksheet.set_column(col, col, 9); col += 1
worksheet.write(0, col, '# subs', bold)
worksheet.write_comment(0, col, 'Total number of subscribers on NewsBlur, not necessarily active')
worksheet.set_column(col, col, 5); col += 1
worksheet.write(0, col, '# readers', bold)
worksheet.write_comment(0, col, 'Total number of active subscribers who have read a story from the feed in the past 30 days.')
worksheet.set_column(col, col, 8); col += 1
worksheet.write(0, col, "read pct", bold)
worksheet.write_comment(0, col, "Of the active subscribers reading this feed in the past 30 days, this is the percentage of stories the average subscriber reads. Values over 100 pct signify that the feed has many shared stories, which throws off the number slightly but not significantly.")
worksheet.set_column(col, col, 8); col += 1
worksheet.write(0, col, '# stories 30d', bold)
worksheet.write_comment(0, col, "It's important to ignore feeds that haven't published anything in the last 30 days, which is why this is part of the Reach Score.")
worksheet.set_column(col, col, 10); col += 1
worksheet.write(0, col, '# shared', bold)
worksheet.write_comment(0, col, 'Number of stories from this feed that were shared on NewsBlur. This is a strong signal of interest although it is not included in the Reach Score.')
worksheet.set_column(col, col, 7); col += 1
worksheet.write(0, col, '# feed pos', bold)
worksheet.write_comment(0, col, 'Number of times this feed was trained with a thumbs up. Users use training to hide stories they don\'t want to see while highlighting those that they do.')
worksheet.set_column(col, col, 8); col += 1
worksheet.write(0, col, '# feed neg', bold)
worksheet.write_comment(0, col, 'Number of times this feed was trained with a thumbs down. Users use training to hide stories they don\'t want to see while highlighting those that they do.')
worksheet.set_column(col, col, 8); col += 1
worksheet.write(0, col, 'Author', bold)
worksheet.set_column(col, col, 15); col += 1
worksheet.write(0, col, '# author pos', bold)
worksheet.write_comment(0, col, 'Number of times this author was trained with a thumbs up. Users use training to hide stories they don\'t want to see while highlighting those that they do.')
worksheet.set_column(col, col, 10); col += 1
worksheet.write(0, col, '# author neg', bold)
worksheet.write_comment(0, col, 'Number of times this author was trained with a thumbs down. Users use training to hide stories they don\'t want to see while highlighting those that they do.')
worksheet.set_column(col, col, 10); col += 1
worksheet.write(0, col, 'Story title', bold)
worksheet.set_column(col, col, 30); col += 1
worksheet.write(0, col, 'Story URL', bold)
worksheet.set_column(col, col, 20); col += 1
worksheet.write(0, col, 'Story date', bold)
worksheet.set_column(col, col, 10); col += 1
worksheet.write(0, col, 'Tag', bold)
worksheet.set_column(col, col, 15); col += 1
worksheet.write(0, col, 'Tag count', bold)
worksheet.write_comment(0, col, 'Number of times this tag is used in other stories that also contain the search query.')
worksheet.set_column(col, col, 8); col += 1
worksheet.write(0, col, '# tag pos', bold)
worksheet.write_comment(0, col, 'Number of times this tag was trained with a thumbs up. Users use training to hide stories they don\'t want to see while highlighting those that they do.')
worksheet.set_column(col, col, 7); col += 1
worksheet.write(0, col, '# tag neg', bold)
worksheet.write_comment(0, col, 'Number of times this tag was trained with a thumbs down. Users use training to hide stories they don\'t want to see while highlighting those that they do.')
worksheet.set_column(col, col, 7); col += 1
popularity = cls.query_popularity(query, limit=limit)
for feed in popularity:
col = 0
worksheet.write(row, col, feed['feed_title']); col += 1
worksheet.write_url(row, col, feed.get('feed_url') or ""); col += 1
worksheet.conditional_format(row, col, row, col+8, {'type': 'cell',
'criteria': '==',
'value': 0,
'format': unread_format})
worksheet.write(row, col, "=%s*%s*%s" % (
xl_rowcol_to_cell(row, col+2),
xl_rowcol_to_cell(row, col+3),
xl_rowcol_to_cell(row, col+4),
)); col += 1
worksheet.write(row, col, feed['num_subscribers']); col += 1
worksheet.write(row, col, feed['reader_count']); col += 1
worksheet.write(row, col, feed['read_pct']); col += 1
worksheet.write(row, col, feed['story_count']); col += 1
worksheet.write(row, col, feed['share_count']); col += 1
worksheet.write(row, col, feed['ps']); col += 1
worksheet.write(row, col, feed['ng']); col += 1
for author in feed['authors']:
row += 1
worksheet.conditional_format(row, col, row, col+2, {'type': 'cell',
'criteria': '==',
'value': 0,
'format': unread_format})
worksheet.write(row, col, author['name'])
worksheet.write(row, col+1, author['ps'])
worksheet.write(row, col+2, author['ng'])
for story in author['stories']:
worksheet.write(row, col+3, story['title'])
worksheet.write_url(row, col+4, story['url'])
worksheet.write_datetime(row, col+5, story['date'], date_format)
row += 1
for tag in author['tags'].values():
worksheet.conditional_format(row, col+7, row, col+9, {'type': 'cell',
'criteria': '==',
'value': 0,
'format': unread_format})
worksheet.write(row, col+6, tag['name'])
worksheet.write(row, col+7, tag['count'])
worksheet.write(row, col+8, tag['ps'])
worksheet.write(row, col+9, tag['ng'])
row += 1
workbook.close()
return title
def find_stories(self, query, order="newest", offset=0, limit=25):
story_ids = SearchStory.query(feed_ids=[self.pk], query=query, order=order,
offset=offset, limit=limit)
stories_db = MStory.objects(
story_hash__in=story_ids
).order_by('-story_date' if order == "newest" else 'story_date')
stories = self.format_stories(stories_db, self.pk)
return stories
@classmethod
def format_stories(cls, stories_db, feed_id=None, include_permalinks=False):
stories = []
for story_db in stories_db:
story = cls.format_story(story_db, feed_id, include_permalinks=include_permalinks)
stories.append(story)
return stories
@classmethod
def format_story(cls, story_db, feed_id=None, text=False, include_permalinks=False,
show_changes=False):
if isinstance(story_db.story_content_z, unicode):
story_db.story_content_z = story_db.story_content_z.decode('base64')
story_content = ''
latest_story_content = None
has_changes = False
if (not show_changes and
hasattr(story_db, 'story_latest_content_z') and
story_db.story_latest_content_z):
latest_story_content = smart_unicode(zlib.decompress(story_db.story_latest_content_z))
if story_db.story_content_z:
story_content = smart_unicode(zlib.decompress(story_db.story_content_z))
if '<ins' in story_content or '<del' in story_content:
has_changes = True
if not show_changes and latest_story_content:
story_content = latest_story_content
story_title = story_db.story_title
blank_story_title = False
if not story_title:
blank_story_title = True
if story_content:
story_title = strip_tags(story_content)
if not story_title and story_db.story_permalink:
story_title = story_db.story_permalink
if story_title and len(story_title) > 80:
story_title = story_title[:80] + '...'
story = {}
story['story_hash'] = getattr(story_db, 'story_hash', None)
story['story_tags'] = story_db.story_tags or []
story['story_date'] = story_db.story_date.replace(tzinfo=None)
story['story_timestamp'] = story_db.story_date.strftime('%s')
story['story_authors'] = story_db.story_author_name or ""
story['story_title'] = story_title
if blank_story_title:
story['story_title_blank'] = True
story['story_content'] = story_content
story['story_permalink'] = story_db.story_permalink
story['image_urls'] = story_db.image_urls
story['secure_image_urls']= cls.secure_image_urls(story_db.image_urls)
story['secure_image_thumbnails']= cls.secure_image_thumbnails(story_db.image_urls)
story['story_feed_id'] = feed_id or story_db.story_feed_id
story['has_modifications']= has_changes
story['comment_count'] = story_db.comment_count if hasattr(story_db, 'comment_count') else 0
story['comment_user_ids'] = story_db.comment_user_ids if hasattr(story_db, 'comment_user_ids') else []
story['share_count'] = story_db.share_count if hasattr(story_db, 'share_count') else 0
story['share_user_ids'] = story_db.share_user_ids if hasattr(story_db, 'share_user_ids') else []
story['guid_hash'] = story_db.guid_hash if hasattr(story_db, 'guid_hash') else None
if hasattr(story_db, 'source_user_id'):
story['source_user_id'] = story_db.source_user_id
story['id'] = story_db.story_guid or story_db.story_date
if hasattr(story_db, 'starred_date'):
story['starred_date'] = story_db.starred_date
if hasattr(story_db, 'user_tags'):
story['user_tags'] = story_db.user_tags
if hasattr(story_db, 'user_notes'):
story['user_notes'] = story_db.user_notes
if hasattr(story_db, 'highlights'):
story['highlights'] = story_db.highlights
if hasattr(story_db, 'shared_date'):
story['shared_date'] = story_db.shared_date
if hasattr(story_db, 'comments'):
story['comments'] = story_db.comments
if hasattr(story_db, 'user_id'):
story['user_id'] = story_db.user_id
if include_permalinks and hasattr(story_db, 'blurblog_permalink'):
story['blurblog_permalink'] = story_db.blurblog_permalink()
if text:
soup = BeautifulSoup(story['story_content'])
text = ''.join(soup.findAll(text=True))
text = re.sub(r'\n+', '\n\n', text)
text = re.sub(r'\t+', '\t', text)
story['text'] = text
return story
@classmethod
def secure_image_urls(cls, urls):
signed_urls = [create_imageproxy_signed_url(settings.IMAGES_URL,
settings.IMAGES_SECRET_KEY,
url) for url in urls]
return dict(zip(urls, signed_urls))
@classmethod
def secure_image_thumbnails(cls, urls, size=192):
signed_urls = [create_imageproxy_signed_url(settings.IMAGES_URL,
settings.IMAGES_SECRET_KEY,
url,
size) for url in urls]
return dict(zip(urls, signed_urls))
def get_tags(self, entry):
fcat = []
if entry.has_key('tags'):
for tcat in entry.tags:
term = None
if hasattr(tcat, 'label') and tcat.label:
term = tcat.label
elif hasattr(tcat, 'term') and tcat.term:
term = tcat.term
if not term or "CDATA" in term:
continue
qcat = term.strip()
if ',' in qcat or '/' in qcat:
qcat = qcat.replace(',', '/').split('/')
else:
qcat = [qcat]
for zcat in qcat:
tagname = zcat.lower()
while ' ' in tagname:
tagname = tagname.replace(' ', ' ')
tagname = tagname.strip()
if not tagname or tagname == ' ':
continue
fcat.append(tagname)
fcat = [strip_tags(t)[:250] for t in fcat[:12]]
return fcat
@classmethod
def get_permalink(cls, entry):
link = entry.get('link')
if not link:
links = entry.get('links')
if links:
link = links[0].get('href')
if not link:
link = entry.get('id')
return link
def _exists_story(self, story, story_content, existing_stories, new_story_hashes):
story_in_system = None
story_has_changed = False
story_link = self.get_permalink(story)
existing_stories_hashes = existing_stories.keys()
story_pub_date = story.get('published')
# story_published_now = story.get('published_now', False)
# start_date = story_pub_date - datetime.timedelta(hours=8)
# end_date = story_pub_date + datetime.timedelta(hours=8)
for existing_story in existing_stories.values():
content_ratio = 0
# existing_story_pub_date = existing_story.story_date
# print 'Story pub date: %s %s' % (story_published_now, story_pub_date)
if isinstance(existing_story.id, unicode):
# Correcting a MongoDB bug
existing_story.story_guid = existing_story.id
if story.get('story_hash') == existing_story.story_hash:
story_in_system = existing_story
elif (story.get('story_hash') in existing_stories_hashes and
story.get('story_hash') != existing_story.story_hash):
# Story already exists but is not this one
continue
elif (existing_story.story_hash in new_story_hashes and
story.get('story_hash') != existing_story.story_hash):
# Story coming up later
continue
if 'story_latest_content_z' in existing_story:
existing_story_content = smart_unicode(zlib.decompress(existing_story.story_latest_content_z))
elif 'story_latest_content' in existing_story:
existing_story_content = existing_story.story_latest_content
elif 'story_content_z' in existing_story:
existing_story_content = smart_unicode(zlib.decompress(existing_story.story_content_z))
elif 'story_content' in existing_story:
existing_story_content = existing_story.story_content
else:
existing_story_content = u''
# Title distance + content distance, checking if story changed
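            # In rough terms: titles must be at least 75% similar and pub dates
            # within a day before content is compared; the content diff only
            # runs when difflib's quick ratios clear 0.9/0.95, and a
            # content-based match ultimately needs a full ratio above 0.98.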
story_title_difference = abs(levenshtein_distance(story.get('title'),
existing_story.story_title))
title_ratio = difflib.SequenceMatcher(None, story.get('title', ""),
existing_story.story_title).ratio()
if title_ratio < .75: continue
story_timedelta = existing_story.story_date - story_pub_date
if abs(story_timedelta.days) >= 1: continue
seq = difflib.SequenceMatcher(None, story_content, existing_story_content)
            similar_length_min = 1000
if (existing_story.story_permalink == story_link and
existing_story.story_title == story.get('title')):
                similar_length_min = 20
if (seq
and story_content
                and len(story_content) > similar_length_min
and existing_story_content
and seq.real_quick_ratio() > .9
and seq.quick_ratio() > .95):
content_ratio = seq.ratio()
if story_title_difference > 0 and content_ratio > .98:
story_in_system = existing_story
if story_title_difference > 0 or content_ratio < 1.0:
if settings.DEBUG:
logging.debug(" ---> Title difference - %s/%s (%s): %s" % (story.get('title'), existing_story.story_title, story_title_difference, content_ratio))
story_has_changed = True
break
# More restrictive content distance, still no story match
if not story_in_system and content_ratio > .98:
if settings.DEBUG:
logging.debug(" ---> Content difference - %s/%s (%s): %s" % (story.get('title'), existing_story.story_title, story_title_difference, content_ratio))
story_in_system = existing_story
story_has_changed = True
break
if story_in_system and not story_has_changed:
if story_content != existing_story_content:
if settings.DEBUG:
logging.debug(" ---> Content difference - %s (%s)/%s (%s)" % (story.get('title'), len(story_content), existing_story.story_title, len(existing_story_content)))
story_has_changed = True
if story_link != existing_story.story_permalink:
if settings.DEBUG:
logging.debug(" ---> Permalink difference - %s/%s" % (story_link, existing_story.story_permalink))
story_has_changed = True
# if story_pub_date != existing_story.story_date:
# story_has_changed = True
break
# if story_has_changed or not story_in_system:
# print 'New/updated story: %s' % (story),
return story_in_system, story_has_changed
def get_next_scheduled_update(self, force=False, verbose=True, premium_speed=False):
if self.min_to_decay and not force and not premium_speed:
return self.min_to_decay
if premium_speed:
self.active_premium_subscribers += 1
spd = self.stories_last_month / 30.0
subs = (self.active_premium_subscribers +
((self.active_subscribers - self.active_premium_subscribers) / 10.0))
# Calculate sub counts:
# SELECT COUNT(*) FROM feeds WHERE active_premium_subscribers > 10 AND stories_last_month >= 30;
# SELECT COUNT(*) FROM feeds WHERE active_premium_subscribers > 1 AND active_premium_subscribers < 10 AND stories_last_month >= 30;
# SELECT COUNT(*) FROM feeds WHERE active_premium_subscribers = 1 AND stories_last_month >= 30;
# SpD > 1 Subs > 10: t = 6 # 4267 * 1440/6 = 1024080
# SpD > 1 Subs > 1: t = 15 # 18973 * 1440/15 = 1821408
# SpD > 1 Subs = 1: t = 60 # 65503 * 1440/60 = 1572072
# SELECT COUNT(*) FROM feeds WHERE active_premium_subscribers > 1 AND stories_last_month < 30 AND stories_last_month > 0;
# SELECT COUNT(*) FROM feeds WHERE active_premium_subscribers = 1 AND stories_last_month < 30 AND stories_last_month > 0;
# SpD < 1 Subs > 1: t = 60 # 77618 * 1440/60 = 1862832
# SpD < 1 Subs = 1: t = 60 * 12 # 282186 * 1440/(60*12) = 564372
# SELECT COUNT(*) FROM feeds WHERE active_premium_subscribers > 1 AND stories_last_month = 0;
# SELECT COUNT(*) FROM feeds WHERE active_subscribers > 0 AND active_premium_subscribers <= 1 AND stories_last_month = 0;
# SpD = 0 Subs > 1: t = 60 * 3 # 30158 * 1440/(60*3) = 241264
# SpD = 0 Subs = 1: t = 60 * 24 # 514131 * 1440/(60*24) = 514131
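        # Worked example (hypothetical feed): 60 stories last month (spd = 2)
        # with 3 active premium + 5 active subscribers (subs = 3.2) lands in
        # the "SpD > 1, Subs > 1" bucket, so total starts at 15 minutes before
        # the staleness, push and premium caps below are applied.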
if spd >= 1:
if subs >= 10:
total = 6
elif subs > 1:
total = 15
else:
total = 45
elif spd > 0:
if subs > 1:
total = 60 - (spd * 60)
else:
total = 60*12 - (spd * 60*12)
elif spd == 0:
if subs > 1:
total = 60 * 6
elif subs == 1:
total = 60 * 12
else:
total = 60 * 24
months_since_last_story = seconds_timesince(self.last_story_date) / (60*60*24*30)
total *= max(1, months_since_last_story)
# updates_per_day_delay = 3 * 60 / max(.25, ((max(0, self.active_subscribers)**.2)
# * (self.stories_last_month**0.25)))
# if self.active_premium_subscribers > 0:
# updates_per_day_delay /= min(self.active_subscribers+self.active_premium_subscribers, 4)
# updates_per_day_delay = int(updates_per_day_delay)
# Lots of subscribers = lots of updates
# 24 hours for 0 subscribers.
# 4 hours for 1 subscriber.
# .5 hours for 2 subscribers.
# .25 hours for 3 subscribers.
# 1 min for 10 subscribers.
# subscriber_bonus = 6 * 60 / max(.167, max(0, self.active_subscribers)**3)
# if self.premium_subscribers > 0:
# subscriber_bonus /= min(self.active_subscribers+self.premium_subscribers, 5)
# subscriber_bonus = int(subscriber_bonus)
if self.is_push:
fetch_history = MFetchHistory.feed(self.pk)
if len(fetch_history['push_history']):
total = total * 12
# 4 hour max for premiums, 48 hour max for free
if subs >= 1:
total = min(total, 60*4*1)
else:
total = min(total, 60*24*2)
# Craigslist feeds get 6 hours minimum
if 'craigslist' in self.feed_address:
total = max(total, 60*6)
if verbose:
logging.debug(" ---> [%-30s] Fetched every %s min - Subs: %s/%s/%s Stories/day: %s" % (
self.log_title[:30], total,
self.num_subscribers,
self.active_subscribers,
self.active_premium_subscribers,
spd))
return total
def set_next_scheduled_update(self, verbose=False, skip_scheduling=False):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
total = self.get_next_scheduled_update(force=True, verbose=verbose)
error_count = self.error_count
if error_count:
total = total * error_count
total = min(total, 60*24*7)
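            # e.g. a 60-minute feed with 5 recent errors backs off to 300
            # minutes, and the backoff can never exceed one week (10080 min).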
if verbose:
logging.debug(' ---> [%-30s] ~FBScheduling feed fetch geometrically: '
'~SB%s errors. Time: %s min' % (
self.log_title[:30], self.errors_since_good, total))
random_factor = random.randint(0, total) / 4
next_scheduled_update = datetime.datetime.utcnow() + datetime.timedelta(
minutes = total + random_factor)
original_min_to_decay = self.min_to_decay
self.min_to_decay = total
delta = self.next_scheduled_update - datetime.datetime.now()
minutes_to_next_fetch = (delta.seconds + (delta.days * 24 * 3600)) / 60
if minutes_to_next_fetch > self.min_to_decay or not skip_scheduling:
self.next_scheduled_update = next_scheduled_update
if self.active_subscribers >= 1:
r.zadd('scheduled_updates', self.pk, self.next_scheduled_update.strftime('%s'))
r.zrem('tasked_feeds', self.pk)
r.srem('queued_feeds', self.pk)
updated_fields = ['last_update', 'next_scheduled_update']
if self.min_to_decay != original_min_to_decay:
updated_fields.append('min_to_decay')
self.save(update_fields=updated_fields)
@property
def error_count(self):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
fetch_errors = int(r.zscore('error_feeds', self.pk) or 0)
return fetch_errors + self.errors_since_good
def schedule_feed_fetch_immediately(self, verbose=True):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
if not self.num_subscribers:
logging.debug(' ---> [%-30s] Not scheduling feed fetch immediately, no subs.' % (self.log_title[:30]))
return
if verbose:
logging.debug(' ---> [%-30s] Scheduling feed fetch immediately...' % (self.log_title[:30]))
self.next_scheduled_update = datetime.datetime.utcnow()
r.zadd('scheduled_updates', self.pk, self.next_scheduled_update.strftime('%s'))
return self.save()
def setup_push(self):
from apps.push.models import PushSubscription
try:
push = self.push
except PushSubscription.DoesNotExist:
self.is_push = False
else:
self.is_push = push.verified
self.save()
def queue_pushed_feed_xml(self, xml, latest_push_date_delta=None):
r = redis.Redis(connection_pool=settings.REDIS_FEED_UPDATE_POOL)
queue_size = r.llen("push_feeds")
if latest_push_date_delta:
latest_push_date_delta = "%s" % str(latest_push_date_delta).split('.', 2)[0]
if queue_size > 1000:
self.schedule_feed_fetch_immediately()
else:
logging.debug(' ---> [%-30s] [%s] ~FB~SBQueuing pushed stories, last pushed %s...' % (self.log_title[:30], self.pk, latest_push_date_delta))
self.set_next_scheduled_update()
PushFeeds.apply_async(args=(self.pk, xml), queue='push_feeds')
# def calculate_collocations_story_content(self,
# collocation_measures=TrigramAssocMeasures,
# collocation_finder=TrigramCollocationFinder):
# stories = MStory.objects.filter(story_feed_id=self.pk)
# story_content = ' '.join([s.story_content for s in stories if s.story_content])
# return self.calculate_collocations(story_content, collocation_measures, collocation_finder)
#
# def calculate_collocations_story_title(self,
# collocation_measures=BigramAssocMeasures,
# collocation_finder=BigramCollocationFinder):
# stories = MStory.objects.filter(story_feed_id=self.pk)
# story_titles = ' '.join([s.story_title for s in stories if s.story_title])
# return self.calculate_collocations(story_titles, collocation_measures, collocation_finder)
#
# def calculate_collocations(self, content,
# collocation_measures=TrigramAssocMeasures,
# collocation_finder=TrigramCollocationFinder):
# content = re.sub(r'’', '\'', content)
# content = re.sub(r'&', '&', content)
# try:
# content = unicode(BeautifulStoneSoup(content,
# convertEntities=BeautifulStoneSoup.HTML_ENTITIES))
# except ValueError, e:
# print "ValueError, ignoring: %s" % e
# content = re.sub(r'</?\w+\s+[^>]*>', '', content)
# content = re.split(r"[^A-Za-z-'&]+", content)
#
# finder = collocation_finder.from_words(content)
# finder.apply_freq_filter(3)
# best = finder.nbest(collocation_measures.pmi, 10)
# phrases = [' '.join(phrase) for phrase in best]
#
# return phrases
# class FeedCollocations(models.Model):
# feed = models.ForeignKey(Feed)
# phrase = models.CharField(max_length=500)
class FeedData(models.Model):
feed = AutoOneToOneField(Feed, related_name='data')
feed_tagline = models.CharField(max_length=1024, blank=True, null=True)
story_count_history = models.TextField(blank=True, null=True)
feed_classifier_counts = models.TextField(blank=True, null=True)
popular_tags = models.CharField(max_length=1024, blank=True, null=True)
popular_authors = models.CharField(max_length=2048, blank=True, null=True)
def save(self, *args, **kwargs):
if self.feed_tagline and len(self.feed_tagline) >= 1000:
self.feed_tagline = self.feed_tagline[:1000]
try:
super(FeedData, self).save(*args, **kwargs)
except (IntegrityError, OperationError):
if hasattr(self, 'id') and self.id: self.delete()
except DatabaseError, e:
# Nothing updated
logging.debug(" ---> ~FRNothing updated in FeedData (%s): %s" % (self.feed, e))
pass
class MFeedIcon(mongo.Document):
feed_id = mongo.IntField(primary_key=True)
color = mongo.StringField(max_length=6)
data = mongo.StringField()
icon_url = mongo.StringField()
not_found = mongo.BooleanField(default=False)
meta = {
'collection' : 'feed_icons',
'allow_inheritance' : False,
}
@classmethod
def get_feed(cls, feed_id, create=True):
try:
feed_icon = cls.objects.read_preference(pymongo.ReadPreference.PRIMARY)\
.get(feed_id=feed_id)
except cls.DoesNotExist:
if create:
feed_icon = cls.objects.create(feed_id=feed_id)
else:
feed_icon = None
return feed_icon
def save(self, *args, **kwargs):
if self.icon_url:
self.icon_url = unicode(self.icon_url)
try:
return super(MFeedIcon, self).save(*args, **kwargs)
except (IntegrityError, OperationError):
# print "Error on Icon: %s" % e
if hasattr(self, '_id'): self.delete()
class MFeedPage(mongo.Document):
feed_id = mongo.IntField(primary_key=True)
page_data = mongo.BinaryField()
meta = {
'collection': 'feed_pages',
'allow_inheritance': False,
}
def save(self, *args, **kwargs):
if self.page_data:
self.page_data = zlib.compress(self.page_data)
return super(MFeedPage, self).save(*args, **kwargs)
def page(self):
return zlib.decompress(self.page_data)
@classmethod
def get_data(cls, feed_id):
data = None
feed_page = cls.objects(feed_id=feed_id)
if feed_page:
page_data_z = feed_page[0].page_data
if page_data_z:
data = zlib.decompress(page_data_z)
if not data:
dupe_feed = DuplicateFeed.objects.filter(duplicate_feed_id=feed_id)
if dupe_feed:
feed = dupe_feed[0].feed
feed_page = MFeedPage.objects.filter(feed_id=feed.pk)
if feed_page:
page_data_z = feed_page[0].page_data
if page_data_z:
data = zlib.decompress(feed_page[0].page_data)
return data
class MStory(mongo.Document):
'''A feed item'''
story_feed_id = mongo.IntField()
story_date = mongo.DateTimeField()
story_title = mongo.StringField(max_length=1024)
story_content = mongo.StringField()
story_content_z = mongo.BinaryField()
story_original_content = mongo.StringField()
story_original_content_z = mongo.BinaryField()
story_latest_content = mongo.StringField()
story_latest_content_z = mongo.BinaryField()
original_text_z = mongo.BinaryField()
original_page_z = mongo.BinaryField()
story_content_type = mongo.StringField(max_length=255)
story_author_name = mongo.StringField()
story_permalink = mongo.StringField()
story_guid = mongo.StringField()
story_hash = mongo.StringField()
image_urls = mongo.ListField(mongo.StringField(max_length=1024))
story_tags = mongo.ListField(mongo.StringField(max_length=250))
comment_count = mongo.IntField()
comment_user_ids = mongo.ListField(mongo.IntField())
share_count = mongo.IntField()
share_user_ids = mongo.ListField(mongo.IntField())
meta = {
'collection': 'stories',
'indexes': [('story_feed_id', '-story_date'),
{'fields': ['story_hash'],
'unique': True,
'types': False, }],
'ordering': ['-story_date'],
'allow_inheritance': False,
'cascade': False,
'strict': False,
}
RE_STORY_HASH = re.compile(r"^(\d{1,10}):(\w{6})$")
RE_RS_KEY = re.compile(r"^RS:(\d+):(\d+)$")
@property
def guid_hash(self):
return hashlib.sha1(self.story_guid).hexdigest()[:6]
@classmethod
def guid_hash_unsaved(self, guid):
return hashlib.sha1(guid).hexdigest()[:6]
@property
def feed_guid_hash(self):
return "%s:%s" % (self.story_feed_id, self.guid_hash)
@classmethod
def feed_guid_hash_unsaved(cls, feed_id, guid):
return "%s:%s" % (feed_id, cls.guid_hash_unsaved(guid))
@property
def decoded_story_title(self):
h = HTMLParser.HTMLParser()
return h.unescape(self.story_title)
def save(self, *args, **kwargs):
story_title_max = MStory._fields['story_title'].max_length
story_content_type_max = MStory._fields['story_content_type'].max_length
self.story_hash = self.feed_guid_hash
self.extract_image_urls()
if self.story_content:
self.story_content_z = zlib.compress(smart_str(self.story_content))
self.story_content = None
if self.story_original_content:
self.story_original_content_z = zlib.compress(smart_str(self.story_original_content))
self.story_original_content = None
if self.story_latest_content:
self.story_latest_content_z = zlib.compress(smart_str(self.story_latest_content))
self.story_latest_content = None
if self.story_title and len(self.story_title) > story_title_max:
self.story_title = self.story_title[:story_title_max]
if self.story_content_type and len(self.story_content_type) > story_content_type_max:
self.story_content_type = self.story_content_type[:story_content_type_max]
super(MStory, self).save(*args, **kwargs)
self.sync_redis()
return self
def delete(self, *args, **kwargs):
self.remove_from_redis()
self.remove_from_search_index()
super(MStory, self).delete(*args, **kwargs)
def publish_to_subscribers(self):
try:
r = redis.Redis(connection_pool=settings.REDIS_PUBSUB_POOL)
r.publish("%s:story" % (self.story_feed_id), '%s,%s' % (self.story_hash, self.story_date.strftime('%s')))
except redis.ConnectionError:
logging.debug(" ***> [%-30s] ~BMRedis is unavailable for real-time." % (Feed.get_by_id(self.story_feed_id).title[:30],))
@classmethod
def purge_feed_stories(cls, feed, cutoff, verbose=True):
stories = cls.objects(story_feed_id=feed.pk)
logging.debug(" ---> Deleting %s stories from %s" % (stories.count(), feed))
if stories.count() > cutoff*1.25:
logging.debug(" ***> ~FRToo many stories in %s, not purging..." % (feed))
return
stories.delete()
@classmethod
def index_all_for_search(cls, offset=0):
if not offset:
SearchStory.create_elasticsearch_mapping(delete=True)
last_pk = Feed.objects.latest('pk').pk
for f in xrange(offset, last_pk, 1000):
print " ---> %s / %s (%.2s%%)" % (f, last_pk, float(f)/last_pk*100)
feeds = Feed.objects.filter(pk__in=range(f, f+1000),
active=True,
active_subscribers__gte=1)\
.values_list('pk')
for feed_id, in feeds:
stories = cls.objects.filter(story_feed_id=feed_id)
for story in stories:
story.index_story_for_search()
def index_story_for_search(self):
story_content = self.story_content or ""
if self.story_content_z:
story_content = zlib.decompress(self.story_content_z)
SearchStory.index(story_hash=self.story_hash,
story_title=self.story_title,
story_content=prep_for_search(story_content),
story_tags=self.story_tags,
story_author=self.story_author_name,
story_feed_id=self.story_feed_id,
story_date=self.story_date)
def remove_from_search_index(self):
try:
SearchStory.remove(self.story_hash)
except NotFoundException:
pass
@classmethod
def trim_feed(cls, cutoff, feed_id=None, feed=None, verbose=True):
extra_stories_count = 0
if not feed_id and not feed:
return extra_stories_count
if not feed_id:
feed_id = feed.pk
if not feed:
feed = feed_id
stories = cls.objects(
story_feed_id=feed_id
).only('story_date').order_by('-story_date')
if stories.count() > cutoff:
logging.debug(' ---> [%-30s] ~FMFound %s stories. Trimming to ~SB%s~SN...' %
(unicode(feed)[:30], stories.count(), cutoff))
try:
story_trim_date = stories[cutoff].story_date
if story_trim_date == stories[0].story_date:
# Handle case where every story is the same time
story_trim_date = story_trim_date - datetime.timedelta(seconds=1)
except IndexError, e:
logging.debug(' ***> [%-30s] ~BRError trimming feed: %s' % (unicode(feed)[:30], e))
return extra_stories_count
extra_stories = cls.objects(story_feed_id=feed_id,
story_date__lte=story_trim_date)
extra_stories_count = extra_stories.count()
shared_story_count = 0
for story in extra_stories:
if story.share_count:
shared_story_count += 1
extra_stories_count -= 1
continue
story.delete()
if verbose:
existing_story_count = cls.objects(story_feed_id=feed_id).count()
logging.debug(" ---> Deleted %s stories, %s (%s shared) left." % (
extra_stories_count,
existing_story_count,
shared_story_count))
return extra_stories_count
@classmethod
def find_story(cls, story_feed_id=None, story_id=None, story_hash=None, original_only=False):
from apps.social.models import MSharedStory
original_found = False
if story_hash:
story_id = story_hash
story_hash = cls.ensure_story_hash(story_id, story_feed_id)
if not story_feed_id:
story_feed_id, _ = cls.split_story_hash(story_hash)
if isinstance(story_id, ObjectId):
story = cls.objects(id=story_id).limit(1).first()
else:
story = cls.objects(story_hash=story_hash).limit(1).first()
if story:
original_found = True
if not story and not original_only:
story = MSharedStory.objects.filter(story_feed_id=story_feed_id,
story_hash=story_hash).limit(1).first()
if not story and not original_only:
story = MStarredStory.objects.filter(story_feed_id=story_feed_id,
story_hash=story_hash).limit(1).first()
return story, original_found
@classmethod
def find_by_id(cls, story_ids):
from apps.social.models import MSharedStory
count = len(story_ids)
multiple = isinstance(story_ids, list) or isinstance(story_ids, tuple)
stories = list(cls.objects(id__in=story_ids))
if len(stories) < count:
shared_stories = list(MSharedStory.objects(id__in=story_ids))
stories.extend(shared_stories)
if not multiple:
stories = stories[0]
return stories
@classmethod
def find_by_story_hashes(cls, story_hashes):
from apps.social.models import MSharedStory
count = len(story_hashes)
multiple = isinstance(story_hashes, list) or isinstance(story_hashes, tuple)
stories = list(cls.objects(story_hash__in=story_hashes))
if len(stories) < count:
hashes_found = [s.story_hash for s in stories]
remaining_hashes = list(set(story_hashes) - set(hashes_found))
story_feed_ids = [h.split(':')[0] for h in remaining_hashes]
shared_stories = list(MSharedStory.objects(story_feed_id__in=story_feed_ids,
story_hash__in=remaining_hashes))
stories.extend(shared_stories)
if not multiple:
stories = stories[0]
return stories
@classmethod
def ensure_story_hash(cls, story_id, story_feed_id):
if not cls.RE_STORY_HASH.match(story_id):
story_id = "%s:%s" % (story_feed_id, hashlib.sha1(story_id).hexdigest()[:6])
return story_id
@classmethod
def split_story_hash(cls, story_hash):
matches = cls.RE_STORY_HASH.match(story_hash)
if matches:
groups = matches.groups()
return groups[0], groups[1]
return None, None
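    # Usage sketch (illustrative hash digits): ensure_story_hash turns a raw
    # guid like 'http://example.com/post' plus feed id 42 into '42:1a2b3c'
    # (first 6 hex chars of its sha1), and split_story_hash('42:1a2b3c')
    # returns ('42', '1a2b3c'); ids already in hash form pass through as-is.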
@classmethod
def split_rs_key(cls, rs_key):
matches = cls.RE_RS_KEY.match(rs_key)
if matches:
groups = matches.groups()
return groups[0], groups[1]
return None, None
@classmethod
def story_hashes(cls, story_ids):
story_hashes = []
for story_id in story_ids:
story_hash = cls.ensure_story_hash(story_id)
if not story_hash: continue
story_hashes.append(story_hash)
return story_hashes
def sync_redis(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
if self.id and self.story_date > UNREAD_CUTOFF:
feed_key = 'F:%s' % self.story_feed_id
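            # Two redis structures per feed: F:<feed_id> is a plain set of
            # story hashes, zF:<feed_id> is a sorted set scored by the story's
            # unix timestamp; both expire after DAYS_OF_STORY_HASHES days.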
r.sadd(feed_key, self.story_hash)
r.expire(feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.sadd(feed_key, self.story_hash)
# r2.expire(feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
r.zadd('z' + feed_key, self.story_hash, time.mktime(self.story_date.timetuple()))
r.expire('z' + feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
# r2.zadd('z' + feed_key, self.story_hash, time.mktime(self.story_date.timetuple()))
# r2.expire('z' + feed_key, settings.DAYS_OF_STORY_HASHES*24*60*60)
def remove_from_redis(self, r=None):
if not r:
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# if not r2:
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
if self.id:
r.srem('F:%s' % self.story_feed_id, self.story_hash)
# r2.srem('F:%s' % self.story_feed_id, self.story_hash)
r.zrem('zF:%s' % self.story_feed_id, self.story_hash)
# r2.zrem('zF:%s' % self.story_feed_id, self.story_hash)
@classmethod
def sync_feed_redis(cls, story_feed_id):
r = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL)
# r2 = redis.Redis(connection_pool=settings.REDIS_STORY_HASH_POOL2)
UNREAD_CUTOFF = datetime.datetime.now() - datetime.timedelta(days=settings.DAYS_OF_STORY_HASHES)
feed = Feed.get_by_id(story_feed_id)
stories = cls.objects.filter(story_feed_id=story_feed_id, story_date__gte=UNREAD_CUTOFF)
r.delete('F:%s' % story_feed_id)
# r2.delete('F:%s' % story_feed_id)
r.delete('zF:%s' % story_feed_id)
# r2.delete('zF:%s' % story_feed_id)
logging.info(" ---> [%-30s] ~FMSyncing ~SB%s~SN stories to redis" % (feed and feed.log_title[:30] or story_feed_id, stories.count()))
p = r.pipeline()
# p2 = r2.pipeline()
for story in stories:
story.sync_redis(r=p)
p.execute()
# p2.execute()
def count_comments(self):
from apps.social.models import MSharedStory
params = {
'story_guid': self.story_guid,
'story_feed_id': self.story_feed_id,
}
comments = MSharedStory.objects.filter(has_comments=True, **params).only('user_id')
shares = MSharedStory.objects.filter(**params).only('user_id')
self.comment_count = comments.count()
self.comment_user_ids = [c['user_id'] for c in comments]
self.share_count = shares.count()
self.share_user_ids = [s['user_id'] for s in shares]
self.save()
def extract_image_urls(self, force=False, text=False):
if self.image_urls and not force and not text:
return self.image_urls
story_content = None
if not text:
story_content = self.story_content
if not story_content and self.story_content_z:
story_content = zlib.decompress(self.story_content_z)
elif text:
if self.original_text_z:
story_content = zlib.decompress(self.original_text_z)
if not story_content:
return
try:
soup = BeautifulSoup(story_content)
except ValueError:
if not text:
return self.extract_image_urls(force=force, text=True)
else:
return
images = soup.findAll('img')
if not images:
if not text:
return self.extract_image_urls(force=force, text=True)
else:
return
image_urls = self.image_urls
if not image_urls:
image_urls = []
for image in images:
image_url = image.get('src')
if not image_url:
continue
if image_url and len(image_url) >= 1024:
continue
image_urls.append(image_url)
if not image_urls:
if not text:
return self.extract_image_urls(force=force, text=True)
else:
return
if text:
urls = []
for url in image_urls:
if 'http://' in url[1:] or 'https://' in url[1:]:
continue
urls.append(url)
image_urls = urls
ordered_image_urls = []
for image_url in list(set(image_urls)):
if 'feedburner' in image_url:
ordered_image_urls.append(image_url)
else:
ordered_image_urls.insert(0, image_url)
image_urls = ordered_image_urls
if len(image_urls):
self.image_urls = [u for u in image_urls if u]
return self.image_urls
def fetch_original_text(self, force=False, request=None, debug=False):
original_text_z = self.original_text_z
if not original_text_z or force:
feed = Feed.get_by_id(self.story_feed_id)
ti = TextImporter(self, feed=feed, request=request, debug=debug)
original_doc = ti.fetch(return_document=True)
original_text = original_doc.get('content') if original_doc else None
self.extract_image_urls(force=force, text=True)
self.save()
else:
logging.user(request, "~FYFetching ~FGoriginal~FY story text, ~SBfound.")
original_text = zlib.decompress(original_text_z)
return original_text
def fetch_original_page(self, force=False, request=None, debug=False):
from apps.rss_feeds.page_importer import PageImporter
if not self.original_page_z or force:
feed = Feed.get_by_id(self.story_feed_id)
importer = PageImporter(request=request, feed=feed, story=self)
original_page = importer.fetch_story()
else:
logging.user(request, "~FYFetching ~FGoriginal~FY story page, ~SBfound.")
original_page = zlib.decompress(self.original_page_z)
return original_page
class MStarredStory(mongo.DynamicDocument):
"""Like MStory, but not inherited due to large overhead of _cls and _type in
mongoengine's inheritance model on every single row."""
user_id = mongo.IntField(unique_with=('story_guid',))
starred_date = mongo.DateTimeField()
story_feed_id = mongo.IntField()
story_date = mongo.DateTimeField()
story_title = mongo.StringField(max_length=1024)
story_content = mongo.StringField()
story_content_z = mongo.BinaryField()
story_original_content = mongo.StringField()
story_original_content_z = mongo.BinaryField()
original_text_z = mongo.BinaryField()
story_content_type = mongo.StringField(max_length=255)
story_author_name = mongo.StringField()
story_permalink = mongo.StringField()
story_guid = mongo.StringField()
story_hash = mongo.StringField()
story_tags = mongo.ListField(mongo.StringField(max_length=250))
user_notes = mongo.StringField()
user_tags = mongo.ListField(mongo.StringField(max_length=128))
highlights = mongo.ListField(mongo.StringField(max_length=1024))
image_urls = mongo.ListField(mongo.StringField(max_length=1024))
meta = {
'collection': 'starred_stories',
'indexes': [('user_id', '-starred_date'), ('user_id', 'story_feed_id'),
('user_id', 'story_hash'), 'story_feed_id'],
'ordering': ['-starred_date'],
'allow_inheritance': False,
'strict': False,
}
def __unicode__(self):
try:
user = User.objects.get(pk=self.user_id)
username = user.username
except User.DoesNotExist:
username = '[deleted]'
return "%s: %s (%s)" % (username,
self.story_title[:20],
self.story_feed_id)
def save(self, *args, **kwargs):
if self.story_content:
self.story_content_z = zlib.compress(self.story_content)
self.story_content = None
if self.story_original_content:
self.story_original_content_z = zlib.compress(self.story_original_content)
self.story_original_content = None
self.story_hash = self.feed_guid_hash
return super(MStarredStory, self).save(*args, **kwargs)
@classmethod
def find_stories(cls, query, user_id, tag=None, offset=0, limit=25, order="newest"):
stories_db = cls.objects(
Q(user_id=user_id) &
(Q(story_title__icontains=query) |
Q(story_author_name__icontains=query) |
Q(story_tags__icontains=query))
)
if tag:
stories_db = stories_db.filter(user_tags__contains=tag)
stories_db = stories_db.order_by('%sstarred_date' %
('-' if order == "newest" else ""))[offset:offset+limit]
stories = Feed.format_stories(stories_db)
return stories
@classmethod
def find_stories_by_user_tag(cls, user_tag, user_id, offset=0, limit=25):
stories_db = cls.objects(
Q(user_id=user_id),
Q(user_tags__icontains=user_tag)
).order_by('-starred_date')[offset:offset+limit]
stories = Feed.format_stories(stories_db)
return stories
@classmethod
def trim_old_stories(cls, stories=10, days=90, dryrun=False):
print " ---> Fetching starred story counts..."
stats = settings.MONGODB.newsblur.starred_stories.aggregate([{
"$group": {
"_id": "$user_id",
"stories": {"$sum": 1},
},
}, {
"$match": {
"stories": {"$gte": stories}
},
}])
month_ago = datetime.datetime.now() - datetime.timedelta(days=days)
user_ids = list(stats)
user_ids = sorted(user_ids, key=lambda x:x['stories'], reverse=True)
print " ---> Found %s users with more than %s starred stories" % (len(user_ids), stories)
total = 0
for stat in user_ids:
try:
user = User.objects.select_related('profile').get(pk=stat['_id'])
except User.DoesNotExist:
user = None
if user and (user.profile.is_premium or user.profile.last_seen_on > month_ago):
continue
total += stat['stories']
username = "%s (%s)" % (user and user.username or " - ", stat['_id'])
print " ---> %19.19s: %-20.20s %s stories" % (user and user.profile.last_seen_on or "Deleted",
username,
stat['stories'])
if not dryrun and stat['_id']:
cls.objects.filter(user_id=stat['_id']).delete()
elif not dryrun and stat['_id'] == 0:
print " ---> Deleting unstarred stories (user_id = 0)"
cls.objects.filter(user_id=stat['_id']).delete()
print " ---> Deleted %s stories in total." % total
@property
def guid_hash(self):
return hashlib.sha1(self.story_guid).hexdigest()[:6]
@property
def feed_guid_hash(self):
return "%s:%s" % (self.story_feed_id or "0", self.guid_hash)
def fetch_original_text(self, force=False, request=None, debug=False):
original_text_z = self.original_text_z
feed = Feed.get_by_id(self.story_feed_id)
if not original_text_z or force:
ti = TextImporter(self, feed=feed, request=request, debug=debug)
original_text = ti.fetch()
else:
logging.user(request, "~FYFetching ~FGoriginal~FY story text, ~SBfound.")
original_text = zlib.decompress(original_text_z)
return original_text
def fetch_original_page(self, force=False, request=None, debug=False):
return None
class MStarredStoryCounts(mongo.Document):
user_id = mongo.IntField()
tag = mongo.StringField(max_length=128)
feed_id = mongo.IntField()
is_highlights = mongo.BooleanField()
slug = mongo.StringField(max_length=128)
count = mongo.IntField(default=0)
meta = {
'collection': 'starred_stories_counts',
'indexes': ['user_id'],
'ordering': ['tag'],
'allow_inheritance': False,
}
def __unicode__(self):
if self.tag:
return "Tag: %s (%s)" % (self.tag, self.count)
elif self.feed_id:
return "Feed: %s (%s)" % (self.feed_id, self.count)
elif self.is_highlights:
return "Highlights: %s (%s)" % (self.is_highlights, self.count)
return "%s/%s/%s" % (self.tag, self.feed_id, self.is_highlights)
@property
def rss_url(self, secret_token=None):
if self.feed_id:
return
if not secret_token:
user = User.objects.select_related('profile').get(pk=self.user_id)
secret_token = user.profile.secret_token
slug = self.slug if self.slug else ""
return "%s/reader/starred_rss/%s/%s/%s" % (settings.NEWSBLUR_URL, self.user_id,
secret_token, slug)
@classmethod
def user_counts(cls, user_id, include_total=False, try_counting=True):
counts = cls.objects.filter(user_id=user_id)
counts = sorted([{'tag': c.tag,
'count': c.count,
'is_highlights': c.is_highlights,
'feed_address': c.rss_url,
'feed_id': c.feed_id}
for c in counts],
key=lambda x: (x.get('tag', '') or '').lower())
total = 0
feed_total = 0
for c in counts:
if not c['tag'] and not c['feed_id'] and not c['is_highlights']:
total = c['count']
if c['feed_id']:
feed_total += c['count']
if try_counting and (total != feed_total or not len(counts)):
user = User.objects.get(pk=user_id)
logging.user(user, "~FC~SBCounting~SN saved stories (%s total vs. %s counted)..." %
(total, feed_total))
cls.count_for_user(user_id)
return cls.user_counts(user_id, include_total=include_total,
try_counting=False)
if include_total:
return counts, total
return counts
@classmethod
def schedule_count_tags_for_user(cls, user_id):
ScheduleCountTagsForUser.apply_async(kwargs=dict(user_id=user_id))
@classmethod
def count_for_user(cls, user_id, total_only=False):
user_tags = []
user_feeds = []
highlights = 0
if not total_only:
cls.objects(user_id=user_id).delete()
try:
user_tags = cls.count_tags_for_user(user_id)
highlights = cls.count_highlights_for_user(user_id)
user_feeds = cls.count_feeds_for_user(user_id)
except pymongo.errors.OperationFailure, e:
logging.debug(" ---> ~FBOperationError on mongo: ~SB%s" % e)
total_stories_count = MStarredStory.objects(user_id=user_id).count()
cls.objects(user_id=user_id, tag=None, feed_id=None, is_highlights=None).update_one(set__count=total_stories_count,
upsert=True)
return dict(total=total_stories_count, tags=user_tags, feeds=user_feeds, highlights=highlights)
@classmethod
def count_tags_for_user(cls, user_id):
all_tags = MStarredStory.objects(user_id=user_id,
user_tags__exists=True).item_frequencies('user_tags')
user_tags = sorted([(k, v) for k, v in all_tags.items() if int(v) > 0 and k],
key=lambda x: x[0].lower(),
reverse=True)
for tag, count in dict(user_tags).items():
cls.objects(user_id=user_id, tag=tag, slug=slugify(tag)).update_one(set__count=count,
upsert=True)
return user_tags
@classmethod
def count_highlights_for_user(cls, user_id):
highlighted_count = MStarredStory.objects(user_id=user_id,
highlights__exists=True,
__raw__={"$where": "this.highlights.length > 0"}).count()
cls.objects(user_id=user_id,
is_highlights=True, slug="highlights").update_one(set__count=highlighted_count, upsert=True)
return highlighted_count
@classmethod
def count_feeds_for_user(cls, user_id):
all_feeds = MStarredStory.objects(user_id=user_id).item_frequencies('story_feed_id')
user_feeds = dict([(k, v) for k, v in all_feeds.items() if v])
# Clean up None'd and 0'd feed_ids, so they can be counted against the total
if user_feeds.get(None, False):
user_feeds[0] = user_feeds.get(0, 0)
user_feeds[0] += user_feeds.get(None)
del user_feeds[None]
if user_feeds.get(0, False):
user_feeds[-1] = user_feeds.get(0, 0)
del user_feeds[0]
too_many_feeds = False if len(user_feeds) < 1000 else True
for feed_id, count in user_feeds.items():
if too_many_feeds and count <= 1: continue
cls.objects(user_id=user_id,
feed_id=feed_id,
slug="feed:%s" % feed_id).update_one(set__count=count,
upsert=True)
return user_feeds
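    # Note on the remapping above: starred stories with a feed_id of None are folded into the 0 bucket,
    # and any 0 bucket is then moved to -1, so unattached stories still count against the user's total
    # without colliding with a real feed id; when a user has 1000+ distinct feeds, single-story feeds
    # are skipped to bound the number of count documents written.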
@classmethod
def adjust_count(cls, user_id, feed_id=None, tag=None, highlights=None, amount=0):
params = dict(user_id=user_id)
if feed_id:
params['feed_id'] = feed_id
if tag:
params['tag'] = tag
if highlights:
params['is_highlights'] = True
cls.objects(**params).update_one(inc__count=amount, upsert=True)
try:
story_count = cls.objects.get(**params)
except cls.MultipleObjectsReturned:
story_count = cls.objects(**params).first()
if story_count and story_count.count <= 0:
story_count.delete()
class MSavedSearch(mongo.Document):
user_id = mongo.IntField()
query = mongo.StringField(max_length=1024)
feed_id = mongo.StringField()
slug = mongo.StringField(max_length=128)
meta = {
'collection': 'saved_searches',
'indexes': ['user_id',
{'fields': ['user_id', 'feed_id', 'query'],
'unique': True,
'types': False, }],
'ordering': ['query'],
'allow_inheritance': False,
}
@property
def rss_url(self, secret_token=None):
if not secret_token:
user = User.objects.select_related('profile').get(pk=self.user_id)
secret_token = user.profile.secret_token
slug = self.slug if self.slug else ""
return "%s/reader/saved_search/%s/%s/%s" % (settings.NEWSBLUR_URL, self.user_id,
secret_token, slug)
@classmethod
def user_searches(cls, user_id):
searches = cls.objects.filter(user_id=user_id)
searches = sorted([{'query': s.query,
'feed_address': s.rss_url,
'feed_id': s.feed_id,
} for s in searches],
key=lambda x: (x.get('query', '') or '').lower())
return searches
@classmethod
def save_search(cls, user_id, feed_id, query):
user = User.objects.get(pk=user_id)
params = dict(user_id=user_id,
feed_id=feed_id,
query=query,
slug=slugify(query))
try:
saved_search = cls.objects.get(**params)
logging.user(user, "~FRSaved search already exists: ~SB%s" % query)
except cls.DoesNotExist:
logging.user(user, "~FCCreating a saved search: ~SB%s~SN/~SB%s" % (feed_id, query))
saved_search = cls.objects.create(**params)
return saved_search
@classmethod
def delete_search(cls, user_id, feed_id, query):
user = User.objects.get(pk=user_id)
params = dict(user_id=user_id,
feed_id=feed_id,
query=query)
try:
saved_search = cls.objects.get(**params)
logging.user(user, "~FCDeleting saved search: ~SB%s" % query)
saved_search.delete()
except cls.DoesNotExist:
logging.user(user, "~FRCan't delete saved search, missing: ~SB%s~SN/~SB%s" % (feed_id, query))
except cls.MultipleObjectsReturned:
logging.user(user, "~FRFound multiple saved searches, deleting: ~SB%s~SN/~SB%s" % (feed_id, query))
cls.objects(**params).delete()
class MFetchHistory(mongo.Document):
feed_id = mongo.IntField(unique=True)
feed_fetch_history = mongo.DynamicField()
page_fetch_history = mongo.DynamicField()
push_history = mongo.DynamicField()
raw_feed_history = mongo.DynamicField()
meta = {
'db_alias': 'nbanalytics',
'collection': 'fetch_history',
'allow_inheritance': False,
}
@classmethod
def feed(cls, feed_id, timezone=None, fetch_history=None):
if not fetch_history:
try:
fetch_history = cls.objects.read_preference(pymongo.ReadPreference.PRIMARY)\
.get(feed_id=feed_id)
except cls.DoesNotExist:
fetch_history = cls.objects.create(feed_id=feed_id)
history = {}
for fetch_type in ['feed_fetch_history', 'page_fetch_history', 'push_history']:
history[fetch_type] = getattr(fetch_history, fetch_type)
if not history[fetch_type]:
history[fetch_type] = []
for f, fetch in enumerate(history[fetch_type]):
date_key = 'push_date' if fetch_type == 'push_history' else 'fetch_date'
history[fetch_type][f] = {
date_key: localtime_for_timezone(fetch[0],
timezone).strftime("%Y-%m-%d %H:%M:%S"),
'status_code': fetch[1],
'message': fetch[2]
}
return history
@classmethod
def add(cls, feed_id, fetch_type, date=None, message=None, code=None, exception=None):
if not date:
date = datetime.datetime.now()
try:
fetch_history = cls.objects.read_preference(pymongo.ReadPreference.PRIMARY)\
.get(feed_id=feed_id)
except cls.DoesNotExist:
fetch_history = cls.objects.create(feed_id=feed_id)
if fetch_type == 'feed':
history = fetch_history.feed_fetch_history or []
elif fetch_type == 'page':
history = fetch_history.page_fetch_history or []
elif fetch_type == 'push':
history = fetch_history.push_history or []
elif fetch_type == 'raw_feed':
history = fetch_history.raw_feed_history or []
history = [[date, code, message]] + history
any_exceptions = any([c for d, c, m in history if c not in [200, 304]])
if any_exceptions:
history = history[:25]
elif fetch_type == 'raw_feed':
history = history[:10]
else:
history = history[:5]
if fetch_type == 'feed':
fetch_history.feed_fetch_history = history
elif fetch_type == 'page':
fetch_history.page_fetch_history = history
elif fetch_type == 'push':
fetch_history.push_history = history
elif fetch_type == 'raw_feed':
fetch_history.raw_feed_history = history
fetch_history.save()
if fetch_type == 'feed':
RStats.add('feed_fetch')
return cls.feed(feed_id, fetch_history=fetch_history)
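    # Note on the trimming above: the history list keeps up to 25 entries while any non-200/304 status
    # is present, 10 entries for raw_feed fetches, and 5 otherwise, so recent failures remain visible
    # longer than routine successful fetches.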
class DuplicateFeed(models.Model):
duplicate_address = models.CharField(max_length=764, db_index=True)
duplicate_link = models.CharField(max_length=764, null=True, db_index=True)
duplicate_feed_id = models.CharField(max_length=255, null=True, db_index=True)
feed = models.ForeignKey(Feed, related_name='duplicate_addresses')
def __unicode__(self):
return "%s: %s / %s" % (self.feed, self.duplicate_address, self.duplicate_link)
def canonical(self):
return {
'duplicate_address': self.duplicate_address,
'duplicate_link': self.duplicate_link,
'duplicate_feed_id': self.duplicate_feed_id,
'feed_id': self.feed_id
}
def save(self, *args, **kwargs):
max_address = DuplicateFeed._meta.get_field('duplicate_address').max_length
if len(self.duplicate_address) > max_address:
self.duplicate_address = self.duplicate_address[:max_address]
max_link = DuplicateFeed._meta.get_field('duplicate_link').max_length
if self.duplicate_link and len(self.duplicate_link) > max_link:
self.duplicate_link = self.duplicate_link[:max_link]
super(DuplicateFeed, self).save(*args, **kwargs)
def merge_feeds(original_feed_id, duplicate_feed_id, force=False):
from apps.reader.models import UserSubscription
from apps.social.models import MSharedStory
if original_feed_id == duplicate_feed_id:
logging.info(" ***> Merging the same feed. Ignoring...")
return original_feed_id
try:
original_feed = Feed.objects.get(pk=original_feed_id)
duplicate_feed = Feed.objects.get(pk=duplicate_feed_id)
except Feed.DoesNotExist:
logging.info(" ***> Already deleted feed: %s" % duplicate_feed_id)
return original_feed_id
heavier_dupe = original_feed.num_subscribers < duplicate_feed.num_subscribers
branched_original = original_feed.branch_from_feed and not duplicate_feed.branch_from_feed
if (heavier_dupe or branched_original) and not force:
original_feed, duplicate_feed = duplicate_feed, original_feed
original_feed_id, duplicate_feed_id = duplicate_feed_id, original_feed_id
if branched_original:
original_feed.feed_address = duplicate_feed.feed_address
logging.info(" ---> Feed: [%s - %s] %s - %s" % (original_feed_id, duplicate_feed_id,
original_feed, original_feed.feed_link))
logging.info(" Orig ++> %s: (%s subs) %s / %s %s" % (original_feed.pk,
original_feed.num_subscribers,
original_feed.feed_address,
original_feed.feed_link,
" [B: %s]" % original_feed.branch_from_feed.pk if original_feed.branch_from_feed else ""))
logging.info(" Dupe --> %s: (%s subs) %s / %s %s" % (duplicate_feed.pk,
duplicate_feed.num_subscribers,
duplicate_feed.feed_address,
duplicate_feed.feed_link,
" [B: %s]" % duplicate_feed.branch_from_feed.pk if duplicate_feed.branch_from_feed else ""))
original_feed.branch_from_feed = None
user_subs = UserSubscription.objects.filter(feed=duplicate_feed).order_by('-pk')
for user_sub in user_subs:
user_sub.switch_feed(original_feed, duplicate_feed)
def delete_story_feed(model, feed_field='feed_id'):
duplicate_stories = model.objects(**{feed_field: duplicate_feed.pk})
# if duplicate_stories.count():
# logging.info(" ---> Deleting %s %s" % (duplicate_stories.count(), model))
duplicate_stories.delete()
delete_story_feed(MStory, 'story_feed_id')
delete_story_feed(MFeedPage, 'feed_id')
try:
DuplicateFeed.objects.create(
duplicate_address=duplicate_feed.feed_address,
duplicate_link=duplicate_feed.feed_link,
duplicate_feed_id=duplicate_feed.pk,
feed=original_feed
)
except (IntegrityError, OperationError), e:
logging.info(" ***> Could not save DuplicateFeed: %s" % e)
# Switch this dupe feed's dupe feeds over to the new original.
duplicate_feeds_duplicate_feeds = DuplicateFeed.objects.filter(feed=duplicate_feed)
for dupe_feed in duplicate_feeds_duplicate_feeds:
dupe_feed.feed = original_feed
dupe_feed.duplicate_feed_id = duplicate_feed.pk
dupe_feed.save()
logging.debug(' ---> Dupe subscribers (%s): %s, Original subscribers (%s): %s' %
(duplicate_feed.pk, duplicate_feed.num_subscribers,
original_feed.pk, original_feed.num_subscribers))
if duplicate_feed.pk != original_feed.pk:
duplicate_feed.delete()
else:
logging.debug(" ***> Duplicate feed is the same as original feed. Panic!")
logging.debug(' ---> Deleted duplicate feed: %s/%s' % (duplicate_feed, duplicate_feed_id))
original_feed.branch_from_feed = None
original_feed.count_subscribers()
original_feed.save()
logging.debug(' ---> Now original subscribers: %s' %
(original_feed.num_subscribers))
MSharedStory.switch_feed(original_feed_id, duplicate_feed_id)
return original_feed_id
def rewrite_folders(folders, original_feed, duplicate_feed):
new_folders = []
for k, folder in enumerate(folders):
if isinstance(folder, int):
if folder == duplicate_feed.pk:
# logging.info(" ===> Rewrote %s'th item: %s" % (k+1, folders))
new_folders.append(original_feed.pk)
else:
new_folders.append(folder)
elif isinstance(folder, dict):
for f_k, f_v in folder.items():
new_folders.append({f_k: rewrite_folders(f_v, original_feed, duplicate_feed)})
return new_folders
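# For illustration (feed ids are hypothetical): given folders like [1, 2, {'News': [3, 4]}] and a
# duplicate feed with pk=3 merged into an original feed with pk=7, rewrite_folders() returns
# [1, 2, {'News': [7, 4]}], recursing into nested folder dicts and leaving other ids untouched.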
avg_line_length: 45.375402 | max_line_length: 302 | alphanum_fraction: 0.569862

hexsha: 3ba998266f9548b51dffe46f2188ee66b2f350f7 | size: 5,626 | ext: py | lang: Python
max_stars_repo: src/gtk-client/client.py | lalaveine/VoiceAssistant | 3742de4b305ba0e9bd34f3a9914a20ec64e17397 | ["MIT"] | max_stars_count: null | null | null
max_issues_repo: src/gtk-client/client.py | lalaveine/VoiceAssistant | 3742de4b305ba0e9bd34f3a9914a20ec64e17397 | ["MIT"] | max_issues_count: 3 | 2021-02-08T20:33:39.000Z | 2021-06-02T00:00:21.000Z
max_forks_repo: src/gtk-client/client.py | lalaveine/VoiceAssistant | 3742de4b305ba0e9bd34f3a9914a20ec64e17397 | ["MIT"] | max_forks_count: 1 | 2020-04-17T18:40:09.000Z | 2020-04-17T18:40:09.000Z
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
# import model
from src.model.datamanager import SupportedApplications
# DBus
from pydbus import SessionBus
import threading
class DBusService(object):
"""
<node>
<interface name='org.LinuxAssistantClient'>
<method name='probe_connection'>
<arg type='b' name='response' direction='out'/>
</method>
<method name='print_text'>
<arg type='s' name='text' direction='in'/>
<arg type='b' name='is_user' direction='in'/>
</method>
<method name='echo_string'>
<arg type='s' name='a' direction='in'/>
<arg type='s' name='response' direction='out'/>
</method>
<method name='quit'/>
</interface>
</node>
"""
def probe_connection(self):
return True
def print_text(self, text, is_user):
print("Print text")
if is_user is True:
add_row(text, "right")
else:
add_row(text, "left")
def echo_string(self, s):
"""returns whatever is passed to it"""
return s
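    # For illustration (hypothetical peer-side usage): once the __main__ block below publishes this
    # service on the session bus, another pydbus process could reach it roughly as
    #     bus = SessionBus(); proxy = bus.get("org.LinuxAssistantClient")
    #     proxy.print_text("hello", False)
    # which would append a left-aligned row via add_row() further down.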
class Handler:
def add_app_button_clicked(self, *args):
print("Add command")
add_app_dialog.show_all()
response = add_app_dialog.run()
name_entry = builder.get_object("add_app_name_entry")
command_entry = builder.get_object("add_command_entry")
if response == -10:
print("Apply")
SupportedApplications.add_entry(name_entry.get_text(), command_entry.get_text())
draw_table(True)
name_entry.set_text("")
command_entry.set_text("")
def edit_app_button_clicked(self, *args):
print("edit command")
name_entry = builder.get_object("edit_app_name_entry")
command_entry = builder.get_object("edit_command_entry")
(model, iter) = select.get_selected()
print((model[iter][0]))
name_entry.set_text((model[iter][0]))
command_entry.set_text((model[iter][1]))
edit_app_dialog.show_all()
response = edit_app_dialog.run()
if response == -10:
print("Apply")
SupportedApplications.edit_entry((model[iter][0]), name_entry.get_text(), command_entry.get_text())
draw_table(True)
name_entry.set_text("")
command_entry.set_text("")
def remove_app_button_clicked(self, *args):
print("remove command")
(model, iter) = select.get_selected()
SupportedApplications.remove_entry((model[iter][0]))
draw_table(True)
def on_record_button_clicked(self, *args):
thread = threading.Thread(target=server.wakeup_call)
thread.daemon = True
thread.start()
def on_dialog_delete_event(self, dialog, event):
dialog.hide()
return True
def on_response(self, dialog, response_id):
dialog.hide()
def on_tree_selection_changed(selection):
model, treeiter = selection.get_selected()
if treeiter is not None:
print("You selected", model[treeiter][0])
def main_window_destroy(self, *args):
Gtk.main_quit()
def get_app_list_store():
store = Gtk.ListStore(str, str)
app_list = SupportedApplications.select().order_by(SupportedApplications.app_name)
for app in app_list:
store.append([app.app_name, app.terminal_command])
return store
# def draw_table(redraw=False):
# store = get_app_list_store()
#
# app_list_treeview.set_model(store)
#
# renderer = Gtk.CellRendererText()
#
# if not redraw:
# column_app = Gtk.TreeViewColumn("Приложение", renderer, text=0)
# app_list_treeview.append_column(column_app)
#
# column_command = Gtk.TreeViewColumn("Команда", renderer, text=1)
# app_list_treeview.append_column(column_command)
def add_row(text, text_alignment):
row = Gtk.ListBoxRow()
gtkbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
row.add(gtkbox)
if text_alignment is "right":
message = Gtk.Label(text, xalign=1)
elif text_alignment is "left":
message = Gtk.Label(text, xalign=0)
gtkbox.pack_start(message, True, True, 0)
assistant_listbox.add(row)
assistant_listbox.show_all()
if __name__ == '__main__':
builder = Gtk.Builder()
builder.add_from_file("client1.glade")
builder.connect_signals(Handler())
main_window = builder.get_object("main_window")
# add_app_dialog = builder.get_object("add_app_dialog")
# edit_app_dialog = builder.get_object("edit_app_dialog")
# app_list_treeview = builder.get_object("app_list_treeview")
server_status_label = builder.get_object("server_status_label")
assistant_listbox = builder.get_object("assistant_listbox")
# select = app_list_treeview.get_selection()
# select.connect("changed", Handler.on_tree_selection_changed)
# draw_table()
client_bus = SessionBus()
client_bus.publish("org.LinuxAssistantClient", DBusService())
server_bus = SessionBus()
is_server_running = False
try:
server = server_bus.get("org.LinuxAssistantServer")
is_server_running = server.client_init()
except:
print("Server is not running")
if is_server_running is True:
server_status_label.set_text("Server is running")
else:
server_status_label.set_text("Server is not running")
main_window.show_all()
Gtk.main()
avg_line_length: 29.925532 | max_line_length: 111 | alphanum_fraction: 0.635087

hexsha: ae00a3d8222dce0e1279e4f7e14ff28cd42bb78d | size: 4,024 | ext: py | lang: Python
max_stars_repo: graphql_compiler/schema_generation/sqlalchemy/__init__.py | manesioz/graphql-compiler | b23be9c4a8e26f8c82e741625e04f7c9ac2e623b | ["Apache-2.0"] | max_stars_count: null | null | null
max_issues_repo: graphql_compiler/schema_generation/sqlalchemy/__init__.py | manesioz/graphql-compiler | b23be9c4a8e26f8c82e741625e04f7c9ac2e623b | ["Apache-2.0"] | max_issues_count: null | null | null
max_forks_repo: graphql_compiler/schema_generation/sqlalchemy/__init__.py | manesioz/graphql-compiler | b23be9c4a8e26f8c82e741625e04f7c9ac2e623b | ["Apache-2.0"] | max_forks_count: null | null | null
# Copyright 2019-present Kensho Technologies, LLC.
from ...schema.schema_info import SQLAlchemySchemaInfo
from ..graphql_schema import get_graphql_schema_from_schema_graph
from .edge_descriptors import get_join_descriptors_from_edge_descriptors
from .schema_graph_builder import get_sqlalchemy_schema_graph
def get_sqlalchemy_schema_info(
vertex_name_to_table, direct_edges, dialect, class_to_field_type_overrides=None
):
"""Return a SQLAlchemySchemaInfo from the metadata.
Relational databases are supported by compiling to SQLAlchemy core as an intermediate
language, and then relying on SQLAlchemy's compilation of the dialect-specific SQL string to
query the target database.
Constructing the SQLAlchemySchemaInfo class, which contains all the info required to compile
SQL queries, requires the use of SQLAlchemy Table objects representing underlying SQL
tables. These can be autogenerated from a SQL database through the reflect() method in a
SQLAlchemy Metadata object. It is also possible to construct the SQLAlchemy Table objects by
    using the class's init method and specifying the needed metadata. If you choose to use
    the latter approach, make sure to properly define the optional schema and primary_key fields since
the compiler relies on these to compile GraphQL to SQL.
Args:
vertex_name_to_table: dict, str -> SQLAlchemy Table. This dictionary is used to generate the
GraphQL objects in the schema in the SQLAlchemySchemaInfo. Each
SQLAlchemyTable will be represented as a GraphQL object. The GraphQL
object names are the dictionary keys. The fields of the GraphQL
objects will be inferred from the columns of the underlying tables.
The fields will have the same name as the underlying columns and
columns with unsupported types, (SQL types with no matching GraphQL
type), will be ignored.
direct_edges: dict, str-> DirectEdgeDescriptor. The traversal of a direct
edge gets compiled to a SQL join in graphql_to_sql(). Therefore, each
DirectEdgeDescriptor not only specifies the source and destination GraphQL
                      objects, but also which columns to use when generating a SQL join
between the underlying source and destination tables. The names of the edges
are the keys in the dictionary and the edges will be rendered as vertex fields
named out_<edgeName> and in_<edgeName> in the source and destination GraphQL
objects respectively. The direct edge names must not conflict with the GraphQL
object names.
dialect: sqlalchemy.engine.interfaces.Dialect, specifying the dialect we are compiling to
(e.g. sqlalchemy.dialects.mssql.dialect()).
class_to_field_type_overrides: optional dict, class name -> {field name -> field type},
(string -> {string -> GraphQLType}). Used to override the
type of a field in the class where it's first defined and all
the class's subclasses.
Returns:
SQLAlchemySchemaInfo containing the full information needed to compile SQL queries.
"""
schema_graph = get_sqlalchemy_schema_graph(vertex_name_to_table, direct_edges)
graphql_schema, type_equivalence_hints = get_graphql_schema_from_schema_graph(
schema_graph,
class_to_field_type_overrides=class_to_field_type_overrides,
hidden_classes=set(),
)
join_descriptors = get_join_descriptors_from_edge_descriptors(direct_edges)
return SQLAlchemySchemaInfo(
graphql_schema, type_equivalence_hints, dialect, vertex_name_to_table, join_descriptors
)
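# Hypothetical usage sketch (table, edge, and variable names below are invented): a call site that
# has already reflected its SQLAlchemy Table objects might build the schema info roughly as
#     from sqlalchemy.dialects import mssql
#     schema_info = get_sqlalchemy_schema_info(
#         vertex_name_to_table={"Author": author_table, "Book": book_table},
#         direct_edges={"Author_Books": author_to_book_descriptor},
#         dialect=mssql.dialect(),
#     )
# and then hand schema_info to graphql_to_sql() as described in the docstring above.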
avg_line_length: 60.969697 | max_line_length: 100 | alphanum_fraction: 0.69334

hexsha: ca45cbe6be7d553e55704f730055f27d85a044e6 | size: 2,771 | ext: py | lang: Python
max_stars_repo: src/ext/glbinding/codegeneration/scripts/gen_types.py | carl-221b/gen_planete | 89cf1096363c8f1e1ee0de3221e3cd5057862391 | ["MIT"] | max_stars_count: null | null | null
max_issues_repo: src/ext/glbinding/codegeneration/scripts/gen_types.py | carl-221b/gen_planete | 89cf1096363c8f1e1ee0de3221e3cd5057862391 | ["MIT"] | max_issues_count: null | null | null
max_forks_repo: src/ext/glbinding/codegeneration/scripts/gen_types.py | carl-221b/gen_planete | 89cf1096363c8f1e1ee0de3221e3cd5057862391 | ["MIT"] | max_forks_count: 1 | 2021-07-01T07:45:44.000Z | 2021-07-01T07:45:44.000Z
from binding import *
from classes.Type import *
REGULAR_TYPE_INTEGRATIONS = {
"GLextension" : [ "hashable", "streamable" ],
"GLboolean" : [ "streamable" ],
"GLenum" : [ "hashable", "streamable", "addable", "comparable" ]
}
BITFIELD_TYPE_INTEGRATIONS = [ "hashable", "bitfieldStreamable", "bitOperatable"]
TYPE_INTEGRATIONS = [ "addable", "bitOperatable", "bitfieldStreamable", "comparable", "hashable", "streamable"]
def integrationMap(integrationList):
return { integration: (integration in integrationList) for integration in TYPE_INTEGRATIONS}
def typeIntegrationMap(type):
return integrationMap(REGULAR_TYPE_INTEGRATIONS[type.name] if type.name in REGULAR_TYPE_INTEGRATIONS else [])
# ToDo: move this to Type class? (as well as convert an multiline convert)
enum_classes = [ "GLenum" ]
def convertTypedefLine(line, name):
if not line.startswith("typedef"):
return line
else:
return "using " + name + " = " + line[8:].replace(name, "")
def multilineConvertTypedef(type):
return "\n".join([ convertTypedefLine(line, type.name) for line in type.value.split('\n') ])
def convertTypedef(type):
if '\n' in type.value:
return multilineConvertTypedef(type)
t = parseType(type)
if type.name in enum_classes:
return "enum class " + type.name + " : " + t + ";"
if not type.value.startswith("typedef"):
return t
elif type.name == "GLboolean":
return "// Import of GLboolean is an include"
else:
return "using " + type.name + " = " + t + ";"
def convertType(type):
return convertTypedef(type).replace(" ;", ";").replace("( *)", "(*)").replace("(*)", "(GL_APIENTRY *)")
def genTypeContexts(types, bitfGroups):
typeContexts = [{"identifier": "GLextension",
"definition": "enum class GLextension : int;",
"integrations": integrationMap([ "hashable", "streamable" ]),
"hasIntegrations": True}]
for type in types: #TODO-LW: explicitly sort types and bitfGroups
integrations = typeIntegrationMap(type)
typeContexts.append({"identifier": type.name,
"definition": convertType(type),
"integrations": integrations,
"hasIntegrations": any(integrations.values()) })
for bitf in bitfGroups:
integrations = integrationMap(BITFIELD_TYPE_INTEGRATIONS)
typeContexts.append({"identifier": bitf.name,
"definition": "enum class {} : unsigned int;".format(bitf.name),
"integrations": integrations,
"hasIntegrations": any(integrations.values()) })
return typeContexts
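# Worked example of the helper above: integrationMap(["hashable"]) evaluates to
# {"addable": False, "bitOperatable": False, "bitfieldStreamable": False,
#  "comparable": False, "hashable": True, "streamable": False},
# which is the per-type flag dict that the template contexts built in genTypeContexts() carry.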
avg_line_length: 34.6375 | max_line_length: 113 | alphanum_fraction: 0.619632

hexsha: ffb0402c174027e873efa47a34380cb1763f72e2 | size: 1,278 | ext: py | lang: Python
max_stars_repo: negociant/trader/app/algoTrading/testRpc.py | XilinJia/Negociant | 53a0c0b4f40bd6d3ea1955745360f78dacfc71c1 | ["MIT"] | max_stars_count: null | null | null
max_issues_repo: negociant/trader/app/algoTrading/testRpc.py | XilinJia/Negociant | 53a0c0b4f40bd6d3ea1955745360f78dacfc71c1 | ["MIT"] | max_issues_count: null | null | null
max_forks_repo: negociant/trader/app/algoTrading/testRpc.py | XilinJia/Negociant | 53a0c0b4f40bd6d3ea1955745360f78dacfc71c1 | ["MIT"] | max_forks_count: null | null | null
# encoding: UTF-8
from __future__ import print_function
from time import sleep
from six.moves import input
from negociant.rpc import RpcClient
from negociant.trader.vtConstant import OFFSET_OPEN, DIRECTION_LONG
########################################################################
class TestClient(RpcClient):
""""""
#----------------------------------------------------------------------
def __init__(self, reqAddress, subAddress):
"""Constructor"""
super(TestClient, self).__init__(reqAddress, subAddress)
#----------------------------------------------------------------------
def callback(self, topic, data):
"""回调函数实现"""
print(('client received topic:', topic, ', data:', data))
if __name__ == '__main__':
reqAddress = 'tcp://localhost:8899'
subAddress = 'tcp://localhost:9988'
tc = TestClient(reqAddress, subAddress)
tc.subscribeTopic('')
tc.start()
setting = {
'templateName': u'BestLimit 最优限价',
'vtSymbol': 'rb1810.SHFE',
'volume': 10,
'direction': DIRECTION_LONG,
'offset': OFFSET_OPEN
}
algoName = tc.addAlgo(setting)
    print(u'Algorithm started, instance name:', algoName)
sleep(5)
tc.stopAlgo(algoName)
    print(u'Algorithm stopped')
input()
avg_line_length: 26.081633 | max_line_length: 75 | alphanum_fraction: 0.525039

hexsha: 2176898c80a71ef6f0281730220c32c7980d9609 | size: 5,725 | ext: py | lang: Python
max_stars_repo: sympy/physics/mechanics/tests/test_jointsmethod.py | nashalex/sympy | aec3e6512be46f0558f5dbcf2b4d723496c91649 | ["BSD-3-Clause"] | max_stars_count: 8,323 | 2015-01-02T15:51:43.000Z | 2022-03-31T13:13:19.000Z
max_issues_repo: sympy/physics/mechanics/tests/test_jointsmethod.py | nashalex/sympy | aec3e6512be46f0558f5dbcf2b4d723496c91649 | ["BSD-3-Clause"] | max_issues_count: 15,102 | 2015-01-01T01:33:17.000Z | 2022-03-31T22:53:13.000Z
max_forks_repo: sympy/physics/mechanics/tests/test_jointsmethod.py | nashalex/sympy | aec3e6512be46f0558f5dbcf2b4d723496c91649 | ["BSD-3-Clause"] | max_forks_count: 4,490 | 2015-01-01T17:48:07.000Z | 2022-03-31T17:24:05.000Z
from sympy import symbols, Matrix, cos, sin, expand, trigsimp
from sympy.physics.mechanics import (PinJoint, JointsMethod, Body, KanesMethod,
PrismaticJoint, LagrangesMethod, inertia)
from sympy.physics.vector import dynamicsymbols, ReferenceFrame
from sympy.testing.pytest import raises
t = dynamicsymbols._t # type: ignore
def test_jointsmethod():
P = Body('P')
C = Body('C')
Pin = PinJoint('P1', P, C)
C_ixx, g = symbols('C_ixx g')
theta, omega = dynamicsymbols('theta_P1, omega_P1')
P.apply_force(g*P.y)
method = JointsMethod(P, Pin)
assert method.frame == P.frame
assert method.bodies == [C, P]
assert method.loads == [(P.masscenter, g*P.frame.y)]
assert method.q == [theta]
assert method.u == [omega]
assert method.kdes == [omega - theta.diff()]
soln = method.form_eoms()
assert soln == Matrix([[-C_ixx*omega.diff()]])
assert method.forcing_full == Matrix([[omega], [0]])
assert method.mass_matrix_full == Matrix([[1, 0], [0, C_ixx]])
assert isinstance(method.method, KanesMethod)
def test_jointmethod_duplicate_coordinates_speeds():
P = Body('P')
C = Body('C')
T = Body('T')
q, u = dynamicsymbols('q u')
P1 = PinJoint('P1', P, C, q)
P2 = PrismaticJoint('P2', C, T, q)
raises(ValueError, lambda: JointsMethod(P, P1, P2))
P1 = PinJoint('P1', P, C, speeds=u)
P2 = PrismaticJoint('P2', C, T, speeds=u)
raises(ValueError, lambda: JointsMethod(P, P1, P2))
P1 = PinJoint('P1', P, C, q, u)
P2 = PrismaticJoint('P2', C, T, q, u)
raises(ValueError, lambda: JointsMethod(P, P1, P2))
def test_complete_simple_double_pendulum():
q1, q2 = dynamicsymbols('q1 q2')
u1, u2 = dynamicsymbols('u1 u2')
m, l, g = symbols('m l g')
C = Body('C') # ceiling
PartP = Body('P', mass=m)
PartR = Body('R', mass=m)
J1 = PinJoint('J1', C, PartP, speeds=u1, coordinates=q1,
child_joint_pos=-l*PartP.x, parent_axis=C.z,
child_axis=PartP.z)
J2 = PinJoint('J2', PartP, PartR, speeds=u2, coordinates=q2,
child_joint_pos=-l*PartR.x, parent_axis=PartP.z,
child_axis=PartR.z)
PartP.apply_force(m*g*C.x)
PartR.apply_force(m*g*C.x)
method = JointsMethod(C, J1, J2)
method.form_eoms()
assert expand(method.mass_matrix_full) == Matrix([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 2*l**2*m*cos(q2) + 3*l**2*m, l**2*m*cos(q2) + l**2*m],
[0, 0, l**2*m*cos(q2) + l**2*m, l**2*m]])
assert trigsimp(method.forcing_full) == trigsimp(Matrix([[u1], [u2], [-g*l*m*(sin(q1 + q2) + sin(q1)) -
g*l*m*sin(q1) + l**2*m*(2*u1 + u2)*u2*sin(q2)],
[-g*l*m*sin(q1 + q2) - l**2*m*u1**2*sin(q2)]]))
def test_two_dof_joints():
q1, q2, u1, u2 = dynamicsymbols('q1 q2 u1 u2')
m, c1, c2, k1, k2 = symbols('m c1 c2 k1 k2')
W = Body('W')
B1 = Body('B1', mass=m)
B2 = Body('B2', mass=m)
J1 = PrismaticJoint('J1', W, B1, coordinates=q1, speeds=u1)
J2 = PrismaticJoint('J2', B1, B2, coordinates=q2, speeds=u2)
W.apply_force(k1*q1*W.x, reaction_body=B1)
W.apply_force(c1*u1*W.x, reaction_body=B1)
B1.apply_force(k2*q2*W.x, reaction_body=B2)
B1.apply_force(c2*u2*W.x, reaction_body=B2)
method = JointsMethod(W, J1, J2)
method.form_eoms()
MM = method.mass_matrix
forcing = method.forcing
rhs = MM.LUsolve(forcing)
assert expand(rhs[0]) == expand((-k1 * q1 - c1 * u1 + k2 * q2 + c2 * u2)/m)
assert expand(rhs[1]) == expand((k1 * q1 + c1 * u1 - 2 * k2 * q2 - 2 *
c2 * u2) / m)
def test_simple_pedulum():
l, m, g = symbols('l m g')
C = Body('C')
b = Body('b', mass=m)
q = dynamicsymbols('q')
P = PinJoint('P', C, b, speeds=q.diff(t), coordinates=q, child_joint_pos = -l*b.x,
parent_axis=C.z, child_axis=b.z)
b.potential_energy = - m * g * l * cos(q)
method = JointsMethod(C, P)
method.form_eoms(LagrangesMethod)
rhs = method.rhs()
assert rhs[1] == -g*sin(q)/l
def test_chaos_pendulum():
#https://www.pydy.org/examples/chaos_pendulum.html
mA, mB, lA, lB, IAxx, IBxx, IByy, IBzz, g = symbols('mA, mB, lA, lB, IAxx, IBxx, IByy, IBzz, g')
theta, phi, omega, alpha = dynamicsymbols('theta phi omega alpha')
A = ReferenceFrame('A')
B = ReferenceFrame('B')
rod = Body('rod', mass=mA, frame=A, central_inertia=inertia(A, IAxx, IAxx, 0))
plate = Body('plate', mass=mB, frame=B, central_inertia=inertia(B, IBxx, IByy, IBzz))
C = Body('C')
J1 = PinJoint('J1', C, rod, coordinates=theta, speeds=omega,
child_joint_pos=-lA*rod.z, parent_axis=C.y, child_axis=rod.y)
J2 = PinJoint('J2', rod, plate, coordinates=phi, speeds=alpha,
parent_joint_pos=(lB-lA)*rod.z, parent_axis=rod.z, child_axis=plate.z)
rod.apply_force(mA*g*C.z)
plate.apply_force(mB*g*C.z)
method = JointsMethod(C, J1, J2)
method.form_eoms()
MM = method.mass_matrix
forcing = method.forcing
rhs = MM.LUsolve(forcing)
xd = (-2 * IBxx * alpha * omega * sin(phi) * cos(phi) + 2 * IByy * alpha * omega * sin(phi) *
cos(phi) - g * lA * mA * sin(theta) - g * lB * mB * sin(theta)) / (IAxx + IBxx *
sin(phi)**2 + IByy * cos(phi)**2 + lA**2 * mA + lB**2 * mB)
assert (rhs[0] - xd).simplify() == 0
xd = (IBxx - IByy) * omega**2 * sin(phi) * cos(phi) / IBzz
assert (rhs[1] - xd).simplify() == 0
avg_line_length: 40.316901 | max_line_length: 115 | alphanum_fraction: 0.566463

hexsha: cbc969e92385375d3eda007a1721770fc41e30c5 | size: 92 | ext: py | lang: Python
max_stars_repo: files/action_plugins/__init__.py | exastro-playbook-collection/setup_paragen | 4c6778fe5ca649019f1758c02acff3bf920b4b17 | ["Apache-2.0"] | max_stars_count: null | null | null
max_issues_repo: files/action_plugins/__init__.py | exastro-playbook-collection/setup_paragen | 4c6778fe5ca649019f1758c02acff3bf920b4b17 | ["Apache-2.0"] | max_issues_count: null | null | null
max_forks_repo: files/action_plugins/__init__.py | exastro-playbook-collection/setup_paragen | 4c6778fe5ca649019f1758c02acff3bf920b4b17 | ["Apache-2.0"] | max_forks_count: null | null | null
# -*- coding: UTF-8 -*-
# action plugins are not a package inside Ansible itself,
# but laying them out as a package here lets them be tested as one
avg_line_length: 18.4 | max_line_length: 39 | alphanum_fraction: 0.73913

hexsha: 7cbd361f5e6b82a6ce070264edbe9179ad08ff0b | size: 6,254 | ext: py | lang: Python
max_stars_repo: test/util/bitcoin-util-test.py | zahidaliayub/globaltoken | 9fb33c559e995714d0a3f36b16e8e80ceafb5ae3 | ["MIT"] | max_stars_count: null | null | null
max_issues_repo: test/util/bitcoin-util-test.py | zahidaliayub/globaltoken | 9fb33c559e995714d0a3f36b16e8e80ceafb5ae3 | ["MIT"] | max_issues_count: null | null | null
max_forks_repo: test/util/bitcoin-util-test.py | zahidaliayub/globaltoken | 9fb33c559e995714d0a3f36b16e8e80ceafb5ae3 | ["MIT"] | max_forks_count: null | null | null
#!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for bitcoin utils.
Runs automatically during `make check`.
Can also be run manually."""
import argparse
import binascii
import configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
config = configparser.ConfigParser()
config.read_file(open(os.path.dirname(__file__) + "/../config.ini"))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(config["environment"]["SRCDIR"] + "/test/util/data", "bitcoin-util-test.json", config["environment"])
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = testDir + "/" + input_basename
raw_data = open(input_filename).read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = buildenv["BUILDDIR"] + "/src/" + testObj['exec'] + buildenv["EXEEXT"]
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = testDir + "/" + testObj['input']
inputData = open(filename).read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(testDir + "/" + outputFn).read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
# That stderr is empty if no errors are expected. However, globaltoken-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main()
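# Illustrative test-case shape (the concrete values are hypothetical): each object in
# bitcoin-util-test.json uses the keys read above, e.g.
#     {"exec": "./globaltoken-tx", "args": ["-json", "-create"], "output_cmp": "blanktx.json",
#      "description": "Creates a blank transaction", "return_code": 0}
# where "input", "return_code" and "error_txt" are optional.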
avg_line_length: 36.788235 | max_line_length: 125 | alphanum_fraction: 0.629677

hexsha: e477859829d1b5d0a076e425df7cc6a7898fc127 | size: 2,143 | ext: py | lang: Python
max_stars_repo: demo_pipline.py | sokunmin/pytorch-video-pipeline | 6252c396eb7f704d3c9b07ff0021b079340bc995 | ["MIT"] | max_stars_count: null | null | null
max_issues_repo: demo_pipline.py | sokunmin/pytorch-video-pipeline | 6252c396eb7f704d3c9b07ff0021b079340bc995 | ["MIT"] | max_issues_count: null | null | null
max_forks_repo: demo_pipline.py | sokunmin/pytorch-video-pipeline | 6252c396eb7f704d3c9b07ff0021b079340bc995 | ["MIT"] | max_forks_count: null | null | null
#!/usr/bin/env python3
from pathlib import Path
import argparse
import logging
import json
import cv2
import time
from watchdog.streams.streams_plugin import StreamPipeline
from watchdog.utils.decoder import ConfigDecoder, Profiler
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-c', '--config', metavar="FILE",
default=Path(__file__).parent / 'cfg_bak.json',
help='path to configuration JSON file')
parser.add_argument('-l', '--log', metavar="FILE",
help='output a MOT Challenge format log (e.g. eval/results/mot17-04.txt)')
args = parser.parse_args()
# set up logging
logging.basicConfig(format='%(asctime)s [%(levelname)8s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(StreamPipeline.__name__)
logger.setLevel(logging.DEBUG)
# load config file
with open(args.config) as cfg_file:
config = json.load(cfg_file, cls=ConfigDecoder)
log = None
pipeline = StreamPipeline()
pipeline.init(config)
cv2.namedWindow("Video", cv2.WINDOW_AUTOSIZE)
logger.info('Starting video capture ...')
pipeline.exec()
try:
with Profiler('app') as prof:
start_time = time.time()
dt_avg = 0
while cv2.getWindowProperty("Video", 0) >= 0:
if not pipeline.empty():
tiled_image = pipeline.read()
dt = time.time() - start_time
start_time = time.time()
dt_avg = .9 * dt_avg + .1 * dt
fps = 1 / dt_avg
cv2.putText(tiled_image, str(round(fps, 1)) + ' fps', (0, 100),
cv2.FONT_HERSHEY_SIMPLEX, .75, (255, 255, 255), 2)
cv2.imshow('Video', tiled_image)
if cv2.waitKey(1) & 0xFF == 27:
break
finally:
if log is not None:
log.close()
pipeline.quit()
cv2.destroyAllWindows()
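# Note on the overlay math in the loop above: the per-frame interval is smoothed with an
# exponential moving average, dt_avg = 0.9 * dt_avg + 0.1 * dt, and the displayed FPS is
# simply 1 / dt_avg, so a single slow frame only nudges the readout instead of making it jump.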
if __name__ == '__main__':
main()
avg_line_length: 32.469697 | max_line_length: 103 | alphanum_fraction: 0.574428

hexsha: e473d814c59a16314587fb6aefd52a4dee21d028 | size: 40,336 | ext: py | lang: Python
max_stars_repo: tests/test_metadata_wrangler.py | NYPL-Simplified/circulation | 1ea3eb81d9c8e3121b0fc0e1a40ac42956c29084 | ["Apache-2.0"] | max_stars_count: 16 | 2016-06-27T18:09:37.000Z | 2021-12-07T15:20:52.000Z
max_issues_repo: tests/test_metadata_wrangler.py | NYPL-Simplified/circulation | 1ea3eb81d9c8e3121b0fc0e1a40ac42956c29084 | ["Apache-2.0"] | max_issues_count: 809 | 2016-01-25T15:41:05.000Z | 2022-03-29T16:12:38.000Z
max_forks_repo: tests/test_metadata_wrangler.py | NYPL-Simplified/circulation | 1ea3eb81d9c8e3121b0fc0e1a40ac42956c29084 | ["Apache-2.0"] | max_forks_count: 19 | 2016-05-06T16:18:50.000Z | 2021-05-01T06:33:18.000Z
"""Tests of the Monitors and CoverageProviders associated with the metadata
wrangler.
"""
import datetime
import feedparser
import pytest
from core.config import (
CannotLoadConfiguration,
Configuration,
temp_config,
)
from core.coverage import (
CoverageFailure,
)
from core.model import (
CoverageRecord,
DataSource,
ExternalIntegration,
Hyperlink,
Identifier,
LicensePool,
Timestamp,
)
from core.opds_import import MockMetadataWranglerOPDSLookup
from core.testing import (
MockRequestsResponse,
AlwaysSuccessfulCoverageProvider,
)
from core.util.datetime_helpers import (
datetime_utc,
utc_now,
)
from core.util.http import BadResponseException
from core.util.opds_writer import OPDSFeed
from api.metadata_wrangler import (
BaseMetadataWranglerCoverageProvider,
MetadataUploadCoverageProvider,
MetadataWranglerCollectionReaper,
MetadataWranglerCollectionRegistrar,
MWAuxiliaryMetadataMonitor,
MWCollectionUpdateMonitor,
)
from api.testing import MonitorTest
from core.testing import DatabaseTest
from . import sample_data
class InstrumentedMWCollectionUpdateMonitor(MWCollectionUpdateMonitor):
def __init__(self, *args, **kwargs):
super(InstrumentedMWCollectionUpdateMonitor, self).__init__(*args, **kwargs)
self.imports = []
def import_one_feed(self, timestamp, url):
self.imports.append((timestamp, url))
return super(InstrumentedMWCollectionUpdateMonitor,
self).import_one_feed(timestamp, url)
class TestMWCollectionUpdateMonitor(MonitorTest):
def setup_method(self):
super(TestMWCollectionUpdateMonitor, self).setup_method()
self._external_integration(
ExternalIntegration.METADATA_WRANGLER,
ExternalIntegration.METADATA_GOAL,
username='abc', password='def', url=self._url
)
self.collection = self._collection(
protocol=ExternalIntegration.BIBLIOTHECA, external_account_id='lib'
)
self.lookup = MockMetadataWranglerOPDSLookup.from_config(
self._db, self.collection
)
self.monitor = InstrumentedMWCollectionUpdateMonitor(
self._db, self.collection, self.lookup
)
def test_monitor_requires_authentication(self):
class Mock(object):
authenticated = False
self.monitor.lookup = Mock()
with pytest.raises(Exception) as excinfo:
self.monitor.run_once(self.ts)
assert "no authentication credentials" in str(excinfo.value)
def test_import_one_feed(self):
data = sample_data('metadata_updates_response.opds', 'opds')
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
next_links, editions, timestamp = self.monitor.import_one_feed(
None, None
)
# The 'next' links found in the OPDS feed are returned.
assert ['http://next-link/'] == next_links
# Insofar as is possible, all <entry> tags are converted into
# Editions.
assert ['9781594632556'] == [x.primary_identifier.identifier
for x in editions]
# The earliest time found in the OPDS feed is returned as a
# candidate for the Monitor's timestamp.
assert datetime_utc(2016, 9, 20, 19, 37, 2) == timestamp
def test_empty_feed_stops_import(self):
# We don't follow the 'next' link of an empty feed.
data = sample_data('metadata_updates_empty_response.opds', 'opds')
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
new_timestamp = self.monitor.run()
# We could have followed the 'next' link, but we chose not to.
assert [(None, None)] == self.monitor.imports
assert 1 == len(self.lookup.requests)
# Since there were no <entry> tags, the timestamp's finish
# date was set to the <updated> date of the feed itself, minus
# one day (to avoid race conditions).
assert (datetime_utc(2016, 9, 19, 19, 37, 10) ==
self.monitor.timestamp().finish)
def test_run_once(self):
# Setup authentication and Metadata Wrangler details.
lp = self._licensepool(
None, data_source_name=DataSource.BIBLIOTHECA,
collection=self.collection
)
lp.identifier.type = Identifier.BIBLIOTHECA_ID
isbn = Identifier.parse_urn(self._db, 'urn:isbn:9781594632556')[0]
lp.identifier.equivalent_to(
DataSource.lookup(self._db, DataSource.BIBLIOTHECA), isbn, 1
)
assert [] == lp.identifier.links
assert [] == lp.identifier.measurements
# Queue some data to be found.
responses = (
'metadata_updates_response.opds',
'metadata_updates_empty_response.opds',
)
for filename in responses:
data = sample_data(filename, 'opds')
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
timestamp = self.ts
new_timestamp = self.monitor.run_once(timestamp)
# We have a new value to use for the Monitor's timestamp -- the
# earliest date seen in the last OPDS feed that contained
# any entries.
assert datetime_utc(2016, 9, 20, 19, 37, 2) == new_timestamp.finish
assert "Editions processed: 1" == new_timestamp.achievements
# Normally run_once() doesn't update the monitor's timestamp,
# but this implementation does, so that work isn't redone if
# run_once() crashes or the monitor is killed.
assert new_timestamp.finish == self.monitor.timestamp().finish
# The original Identifier has information from the
# mock Metadata Wrangler.
mw_source = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)
assert 3 == len(lp.identifier.links)
[quality] = lp.identifier.measurements
assert mw_source == quality.data_source
# Check the URLs we processed.
url1, url2 = [x[0] for x in self.lookup.requests]
# The first URL processed was the default one for the
# MetadataWranglerOPDSLookup.
assert self.lookup.get_collection_url(self.lookup.UPDATES_ENDPOINT) == url1
# The second URL processed was whatever we saw in the 'next' link.
assert "http://next-link/" == url2
# Since that URL didn't contain any new imports, we didn't process
# its 'next' link, http://another-next-link/.
def test_no_changes_means_no_timestamp_update(self):
before = utc_now()
self.monitor.timestamp().finish = before
# We're going to ask the metadata wrangler for updates, but
# there will be none -- not even a feed-level update
data = sample_data('metadata_updates_empty_response_no_feed_timestamp.opds', 'opds')
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
new_timestamp = self.monitor.run_once(self.ts)
# run_once() returned a TimestampData referencing the original
# timestamp, and the Timestamp object was not updated.
assert before == new_timestamp.finish
assert before == self.monitor.timestamp().finish
        # If timestamp.finish is None before the update is run, and
        # there are no updates, the returned TimestampData asks for the
        # finish time to be cleared (Timestamp.CLEAR_VALUE).
self.monitor.timestamp().finish = None
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
new_timestamp = self.monitor.run_once(self.ts)
assert Timestamp.CLEAR_VALUE == new_timestamp.finish
def test_no_import_loop(self):
# We stop processing a feed's 'next' link if it links to a URL we've
# already seen.
data = sample_data('metadata_updates_response.opds', 'opds')
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
data = data.replace(b"http://next-link/", b"http://different-link/")
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
        # Queue the same feed a third time; its 'next' link has already
        # been seen, which introduces a loop.
data = data.replace(b"http://next-link/", b"http://next-link/")
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
new_timestamp = self.monitor.run_once(self.ts)
# Even though all these pages had the same content, we kept
# processing them until we encountered a 'next' link we had
# seen before; then we stopped.
first, second, third = self.monitor.imports
assert (None, None) == first
assert (None, 'http://next-link/') == second
assert (None, 'http://different-link/') == third
assert datetime_utc(2016, 9, 20, 19, 37, 2) == new_timestamp.finish
def test_get_response(self):
class Mock(MockMetadataWranglerOPDSLookup):
def __init__(self):
self.last_timestamp = None
self.urls = []
def updates(self, timestamp):
self.last_timestamp = timestamp
return MockRequestsResponse(
200, {"content-type": OPDSFeed.ACQUISITION_FEED_TYPE}
)
def _get(self, _url):
self.urls.append(_url)
return MockRequestsResponse(
200, {"content-type": OPDSFeed.ACQUISITION_FEED_TYPE}
)
# If you pass in None for the URL, it passes the timestamp into
# updates()
lookup = Mock()
monitor = MWCollectionUpdateMonitor(
self._db, self.collection, lookup
)
timestamp = object()
response = monitor.get_response(timestamp=timestamp, url=None)
assert 200 == response.status_code
assert timestamp == lookup.last_timestamp
assert [] == lookup.urls
# If you pass in a URL, the timestamp is ignored and
# the URL is passed into _get().
lookup = Mock()
monitor = MWCollectionUpdateMonitor(
self._db, self.collection, lookup
)
response = monitor.get_response(timestamp=None, url='http://now used/')
assert 200 == response.status_code
assert None == lookup.last_timestamp
assert ['http://now used/'] == lookup.urls
class TestMWAuxiliaryMetadataMonitor(MonitorTest):
def setup_method(self):
super(TestMWAuxiliaryMetadataMonitor, self).setup_method()
self._external_integration(
ExternalIntegration.METADATA_WRANGLER,
ExternalIntegration.METADATA_GOAL,
username='abc', password='def', url=self._url
)
self.collection = self._collection(
protocol=ExternalIntegration.OVERDRIVE, external_account_id='lib'
)
self.lookup = MockMetadataWranglerOPDSLookup.from_config(
self._db, self.collection
)
provider = AlwaysSuccessfulCoverageProvider(self._db)
self.monitor = MWAuxiliaryMetadataMonitor(
self._db, self.collection, lookup=self.lookup, provider=provider
)
def test_monitor_requires_authentication(self):
class Mock(object):
authenticated = False
self.monitor.lookup = Mock()
with pytest.raises(Exception) as excinfo:
self.monitor.run_once(self.ts)
assert "no authentication credentials" in str(excinfo.value)
def prep_feed_identifiers(self):
ignored = self._identifier()
# Create an Overdrive ID to match the one in the feed.
overdrive = self._identifier(
identifier_type=Identifier.OVERDRIVE_ID,
foreign_id='4981c34f-d518-48ff-9659-2601b2b9bdc1'
)
# Create an ISBN to match the one in the feed.
isbn = self._identifier(
identifier_type=Identifier.ISBN, foreign_id='9781602835740'
)
        # Create an Axis 360 ID equivalent to the other ISBN in the feed.
axis_360 = self._identifier(
identifier_type=Identifier.AXIS_360_ID, foreign_id='fake'
)
axis_360_isbn = self._identifier(
identifier_type=Identifier.ISBN, foreign_id='9781569478295'
)
axis_source = DataSource.lookup(self._db, DataSource.AXIS_360)
axis_360.equivalent_to(axis_source, axis_360_isbn, 1)
self._db.commit()
# Put all of the identifiers in the collection.
for identifier in [overdrive, isbn, axis_360]:
self._edition(
data_source_name=axis_source.name,
with_license_pool=True,
identifier_type=identifier.type,
identifier_id=identifier.identifier,
collection=self.collection,
)
return overdrive, isbn, axis_360
def test_get_identifiers(self):
overdrive, isbn, axis_360 = self.prep_feed_identifiers()
data = sample_data('metadata_data_needed_response.opds', 'opds')
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
identifiers, next_links = self.monitor.get_identifiers()
# The expected identifiers are returned, including the mapped axis_360
# identifier.
assert sorted([overdrive, axis_360, isbn]) == sorted(identifiers)
assert ['http://next-link'] == next_links
def test_run_once(self):
overdrive, isbn, axis_360 = self.prep_feed_identifiers()
# Give one of the identifiers a full work.
self._work(presentation_edition=overdrive.primarily_identifies[0])
# And another identifier a work without entries.
w = self._work(presentation_edition=isbn.primarily_identifies[0])
w.simple_opds_entry = w.verbose_opds_entry = None
# Queue some response feeds.
feed1 = sample_data('metadata_data_needed_response.opds', 'opds')
feed2 = sample_data('metadata_data_needed_empty_response.opds', 'opds')
for feed in [feed1, feed2]:
self.lookup.queue_response(
200, {'content-type' : OPDSFeed.ACQUISITION_FEED_TYPE}, feed
)
progress = self.monitor.run_once(self.ts)
# Only the identifier with a work has been given coverage.
assert "Identifiers processed: 1" == progress.achievements
# The TimestampData returned by run_once() does not include
# any timing information -- that will be applied by run().
assert None == progress.start
assert None == progress.finish
record = CoverageRecord.lookup(
overdrive, self.monitor.provider.data_source,
operation=self.monitor.provider.operation
)
assert record
for identifier in [axis_360, isbn]:
record = CoverageRecord.lookup(
identifier, self.monitor.provider.data_source,
operation=self.monitor.provider.operation
)
assert None == record
class MetadataWranglerCoverageProviderTest(DatabaseTest):
def create_provider(self, **kwargs):
lookup = MockMetadataWranglerOPDSLookup.from_config(self._db, self.collection)
return self.TEST_CLASS(self.collection, lookup, **kwargs)
def setup_method(self):
super(MetadataWranglerCoverageProviderTest, self).setup_method()
self.integration = self._external_integration(
ExternalIntegration.METADATA_WRANGLER,
goal=ExternalIntegration.METADATA_GOAL, url=self._url,
username='abc', password='def'
)
self.source = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)
self.collection = self._collection(
protocol=ExternalIntegration.BIBLIOTHECA, external_account_id='lib'
)
self.provider = self.create_provider()
self.lookup_client = self.provider.lookup_client
def opds_feed_identifiers(self):
"""Creates three Identifiers to use for testing with sample OPDS files."""
# An identifier directly represented in the OPDS response.
valid_id = self._identifier(foreign_id='2020110')
# An identifier mapped to an identifier represented in the OPDS
# response.
source = DataSource.lookup(self._db, DataSource.AXIS_360)
mapped_id = self._identifier(
identifier_type=Identifier.AXIS_360_ID, foreign_id='0015187876'
)
equivalent_id = self._identifier(
identifier_type=Identifier.ISBN, foreign_id='9781936460236'
)
mapped_id.equivalent_to(source, equivalent_id, 1)
# An identifier that's not represented in the OPDS response.
lost_id = self._identifier()
return valid_id, mapped_id, lost_id
class TestBaseMetadataWranglerCoverageProvider(MetadataWranglerCoverageProviderTest):
class Mock(BaseMetadataWranglerCoverageProvider):
SERVICE_NAME = "Mock"
DATA_SOURCE_NAME = DataSource.OVERDRIVE
TEST_CLASS = Mock
def test_must_be_authenticated(self):
"""CannotLoadConfiguration is raised if you try to create a
metadata wrangler coverage provider that can't authenticate
with the metadata wrangler.
"""
class UnauthenticatedLookupClient(object):
authenticated = False
with pytest.raises(CannotLoadConfiguration) as excinfo:
self.Mock(self.collection, UnauthenticatedLookupClient())
assert "Authentication for the Library Simplified Metadata Wrangler " in str(excinfo.value)
def test_input_identifier_types(self):
"""Verify all the different types of identifiers we send
to the metadata wrangler.
"""
assert (
set([
Identifier.OVERDRIVE_ID,
Identifier.BIBLIOTHECA_ID,
Identifier.AXIS_360_ID,
Identifier.ONECLICK_ID,
Identifier.URI,
]) ==
set(BaseMetadataWranglerCoverageProvider.INPUT_IDENTIFIER_TYPES))
def test_create_identifier_mapping(self):
# Most identifiers map to themselves.
overdrive = self._identifier(Identifier.OVERDRIVE_ID)
# But Axis 360 and 3M identifiers map to equivalent ISBNs.
axis = self._identifier(Identifier.AXIS_360_ID)
threem = self._identifier(Identifier.THREEM_ID)
isbn_axis = self._identifier(Identifier.ISBN)
isbn_threem = self._identifier(Identifier.ISBN)
who_says = DataSource.lookup(self._db, DataSource.AXIS_360)
axis.equivalent_to(who_says, isbn_axis, 1)
threem.equivalent_to(who_says, isbn_threem, 1)
mapping = self.provider.create_identifier_mapping([overdrive, axis, threem])
assert overdrive == mapping[overdrive]
assert axis == mapping[isbn_axis]
assert threem == mapping[isbn_threem]
def test_coverage_records_for_unhandled_items_include_collection(self):
# NOTE: This could be made redundant by adding test coverage to
# CoverageProvider.process_batch_and_handle_results in core.
data = sample_data('metadata_sync_response.opds', 'opds')
self.lookup_client.queue_response(
200, {'content-type': OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
identifier = self._identifier()
self.provider.process_batch_and_handle_results([identifier])
[record] = identifier.coverage_records
assert CoverageRecord.TRANSIENT_FAILURE == record.status
assert self.provider.data_source == record.data_source
assert self.provider.operation == record.operation
assert self.provider.collection == record.collection
class TestMetadataWranglerCollectionRegistrar(MetadataWranglerCoverageProviderTest):
TEST_CLASS = MetadataWranglerCollectionRegistrar
def test_constants(self):
# This CoverageProvider runs Identifiers through the 'lookup'
# endpoint and marks success with CoverageRecords that have
# the IMPORT_OPERATION operation.
assert self.provider.lookup_client.lookup == self.provider.api_method
assert CoverageRecord.IMPORT_OPERATION == self.TEST_CLASS.OPERATION
def test_process_batch(self):
"""End-to-end test of the registrar's process_batch() implementation.
"""
data = sample_data('metadata_sync_response.opds', 'opds')
self.lookup_client.queue_response(
200, {'content-type': OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
valid_id, mapped_id, lost_id = self.opds_feed_identifiers()
results = self.provider.process_batch(
[valid_id, mapped_id, lost_id]
)
# The Identifier that resulted in a 200 message was returned.
#
# The Identifier that resulted in a 201 message was returned.
#
# The Identifier that was ignored by the server was not
# returned.
#
# The Identifier that was not requested but was sent back by
# the server anyway was ignored.
assert sorted([valid_id, mapped_id]) == sorted(results)
def test_process_batch_errors(self):
"""When errors are raised during batch processing, an exception is
raised and no CoverageRecords are created.
"""
# This happens if the 'server' sends data with the wrong media
# type.
self.lookup_client.queue_response(
200, {'content-type': 'json/application'}, '{ "title": "It broke." }'
)
id1 = self._identifier()
id2 = self._identifier()
with pytest.raises(BadResponseException) as excinfo:
self.provider.process_batch([id1, id2])
assert 'Wrong media type' in str(excinfo.value)
assert [] == id1.coverage_records
assert [] == id2.coverage_records
        # Or if the 'server' sends an error response code.
self.lookup_client.queue_response(
500, {'content-type': OPDSFeed.ACQUISITION_FEED_TYPE},
'Internal Server Error'
)
with pytest.raises(BadResponseException) as excinfo:
self.provider.process_batch([id1, id2])
assert "Got status code 500" in str(excinfo.value)
assert [] == id1.coverage_records
assert [] == id2.coverage_records
# If a message comes back with an unexpected status, a
# CoverageFailure is created.
data = sample_data('unknown_message_status_code.opds', 'opds')
valid_id = self.opds_feed_identifiers()[0]
self.lookup_client.queue_response(
200, {'content-type': OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
[result] = self.provider.process_batch([valid_id])
assert True == isinstance(result, CoverageFailure)
assert valid_id == result.obj
assert '418: Mad Hatter' == result.exception
# The OPDS importer didn't know which Collection to associate
# with this CoverageFailure, but the CoverageProvider does,
# and it set .collection appropriately.
assert self.provider.collection == result.collection
def test_items_that_need_coverage_excludes_unavailable_items(self):
"""A LicensePool that's not actually available doesn't need coverage.
"""
edition, pool = self._edition(
with_license_pool=True, collection=self.collection,
identifier_type=Identifier.BIBLIOTHECA_ID
)
pool.licenses_owned = 0
assert 0 == self.provider.items_that_need_coverage().count()
# Open-access titles _do_ need coverage.
pool.open_access = True
assert [pool.identifier] == self.provider.items_that_need_coverage().all()
def test_items_that_need_coverage_removes_reap_records_for_relicensed_items(self):
"""A LicensePool that's not actually available doesn't need coverage.
"""
edition, pool = self._edition(
with_license_pool=True, collection=self.collection,
identifier_type=Identifier.BIBLIOTHECA_ID
)
identifier = pool.identifier
original_coverage_records = list(identifier.coverage_records)
# This identifier was reaped...
cr = self._coverage_record(
pool.identifier, self.provider.data_source,
operation=CoverageRecord.REAP_OPERATION,
collection=self.collection
)
assert (
set(original_coverage_records + [cr]) ==
set(identifier.coverage_records))
# ... but then it was relicensed.
pool.licenses_owned = 10
assert [identifier] == self.provider.items_that_need_coverage().all()
# The now-inaccurate REAP record has been removed.
assert original_coverage_records == identifier.coverage_records
def test_identifier_covered_in_one_collection_not_covered_in_another(self):
edition, pool = self._edition(
with_license_pool=True, collection=self.collection,
identifier_type=Identifier.BIBLIOTHECA_ID
)
identifier = pool.identifier
other_collection = self._collection()
# This Identifier needs coverage.
qu = self.provider.items_that_need_coverage()
assert [identifier] == qu.all()
# Adding coverage for an irrelevant collection won't fix that.
cr = self._coverage_record(
pool.identifier, self.provider.data_source,
operation=self.provider.OPERATION,
collection=other_collection
)
assert [identifier] == qu.all()
# Adding coverage for the relevant collection will.
cr = self._coverage_record(
pool.identifier, self.provider.data_source,
operation=self.provider.OPERATION,
collection=self.provider.collection
)
assert [] == qu.all()
def test_identifier_reaped_from_one_collection_covered_in_another(self):
"""An Identifier can be reaped from one collection but still
need coverage in another.
"""
edition, pool = self._edition(
with_license_pool=True, collection=self.collection,
identifier_type=Identifier.BIBLIOTHECA_ID
)
identifier = pool.identifier
other_collection = self._collection()
# This identifier was reaped from other_collection, but not
# from self.provider.collection.
cr = self._coverage_record(
pool.identifier, self.provider.data_source,
operation=CoverageRecord.REAP_OPERATION,
collection=other_collection
)
# It still needs to be covered in self.provider.collection.
assert [identifier] == self.provider.items_that_need_coverage().all()
def test_items_that_need_coverage_respects_cutoff(self):
"""Verify that this coverage provider respects the cutoff_time
argument.
"""
edition, pool = self._edition(
with_license_pool=True, collection=self.collection,
identifier_type=Identifier.BIBLIOTHECA_ID
)
cr = self._coverage_record(
pool.identifier, self.provider.data_source,
operation=self.provider.OPERATION, collection=self.collection
)
# We have a coverage record already, so this book doesn't show
# up in items_that_need_coverage
items = self.provider.items_that_need_coverage().all()
assert [] == items
# But if we send a cutoff_time that's later than the time
# associated with the coverage record...
one_hour_from_now = (
utc_now() + datetime.timedelta(seconds=3600)
)
provider_with_cutoff = self.create_provider(
cutoff_time=one_hour_from_now
)
# The book starts showing up in items_that_need_coverage.
assert ([pool.identifier] ==
provider_with_cutoff.items_that_need_coverage().all())
def test_items_that_need_coverage_respects_count_as_covered(self):
# Here's a coverage record with a transient failure.
edition, pool = self._edition(
with_license_pool=True, collection=self.collection,
identifier_type=Identifier.OVERDRIVE_ID,
)
cr = self._coverage_record(
pool.identifier, self.provider.data_source,
operation=self.provider.operation,
status=CoverageRecord.TRANSIENT_FAILURE,
collection=self.collection
)
# Ordinarily, a transient failure does not count as coverage.
[needs_coverage] = self.provider.items_that_need_coverage().all()
assert needs_coverage == pool.identifier
# But if we say that transient failure counts as coverage, it
# does count.
assert ([] ==
self.provider.items_that_need_coverage(
count_as_covered=CoverageRecord.TRANSIENT_FAILURE
).all())
def test_isbn_covers_are_imported_from_mapped_identifiers(self):
# Now that we pass ISBN equivalents instead of Bibliotheca identifiers
# to the Metadata Wrangler, they're not getting covers. Let's confirm
# that the problem isn't on the Circulation Manager import side of things.
# Create a Bibliotheca identifier with a license pool.
source = DataSource.lookup(self._db, DataSource.BIBLIOTHECA)
identifier = self._identifier(identifier_type=Identifier.BIBLIOTHECA_ID)
LicensePool.for_foreign_id(
self._db, source, identifier.type, identifier.identifier,
collection=self.provider.collection
)
# Create an ISBN and set it equivalent.
isbn = self._identifier(identifier_type=Identifier.ISBN)
isbn.identifier = '9781594632556'
identifier.equivalent_to(source, isbn, 1)
opds = sample_data('metadata_isbn_response.opds', 'opds')
self.provider.lookup_client.queue_response(
200, {'content-type': 'application/atom+xml;profile=opds-catalog;kind=acquisition'}, opds
)
result = self.provider.process_item(identifier)
# The lookup is successful
assert result == identifier
# The appropriate cover links are transferred.
identifier_uris = [l.resource.url for l in identifier.links
if l.rel in [Hyperlink.IMAGE, Hyperlink.THUMBNAIL_IMAGE]]
expected = [
'http://book-covers.nypl.org/Content%20Cafe/ISBN/9781594632556/cover.jpg',
'http://book-covers.nypl.org/scaled/300/Content%20Cafe/ISBN/9781594632556/cover.jpg'
]
assert sorted(identifier_uris) == sorted(expected)
# The ISBN doesn't get any information.
assert isbn.links == []
class MetadataWranglerCollectionManagerTest(DatabaseTest):
def setup_method(self):
super(MetadataWranglerCollectionManagerTest, self).setup_method()
self.integration = self._external_integration(
ExternalIntegration.METADATA_WRANGLER,
goal=ExternalIntegration.METADATA_GOAL, url=self._url,
username='abc', password='def'
)
self.source = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)
self.collection = self._collection(
protocol=ExternalIntegration.BIBLIOTHECA, external_account_id='lib'
)
self.lookup = MockMetadataWranglerOPDSLookup.from_config(
self._db, collection=self.collection
)
class TestMetadataWranglerCollectionReaper(MetadataWranglerCoverageProviderTest):
TEST_CLASS = MetadataWranglerCollectionReaper
def test_constants(self):
# This CoverageProvider runs Identifiers through the 'remove'
# endpoint and marks success with CoverageRecords that have
# the REAP_OPERATION operation.
assert CoverageRecord.REAP_OPERATION == self.TEST_CLASS.OPERATION
assert self.provider.lookup_client.remove == self.provider.api_method
def test_items_that_need_coverage(self):
"""The reaper only returns identifiers with no-longer-licensed
license_pools that have been synced with the Metadata
Wrangler.
"""
# Create an item that was imported into the Wrangler-side
# collection but no longer has any owned licenses
covered_unlicensed_lp = self._licensepool(
None, open_access=False, set_edition_as_presentation=True,
collection=self.collection
)
covered_unlicensed_lp.update_availability(0, 0, 0, 0)
cr = self._coverage_record(
covered_unlicensed_lp.presentation_edition, self.source,
operation=CoverageRecord.IMPORT_OPERATION,
collection=self.provider.collection,
)
# Create an unsynced item that doesn't have any licenses
uncovered_unlicensed_lp = self._licensepool(None, open_access=False)
uncovered_unlicensed_lp.update_availability(0, 0, 0, 0)
# And an unsynced item that has licenses.
licensed_lp = self._licensepool(None, open_access=False)
# Create an open access license pool
open_access_lp = self._licensepool(None)
items = self.provider.items_that_need_coverage().all()
assert 1 == len(items)
# Items that are licensed are ignored.
assert licensed_lp.identifier not in items
# Items with open access license pools are ignored.
assert open_access_lp.identifier not in items
# Items that haven't been synced with the Metadata Wrangler are
# ignored, even if they don't have licenses.
assert uncovered_unlicensed_lp.identifier not in items
# Only synced items without owned licenses are returned.
assert [covered_unlicensed_lp.identifier] == items
# Items that had unsuccessful syncs are not returned.
cr.status = CoverageRecord.TRANSIENT_FAILURE
assert [] == self.provider.items_that_need_coverage().all()
def test_process_batch(self):
data = sample_data('metadata_reaper_response.opds', 'opds')
self.lookup_client.queue_response(
200, {'content-type': OPDSFeed.ACQUISITION_FEED_TYPE}, data
)
valid_id, mapped_id, lost_id = self.opds_feed_identifiers()
results = self.provider.process_batch([valid_id, mapped_id, lost_id])
# The valid_id and mapped_id were handled successfully.
# The server ignored lost_id, so nothing happened to it,
# and the server sent a fourth ID we didn't ask for,
# which we ignored.
assert sorted(results) == sorted([valid_id, mapped_id])
def test_finalize_batch(self):
        # Metadata Wrangler sync coverage records are deleted from the db
        # when the batch is finalized if the item has been reaped.
# Create an identifier that has been imported and one that's
# been reaped.
sync_cr = self._coverage_record(
self._edition(), self.source,
operation=CoverageRecord.IMPORT_OPERATION,
collection=self.provider.collection
)
reaped_cr = self._coverage_record(
self._edition(), self.source,
operation=CoverageRecord.REAP_OPERATION,
collection=self.provider.collection
)
# Create coverage records for an Identifier that has been both synced
# and reaped.
doubly_covered = self._edition()
doubly_sync_record = self._coverage_record(
doubly_covered, self.source,
operation=CoverageRecord.IMPORT_OPERATION,
collection=self.provider.collection
)
doubly_reap_record = self._coverage_record(
doubly_covered, self.source,
operation=CoverageRecord.REAP_OPERATION,
collection=self.provider.collection,
)
self.provider.finalize_batch()
remaining_records = self._db.query(CoverageRecord).all()
# The syncing record has been deleted from the database
assert doubly_sync_record not in remaining_records
assert (sorted([sync_cr, reaped_cr, doubly_reap_record], key=lambda x: x.id) ==
sorted(remaining_records, key=lambda x: x.id))
class TestMetadataUploadCoverageProvider(DatabaseTest):
def create_provider(self, **kwargs):
upload_client = MockMetadataWranglerOPDSLookup.from_config(self._db, self.collection)
return MetadataUploadCoverageProvider(
self.collection, upload_client, **kwargs
)
def setup_method(self):
super(TestMetadataUploadCoverageProvider, self).setup_method()
self.integration = self._external_integration(
ExternalIntegration.METADATA_WRANGLER,
goal=ExternalIntegration.METADATA_GOAL, url=self._url,
username='abc', password='def'
)
self.source = DataSource.lookup(self._db, DataSource.METADATA_WRANGLER)
self.collection = self._collection(
protocol=ExternalIntegration.BIBLIOTHECA, external_account_id='lib'
)
self.provider = self.create_provider()
def test_items_that_need_coverage_only_finds_transient_failures(self):
"""Verify that this coverage provider only covers items that have
transient failure CoverageRecords.
"""
edition, pool = self._edition(
with_license_pool=True, collection=self.collection,
identifier_type=Identifier.BIBLIOTHECA_ID
)
# We don't have a CoverageRecord yet, so the book doesn't show up.
items = self.provider.items_that_need_coverage().all()
assert [] == items
cr = self._coverage_record(
pool.identifier, self.provider.data_source,
operation=self.provider.OPERATION, collection=self.collection
)
# With a successful or persistent failure CoverageRecord, it still doesn't show up.
cr.status = CoverageRecord.SUCCESS
items = self.provider.items_that_need_coverage().all()
assert [] == items
cr.status = CoverageRecord.PERSISTENT_FAILURE
items = self.provider.items_that_need_coverage().all()
assert [] == items
# But with a transient failure record it does.
cr.status = CoverageRecord.TRANSIENT_FAILURE
items = self.provider.items_that_need_coverage().all()
assert [edition.primary_identifier] == items
def test_process_batch_uploads_metadata(self):
class MockMetadataClient(object):
metadata_feed = None
authenticated = True
def canonicalize_author_name(self, identifier, working_display_name):
return working_display_name
def add_with_metadata(self, feed):
self.metadata_feed = feed
metadata_client = MockMetadataClient()
provider = MetadataUploadCoverageProvider(
self.collection, metadata_client
)
edition, pool = self._edition(
with_license_pool=True, collection=self.collection,
identifier_type=Identifier.BIBLIOTHECA_ID
)
work = pool.calculate_work()
# This identifier has no Work.
no_work = self._identifier()
results = provider.process_batch([pool.identifier, no_work])
# An OPDS feed of metadata was sent to the metadata wrangler.
assert metadata_client.metadata_feed != None
feed = feedparser.parse(str(metadata_client.metadata_feed))
urns = [entry.get("id") for entry in feed.get("entries", [])]
        # Only the identifier with a work ends up in the feed.
assert [pool.identifier.urn] == urns
# There are two results: the identifier with a work and a CoverageFailure.
assert 2 == len(results)
assert pool.identifier in results
[failure] = [r for r in results if isinstance(r, CoverageFailure)]
assert no_work == failure.obj
| 39.352195
| 101
| 0.661146
|
eb5d6df3e4dd75914649c2b248a64a65b2bc2d43
| 84
|
py
|
Python
|
codebase/evaluate_MAE_td.py
|
Mikeriess/ML4PM_Temp_losses
|
6b31c33c942c2473f237d5e29153aebc2921be47
|
[
"MIT"
] | null | null | null |
codebase/evaluate_MAE_td.py
|
Mikeriess/ML4PM_Temp_losses
|
6b31c33c942c2473f237d5e29153aebc2921be47
|
[
"MIT"
] | null | null | null |
codebase/evaluate_MAE_td.py
|
Mikeriess/ML4PM_Temp_losses
|
6b31c33c942c2473f237d5e29153aebc2921be47
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 24 13:41:39 2021
@author: Mike
"""
| 10.5
| 35
| 0.559524
|
1a2cfdfaa89a3aa207f3f0ecd3181b0266f86bb9
| 7,677
|
py
|
Python
|
raster_processing.py
|
mweissbacher/xView2_FDNY
|
50116734606a743946baf4f63a70b49473634ecc
|
[
"MIT"
] | 6
|
2020-07-21T01:00:43.000Z
|
2021-12-22T07:30:56.000Z
|
raster_processing.py
|
mweissbacher/xView2_FDNY
|
50116734606a743946baf4f63a70b49473634ecc
|
[
"MIT"
] | null | null | null |
raster_processing.py
|
mweissbacher/xView2_FDNY
|
50116734606a743946baf4f63a70b49473634ecc
|
[
"MIT"
] | 1
|
2020-07-20T21:35:26.000Z
|
2020-07-20T21:35:26.000Z
|
from itertools import product
import random
import string
import subprocess
import fiona
import numpy as np
import rasterio
import rasterio.merge
import rasterio.warp
import rasterio.plot
from rasterio import windows
from rasterio.features import shapes
from shapely.geometry import mapping, shape
from shapely.geometry.polygon import Polygon
from shapely.ops import cascaded_union
from osgeo import gdal
from tqdm import tqdm
from handler import *
from pathlib import Path
def reproject(in_file, dest_file, in_crs, dest_crs='EPSG:4326'):
"""
Re-project images
:param in_file: path to file to be reprojected
:param dest_file: path to write re-projected image
    :param in_crs: crs of input file -- only used if the image does not contain a crs in its metadata
:param dest_crs: destination crs
:return: path to re-projected image
"""
input_raster = gdal.Open(str(in_file))
if input_raster.GetSpatialRef() is not None:
in_crs = input_raster.GetSpatialRef()
if in_crs is None:
raise ValueError('No CRS set')
# TODO: Change the resolution based on the lowest resolution in the inputs
gdal.Warp(str(dest_file), input_raster, dstSRS=dest_crs, srcSRS=in_crs, xRes=6e-06, yRes=6e-06)
return Path(dest_file).resolve()
def create_mosaic(in_files, out_file):
"""
Creates mosaic from in_files.
:param in_files: list of paths to input files
:param out_file: path to output mosaic
:return: path to output file
"""
# This is some hacky, dumb shit
# There is a limit on how many file descriptors we can have open at once
# So we will up that limit for a bit and then set it back
    # Remember the original soft limit so it can be restored afterwards.
    old_soft_limit = None
    if os.name == 'posix':
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if len(in_files) >= soft:
            old_soft_limit = soft
            resource.setrlimit(resource.RLIMIT_NOFILE, (len(in_files) * 2, hard))
    elif os.name == 'nt':
        import win32file
        soft = win32file._getmaxstdio()
        if len(in_files) >= soft:
            old_soft_limit = soft
            win32file._setmaxstdio(len(in_files) * 2)
file_objs = []
for file in in_files:
src = rasterio.open(file)
file_objs.append(src)
mosaic, out_trans = rasterio.merge.merge(file_objs)
out_meta = src.meta.copy()
out_meta.update({"driver": "GTiff",
"height": mosaic.shape[1],
"width": mosaic.shape[2],
"transform": out_trans
}
)
with rasterio.open(out_file, "w", **out_meta) as dest:
dest.write(mosaic)
    # Reset the soft limit back to its original value.
    if old_soft_limit is not None:
        if os.name == 'posix':
            resource.setrlimit(resource.RLIMIT_NOFILE, (old_soft_limit, hard))
        elif os.name == 'nt':
            win32file._setmaxstdio(old_soft_limit)
return Path(out_file).resolve()
def get_intersect(*args):
"""
Computes intersect of input rasters.
:param args: list of files to compute
:return: tuple of intersect in (left, bottom, right, top)
"""
# TODO: Calculate real intersection.
left = []
bottom = []
right = []
top = []
for arg in args:
raster = rasterio.open(arg)
left.append(raster.bounds[0])
bottom.append(raster.bounds[1])
right.append(raster.bounds[2])
top.append(raster.bounds[3])
intersect = (max(left), max(bottom), min(right), min(top))
return intersect
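# Worked example for get_intersect above (illustration only, not from the
# original file): for rasters whose bounds are (0, 0, 10, 10) and
# (5, 5, 15, 15) in (left, bottom, right, top) order, the returned tuple is
# (max(lefts), max(bottoms), min(rights), min(tops)) == (5, 5, 10, 10),
# i.e. the overlapping extent of the inputs.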
def check_dims(arr, w, h):
"""
Check dimensions of output tiles and pad
:param arr: numpy array
:param w: tile width
:param h: tile height
:return: tile of same dimensions specified
"""
dims = arr.shape
if dims[1] != w or dims[2] != h:
result = np.zeros((arr.shape[0],w,h)).astype(arr.dtype)
result[:arr.shape[0],:arr.shape[1],:arr.shape[2]] = arr
else:
result = arr
return result
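# Example of check_dims above (illustration only): a chip read at the edge of
# a raster may come back as, say, (3, 1024, 700); check_dims zero-pads it to
# the requested (3, 1024, 1024) so every tile written out has identical
# dimensions.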
def create_chips(in_raster, out_dir, intersect, tile_width=1024, tile_height=1024):
"""
Creates chips from mosaic that fall inside the intersect
:param in_raster: mosaic to create chips from
:param out_dir: path to write chips
:param intersect: bounds of chips to create
:param tile_width: width of tiles to chip
:param tile_height: height of tiles to chip
:return: list of path to chips
"""
def get_intersect_win(rio_obj):
"""
Calculate rasterio window from intersect
:param rio_obj: rasterio dataset
:return: window of intersect
"""
xy_ul = rasterio.transform.rowcol(rio_obj.transform, intersect[0], intersect[3])
xy_lr = rasterio.transform.rowcol(rio_obj.transform, intersect[2], intersect[1])
int_window = rasterio.windows.Window(xy_ul[1], xy_ul[0],
abs(xy_ul[1] - xy_lr[1]),
abs(xy_ul[0] - xy_lr[0]))
return int_window
def get_tiles(ds, width, height):
"""
Create chip tiles generator
:param ds: rasterio dataset
:param width: tile width
:param height: tile height
:return: generator of rasterio windows and transforms for each tile to be created
"""
intersect_window = get_intersect_win(ds)
offsets = product(range(intersect_window.col_off, intersect_window.width + intersect_window.col_off, width),
range(intersect_window.row_off, intersect_window.height + intersect_window.row_off, height))
for col_off, row_off in offsets:
window = windows.Window(col_off=col_off, row_off=row_off, width=width, height=height).intersection(intersect_window)
transform = windows.transform(window, ds.transform)
yield window, transform
chips = []
with rasterio.open(in_raster) as inds:
meta = inds.meta.copy()
for idx, (window, transform) in enumerate(tqdm(get_tiles(inds, tile_width, tile_height))):
meta['transform'] = transform
meta['width'], meta['height'] = tile_width, tile_height
output_filename = f'{idx}_{out_dir.parts[-1]}.tif'
outpath = out_dir.joinpath(output_filename)
with rasterio.open(outpath, 'w', **meta) as outds:
chip_arr = inds.read(window=window)
out_arr = check_dims(chip_arr, tile_width, tile_height)
assert(out_arr.shape[1] == tile_width)
assert(out_arr.shape[2] == tile_height)
outds.write(out_arr)
chips.append(outpath.resolve())
return chips
def create_shapefile(in_files, out_shapefile, dest_crs):
polygons = []
for idx, f in enumerate(in_files):
src = rasterio.open(f)
crs = src.crs
transform = src.transform
bnd = src.read(1)
polys = list(shapes(bnd, transform=transform))
for geom, val in polys:
if val == 0:
continue
polygons.append((Polygon(shape(geom)), val))
shp_schema = {
'geometry': 'Polygon',
'properties': {'dmg': 'int'}
}
# Write out all the multipolygons to the same file
with fiona.open(out_shapefile, 'w', 'ESRI Shapefile', shp_schema,
dest_crs) as shp:
for polygon, px_val in polygons:
shp.write({
'geometry': mapping(polygon),
'properties': {'dmg': int(px_val)}
})
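# Minimal usage sketch (not part of the original module): one way these helpers
# might be chained for a single image pair. The file names, the input CRS value
# and the output directory are placeholder assumptions for illustration only.
def _example_pipeline():
    pre = reproject(Path('pre.tif'), Path('pre_4326.tif'), in_crs='EPSG:26915')
    post = reproject(Path('post.tif'), Path('post_4326.tif'), in_crs='EPSG:26915')
    pre_mosaic = create_mosaic([pre], Path('pre_mosaic.tif'))
    post_mosaic = create_mosaic([post], Path('post_mosaic.tif'))
    # Chip both mosaics over their common extent so pre/post tiles line up.
    bounds = get_intersect(pre_mosaic, post_mosaic)
    out_dir = Path('chips_pre')
    out_dir.mkdir(parents=True, exist_ok=True)
    return create_chips(pre_mosaic, out_dir, bounds)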
####
| 29.988281
| 128
| 0.621076
|
160a4b29974d272ee5c28b164c06d23de5eb9fdf
| 122
|
py
|
Python
|
wsgi.py
|
Mirror-Score/playstore-app-data
|
51ca1f0ad769adf07a2bd220a1c73ae36d3a2a78
|
[
"MIT"
] | null | null | null |
wsgi.py
|
Mirror-Score/playstore-app-data
|
51ca1f0ad769adf07a2bd220a1c73ae36d3a2a78
|
[
"MIT"
] | null | null | null |
wsgi.py
|
Mirror-Score/playstore-app-data
|
51ca1f0ad769adf07a2bd220a1c73ae36d3a2a78
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from main import create_app
app = create_app()
if __name__ == "__main__":
app.run(debug=True)
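# In production this module is typically served by a WSGI server rather than
# the built-in debug server, e.g. (assuming gunicorn is installed):
#     gunicorn wsgi:app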
| 15.25
| 27
| 0.696721
|
7be9f5c5c7f29801152ef7f95d6be33506fdcd78
| 1,277
|
py
|
Python
|
sharpy-sc2/sharpy/plans/tactics/terran/continue_building.py
|
ProfessorQu/Sharpy-Bot
|
a8bf7ebbed113f5bf0f6891c9ca45fac9edfb26e
|
[
"MIT"
] | 48
|
2019-11-25T20:02:27.000Z
|
2022-02-28T00:16:21.000Z
|
sharpy-sc2/sharpy/plans/tactics/terran/continue_building.py
|
ProfessorQu/Sharpy-Bot
|
a8bf7ebbed113f5bf0f6891c9ca45fac9edfb26e
|
[
"MIT"
] | 48
|
2020-03-10T17:08:04.000Z
|
2022-02-22T08:21:12.000Z
|
sharpy-sc2/sharpy/plans/tactics/terran/continue_building.py
|
ProfessorQu/Sharpy-Bot
|
a8bf7ebbed113f5bf0f6891c9ca45fac9edfb26e
|
[
"MIT"
] | 25
|
2019-12-01T18:14:54.000Z
|
2022-03-24T01:14:53.000Z
|
import sc2
from sharpy.plans.acts import ActBase
from sc2 import UnitTypeId, AbilityId
from sc2.unit import Unit
REACTORS = {UnitTypeId.BARRACKSREACTOR, UnitTypeId.FACTORYREACTOR, UnitTypeId.STARPORTREACTOR, UnitTypeId.REACTOR}
TECHLABS = {UnitTypeId.BARRACKSTECHLAB, UnitTypeId.FACTORYTECHLAB, UnitTypeId.STARPORTTECHLAB, UnitTypeId.TECHLAB}
TECHLABS_AND_REACTORS = REACTORS.union(TECHLABS)
class ContinueBuilding(ActBase):
async def execute(self) -> bool:
building: Unit
buildings = self.ai.structures.not_ready.exclude_type(TECHLABS_AND_REACTORS)
scv_constructing = self.ai.units.filter(lambda unit: unit.is_constructing_scv)
if buildings.amount > scv_constructing.amount:
for building in buildings:
if self.knowledge.unit_values.build_time(building.type_id) > 0 and not scv_constructing.closer_than(
building.radius + 0.5, building
):
self.knowledge.print(f"[Building continue] {building.type_id} {building.position}")
workers = self.roles.free_workers()
if workers.exists:
scv = workers.closest_to(building)
scv(AbilityId.SMART, building)
return True
| 44.034483
| 116
| 0.68285
|
5d53fe3be68f8dc2ffde3e0bfe584f498dd05366
| 4,079
|
py
|
Python
|
S3Backup/__init__.py
|
mgoodfellow/s3-backup
|
b0079a9fa11722e0961d60a96359b8fe7e50d6a1
|
[
"MIT"
] | 3
|
2015-06-30T19:55:50.000Z
|
2016-11-17T23:02:07.000Z
|
S3Backup/__init__.py
|
mgoodfellow/s3-backup
|
b0079a9fa11722e0961d60a96359b8fe7e50d6a1
|
[
"MIT"
] | 1
|
2015-08-27T19:23:25.000Z
|
2015-09-07T20:56:52.000Z
|
S3Backup/__init__.py
|
mgoodfellow/s3-backup
|
b0079a9fa11722e0961d60a96359b8fe7e50d6a1
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015 Mike Goodfellow
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
from S3Backup import config_loader
from time import strftime, gmtime
import boto.ses
logger = logging.getLogger(name='S3BackupTool')
class S3BackupTool:
def __init__(self, config_file="config.json"):
logger.info('Initialising...')
try:
self.CONFIGURATION, self.PLANS = config_loader.config_setup(config_file)
        except Exception as e:
logger.fatal('Failed to load configuration: %s', e)
raise e
logger.info('Loaded configuration')
def run_plans(self):
if len(self.PLANS) == 0:
logger.warn('No plans to execute')
return
counter = 1
for plan in self.PLANS:
logger.info('Executing plan %d of %d', counter, len(self.PLANS))
try:
updated, output_file = plan.run()
self.__send_success_email(plan, updated, output_file)
            except Exception as e:
logger.error('Failed to run plan: %s', e)
self.__send_failure_email(plan, e)
counter += 1
logger.info('Finished running backup plans')
def __send_success_email(self, plan, updated, output_file):
subject = '[S3-Backup] [SUCCESS] - Plan: %s' % plan.name
body = 'The backup plan, %s, run at %s was SUCCESSFUL\n\n' % (
plan.name,
strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
if updated:
body += 'The backup set had changed, so a new backup was uploaded: %s' % output_file
else:
body += 'The backup set had not changed. No new backup uploaded'
self.__send_status_email(subject, body)
def __send_failure_email(self, plan, exception):
subject = '[S3-Backup] [FAILURE] - Plan: %s' % plan.name
body = 'The backup plan, %s, run at %s was a FAILURE\n\n' % (
plan.name,
strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()))
body += '\n\nDetailed failure information:\n\n%s' % exception
self.__send_status_email(subject, body)
def __send_status_email(self, subject, body):
if self.CONFIGURATION['EMAIL_FROM'] is None or self.CONFIGURATION['EMAIL_TO'] is None:
logger.debug('Email not provided, so status update not sent')
return
conn = boto.ses.connect_to_region(
self.CONFIGURATION['AWS_REGION'],
aws_access_key_id=self.CONFIGURATION['AWS_KEY'],
aws_secret_access_key=self.CONFIGURATION['AWS_SECRET'])
try:
conn.send_email(
self.CONFIGURATION['EMAIL_FROM'],
subject,
body,
[self.CONFIGURATION['EMAIL_TO']])
        except Exception as e:
            logger.error(
                'Failed to send email to %s with subject %s: %s',
                self.CONFIGURATION['EMAIL_TO'], subject, e
            )
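# Minimal usage sketch (not part of the original module). It assumes a
# config.json in the working directory and basic stderr logging; both are
# illustrative assumptions rather than requirements of this file.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    S3BackupTool(config_file='config.json').run_plans()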
| 37.081818
| 114
| 0.629076
|
30b6f9ffa64f3c7b0161914dfb0d2a9486f41104
| 9,202
|
py
|
Python
|
modin/engines/base/io/file_dispatcher.py
|
palash247/modin
|
3f1e275b67a760f09db6944600c4b7f5e601cbde
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/engines/base/io/file_dispatcher.py
|
palash247/modin
|
3f1e275b67a760f09db6944600c4b7f5e601cbde
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
modin/engines/base/io/file_dispatcher.py
|
palash247/modin
|
3f1e275b67a760f09db6944600c4b7f5e601cbde
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Module houses `FileDispatcher` class.
`FileDispatcher` can be used as abstract base class for dispatchers of specific file formats or
for direct files processing.
"""
import os
import re
from modin.config import Backend
S3_ADDRESS_REGEX = re.compile("[sS]3://(.*?)/(.*)")
NOT_IMPLEMENTED_MESSAGE = "Implement in children classes!"
class FileDispatcher:
"""
Class handles util functions for reading data from different kinds of files.
Notes
-----
`_read`, `deploy`, `parse` and `materialize` are abstract methods and should be
implemented in the child classes (functions signatures can differ between child
classes).
"""
frame_cls = None
frame_partition_cls = None
query_compiler_cls = None
@classmethod
def read(cls, *args, **kwargs):
"""
        Read data according to the passed `args` and `kwargs`.
Parameters
----------
*args : iterable
Positional arguments to be passed into `_read` function.
**kwargs : dict
Keywords arguments to be passed into `_read` function.
Returns
-------
query_compiler : BaseQueryCompiler
Query compiler with imported data for further processing.
Notes
-----
        `read` is a high-level function that calls the `_read` function specific to the
        defined backend, engine and dispatcher class with the passed parameters, and
        performs some postprocessing work on the resulting query_compiler object.
"""
query_compiler = cls._read(*args, **kwargs)
# TODO (devin-petersohn): Make this section more general for non-pandas kernel
# implementations.
if Backend.get() == "Pandas":
import pandas as kernel_lib
elif Backend.get() == "Cudf":
import cudf as kernel_lib
else:
raise NotImplementedError("FIXME")
if hasattr(query_compiler, "dtypes") and any(
isinstance(t, kernel_lib.CategoricalDtype) for t in query_compiler.dtypes
):
dtypes = query_compiler.dtypes
return query_compiler.astype(
{
t: dtypes[t]
for t in dtypes.index
if isinstance(dtypes[t], kernel_lib.CategoricalDtype)
}
)
return query_compiler
@classmethod
def _read(cls, *args, **kwargs):
"""
Perform reading of the data from file.
Should be implemented in the child class.
Parameters
----------
*args : iterable
Positional arguments of the function.
**kwargs : dict
Keywords arguments of the function.
"""
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
@classmethod
def get_path(cls, file_path):
"""
        Process `file_path` in accordance with its type.
Parameters
----------
file_path : str
String that represents the path to the file (paths to S3 buckets
are also acceptable).
Returns
-------
str
Updated or verified `file_path` parameter.
Notes
-----
        If `file_path` is an S3 bucket, the parameter will be returned as is; otherwise
        an absolute path will be returned.
"""
if S3_ADDRESS_REGEX.search(file_path):
return file_path
else:
return os.path.abspath(file_path)
@classmethod
def file_open(cls, file_path, mode="rb", compression="infer"):
"""
Get the file handle from `file_path`.
Parameters
----------
file_path : str
String that represents the path to the file (paths to S3 buckets
are also acceptable).
mode : str, default: "rb"
            String that defines the mode in which the file should be opened.
compression : str, default: "infer"
File compression name (acceptable values are "gzip", "bz2", "xz" and "zip").
Returns
-------
file-like
file-like object of the `file_path`.
"""
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match is not None:
if file_path[0] == "S":
file_path = "{}{}".format("s", file_path[1:])
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
s3fs = S3FS.S3FileSystem(anon=False)
try:
return s3fs.open(file_path)
except NoCredentialsError:
s3fs = S3FS.S3FileSystem(anon=True)
return s3fs.open(file_path)
elif compression == "gzip":
import gzip
return gzip.open(file_path, mode=mode)
elif compression == "bz2":
import bz2
return bz2.BZ2File(file_path, mode=mode)
elif compression == "xz":
import lzma
return lzma.LZMAFile(file_path, mode=mode)
elif compression == "zip":
import zipfile
zf = zipfile.ZipFile(file_path, mode=mode.replace("b", ""))
if zf.mode == "w":
return zf
elif zf.mode == "r":
zip_names = zf.namelist()
if len(zip_names) == 1:
f = zf.open(zip_names.pop())
return f
elif len(zip_names) == 0:
raise ValueError(
"Zero files found in ZIP file {}".format(file_path)
)
else:
raise ValueError(
"Multiple files found in ZIP file."
" Only one file per ZIP: {}".format(zip_names)
)
return open(file_path, mode=mode)
@classmethod
def file_size(cls, f):
"""
Get the size of file associated with file handle `f`.
Parameters
----------
f : file-like object
File-like object, that should be used to get file size.
Returns
-------
int
File size in bytes.
"""
cur_pos = f.tell()
f.seek(0, os.SEEK_END)
size = f.tell()
f.seek(cur_pos, os.SEEK_SET)
return size
@classmethod
def file_exists(cls, file_path):
"""
Check if `file_path` exists.
Parameters
----------
file_path : str
String that represents the path to the file (paths to S3 buckets
are also acceptable).
Returns
-------
bool
Whether file exists or not.
"""
if isinstance(file_path, str):
match = S3_ADDRESS_REGEX.search(file_path)
if match is not None:
if file_path[0] == "S":
file_path = "{}{}".format("s", file_path[1:])
import s3fs as S3FS
from botocore.exceptions import NoCredentialsError
s3fs = S3FS.S3FileSystem(anon=False)
exists = False
try:
exists = s3fs.exists(file_path) or exists
except NoCredentialsError:
pass
s3fs = S3FS.S3FileSystem(anon=True)
return exists or s3fs.exists(file_path)
return os.path.exists(file_path)
@classmethod
def deploy(cls, func, args, num_returns): # noqa: PR01
"""
Deploy remote task.
Should be implemented in the task class (for example in the `RayTask`).
"""
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
def parse(self, func, args, num_returns): # noqa: PR01
"""
Parse file's data in the worker process.
Should be implemented in the parser class (for example in the `PandasCSVParser`).
"""
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
@classmethod
def materialize(cls, obj_id): # noqa: PR01
"""
Get results from worker.
Should be implemented in the task class (for example in the `RayTask`).
"""
raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
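# Hypothetical sketch (not part of Modin): the class docstring above notes that
# `_read`, `deploy`, `parse` and `materialize` are supplied by child classes.
# The stub below only illustrates how such a child might lean on the
# `get_path`, `file_exists`, `file_open` and `file_size` helpers; it does not
# build a real query compiler.
class _ExampleDispatcher(FileDispatcher):
    @classmethod
    def _read(cls, filepath_or_buffer, **kwargs):
        filepath_or_buffer = cls.get_path(filepath_or_buffer)
        if not cls.file_exists(filepath_or_buffer):
            raise FileNotFoundError(filepath_or_buffer)
        with cls.file_open(filepath_or_buffer, "rb") as f:
            total_bytes = cls.file_size(f)
        # A real dispatcher would now split [0, total_bytes) into chunks,
        # dispatch them with cls.deploy()/cls.parse(), collect the pieces via
        # cls.materialize(), and wrap them in cls.query_compiler_cls.
        raise NotImplementedError(
            "Sketch only -- no query compiler is built here."
        )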
| 32.747331
| 95
| 0.561726
|
dbe7dc436b36245298011a914f879e22b2cfb56f
| 633
|
py
|
Python
|
Courses/Python/Django/Digging into Django/4.Users/2.User Authentication/4.8 Registration URL/main/urls.py
|
kamranhossain/CodeSchoolExercises
|
5d6cd08fd669a85cfddbd8f061fbce4b7dc6e2f5
|
[
"MIT"
] | null | null | null |
Courses/Python/Django/Digging into Django/4.Users/2.User Authentication/4.8 Registration URL/main/urls.py
|
kamranhossain/CodeSchoolExercises
|
5d6cd08fd669a85cfddbd8f061fbce4b7dc6e2f5
|
[
"MIT"
] | null | null | null |
Courses/Python/Django/Digging into Django/4.Users/2.User Authentication/4.8 Registration URL/main/urls.py
|
kamranhossain/CodeSchoolExercises
|
5d6cd08fd669a85cfddbd8f061fbce4b7dc6e2f5
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from django.conf import settings
from django.views import static
from . import views
urlpatterns = [
url(r'^$', views.home, name = 'home'),
url(r'^([0-9]+)/$', views.detail, name = 'detail'),
url(r'^post_location/$', views.post_location, name = 'post_location'),
url(r'^user/(\w+)/$', views.profile, name='profile'),
url(r'^login/$', views.login_view, name='login'),
url(r'^register/$', views.register, name='register'),
]
if settings.DEBUG:
urlpatterns += [
url(r'^media/(?P<path>.*)$', static.serve,
{'document_root': settings.MEDIA_ROOT,}),
]
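# Illustrative note (not part of the original file): because the patterns above
# are named, templates and views can refer to them symbolically, e.g.
# {% url 'detail' 42 %} in a template or reverse('detail', args=[42]) in Python,
# instead of hard-coding paths such as '/42/'.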
| 30.142857
| 74
| 0.619273
|
fad43869044b6474cdb25191dd0f7c41e7d335ea
| 13,365
|
py
|
Python
|
pycxsimulator.py
|
itbergl/PyCX
|
b5cc24454f37102388eb3181dbded6015e23a45c
|
[
"BSD-2-Clause-FreeBSD"
] | 176
|
2019-12-18T11:44:28.000Z
|
2022-03-27T09:09:33.000Z
|
pycxsimulator.py
|
itbergl/PyCX
|
b5cc24454f37102388eb3181dbded6015e23a45c
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2020-03-29T00:51:25.000Z
|
2020-07-19T11:08:32.000Z
|
pycxsimulator.py
|
itbergl/PyCX
|
b5cc24454f37102388eb3181dbded6015e23a45c
|
[
"BSD-2-Clause-FreeBSD"
] | 56
|
2019-12-18T19:04:12.000Z
|
2022-03-22T09:35:33.000Z
|
## "pycxsimulator.py"
## Dynamic, interactive simulation GUI for PyCX
##
## Project website:
## https://github.com/hsayama/PyCX
##
## Initial development by:
## Chun Wong
## email@chunwong.net
##
## Revisions by:
## Hiroki Sayama
## sayama@binghamton.edu
##
## Copyright 2012 Chun Wong
## Copyright 2012-2019 Hiroki Sayama
##
## Simulation control & GUI extensions
## Copyright 2013 Przemyslaw Szufel & Bogumil Kaminski
## {pszufe, bkamins}@sgh.waw.pl
##
## Fixing errors due to "the grid and pack problem" by:
## Toshihiro Tanizawa
## tanizawa@ee.kochi-ct.ac.jp
## began at 2016-06-15(Wed) 17:10:17
## fixed grid() and pack() problem on 2016-06-21(Tue) 18:29:40
##
## various bug fixes and updates by Steve Morgan on 3/28/2020
import matplotlib
#System check added by Steve Morgan
import platform #SM 3/28/2020
if platform.system() == 'Windows': #SM 3/28/2020
backend = 'TkAgg' #SM 3/28/2020
else: #SM 3/28/2020
backend = 'Qt5Agg' #SM 3/28/2020
matplotlib.use(backend) #SM 3/28/2020
import matplotlib.pyplot as plt #SM 3/28/2020
## version check added by Hiroki Sayama on 01/08/2019
import sys
if sys.version_info[0] == 3: # Python 3
from tkinter import *
from tkinter.ttk import Notebook
else: # Python 2
from Tkinter import *
from ttk import Notebook
## suppressing matplotlib deprecation warnings (especially with subplot) by Hiroki Sayama on 06/29/2020
import warnings
warnings.filterwarnings("ignore", category = matplotlib.cbook.MatplotlibDeprecationWarning)
class GUI:
# Constructor
def __init__(self, title='PyCX Simulator', interval=0, stepSize=1, parameterSetters=[]):
## all GUI variables moved to inside constructor by Hiroki Sayama 10/09/2018
self.titleText = title
self.timeInterval = interval
self.stepSize = stepSize
self.parameterSetters = parameterSetters
self.varEntries = {}
self.statusStr = ""
self.running = False
self.modelFigure = None
self.currentStep = 0
# initGUI() removed by Hiroki Sayama 10/09/2018
#create root window
self.rootWindow = Tk()
self.statusText = StringVar(self.rootWindow, value=self.statusStr) # at this point, statusStr = ""
# added "self.rootWindow" above by Hiroki Sayama 10/09/2018
self.setStatusStr("Simulation not yet started")
self.rootWindow.wm_title(self.titleText) # titleText = 'PyCX Simulator'
self.rootWindow.protocol('WM_DELETE_WINDOW', self.quitGUI)
self.rootWindow.geometry('450x300')
self.rootWindow.columnconfigure(0, weight=1)
self.rootWindow.rowconfigure(0, weight=1)
self.notebook = Notebook(self.rootWindow)
# self.notebook.grid(row=0,column=0,padx=2,pady=2,sticky='nswe') # commented out by toshi on 2016-06-21(Tue) 18:30:25
self.notebook.pack(side=TOP, padx=2, pady=2)
# added "self.rootWindow" by Hiroki Sayama 10/09/2018
self.frameRun = Frame(self.rootWindow)
self.frameSettings = Frame(self.rootWindow)
self.frameParameters = Frame(self.rootWindow)
self.frameInformation = Frame(self.rootWindow)
self.notebook.add(self.frameRun,text="Run")
self.notebook.add(self.frameSettings,text="Settings")
self.notebook.add(self.frameParameters,text="Parameters")
self.notebook.add(self.frameInformation,text="Info")
self.notebook.pack(expand=NO, fill=BOTH, padx=5, pady=5 ,side=TOP)
# self.notebook.grid(row=0, column=0, padx=5, pady=5, sticky='nswe') # commented out by toshi on 2016-06-21(Tue) 18:31:02
self.status = Label(self.rootWindow, width=40,height=3, relief=SUNKEN, bd=1, textvariable=self.statusText)
# self.status.grid(row=1,column=0,padx=5,pady=5,sticky='nswe') # commented out by toshi on 2016-06-21(Tue) 18:31:17
self.status.pack(side=TOP, fill=X, padx=5, pady=5, expand=NO)
# -----------------------------------
# frameRun
# -----------------------------------
# buttonRun
self.runPauseString = StringVar(self.rootWindow) # added "self.rootWindow" by Hiroki Sayama 10/09/2018
self.runPauseString.set("Run")
self.buttonRun = Button(self.frameRun,width=30,height=2,textvariable=self.runPauseString,command=self.runEvent)
self.buttonRun.pack(side=TOP, padx=5, pady=5)
self.showHelp(self.buttonRun,"Runs the simulation (or pauses the running simulation)")
# buttonStep
self.buttonStep = Button(self.frameRun,width=30,height=2,text='Step Once',command=self.stepOnce)
self.buttonStep.pack(side=TOP, padx=5, pady=5)
self.showHelp(self.buttonStep,"Steps the simulation only once")
# buttonReset
self.buttonReset = Button(self.frameRun,width=30,height=2,text='Reset',command=self.resetModel)
self.buttonReset.pack(side=TOP, padx=5, pady=5)
self.showHelp(self.buttonReset,"Resets the simulation")
# -----------------------------------
# frameSettings
# -----------------------------------
can = Canvas(self.frameSettings)
lab = Label(can, width=25,height=1,text="Step size ", justify=LEFT, anchor=W,takefocus=0)
lab.pack(side='left')
self.stepScale = Scale(can,from_=1, to=50, resolution=1,command=self.changeStepSize,orient=HORIZONTAL, width=25,length=150)
self.stepScale.set(self.stepSize)
self.showHelp(self.stepScale,"Skips model redraw during every [n] simulation steps\nResults in a faster model run.")
self.stepScale.pack(side='left')
can.pack(side='top')
can = Canvas(self.frameSettings)
lab = Label(can, width=25,height=1,text="Step visualization delay in ms ", justify=LEFT, anchor=W,takefocus=0)
lab.pack(side='left')
self.stepDelay = Scale(can,from_=0, to=max(2000,self.timeInterval),
resolution=10,command=self.changeStepDelay,orient=HORIZONTAL, width=25,length=150)
self.stepDelay.set(self.timeInterval)
        self.showHelp(self.stepDelay,"The visualization of each step is delayed by the given number of milliseconds.")
self.stepDelay.pack(side='left')
can.pack(side='top')
# --------------------------------------------
# frameInformation
# --------------------------------------------
scrollInfo = Scrollbar(self.frameInformation)
self.textInformation = Text(self.frameInformation, width=45,height=13,bg='lightgray',wrap=WORD,font=("Courier",10))
scrollInfo.pack(side=RIGHT, fill=Y)
self.textInformation.pack(side=LEFT,fill=BOTH,expand=YES)
scrollInfo.config(command=self.textInformation.yview)
self.textInformation.config(yscrollcommand=scrollInfo.set)
# --------------------------------------------
# ParameterSetters
# --------------------------------------------
for variableSetter in self.parameterSetters:
can = Canvas(self.frameParameters)
lab = Label(can, width=25,height=1,text=variableSetter.__name__+" ",anchor=W,takefocus=0)
lab.pack(side='left')
ent = Entry(can, width=11)
ent.insert(0, str(variableSetter()))
if variableSetter.__doc__ != None and len(variableSetter.__doc__) > 0:
self.showHelp(ent,variableSetter.__doc__.strip())
ent.pack(side='left')
can.pack(side='top')
self.varEntries[variableSetter]=ent
if len(self.parameterSetters) > 0:
self.buttonSaveParameters = Button(self.frameParameters,width=50,height=1,
command=self.saveParametersCmd,text="Save parameters to the running model",state=DISABLED)
self.showHelp(self.buttonSaveParameters,
"Saves the parameter values.\nNot all values may take effect on a running model\nA model reset might be required.")
self.buttonSaveParameters.pack(side='top',padx=5,pady=5)
self.buttonSaveParametersAndReset = Button(self.frameParameters,width=50,height=1,
command=self.saveParametersAndResetCmd,text="Save parameters to the model and reset the model")
self.showHelp(self.buttonSaveParametersAndReset,"Saves the given parameter values and resets the model")
self.buttonSaveParametersAndReset.pack(side='top',padx=5,pady=5)
# <<<<< Init >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def setStatusStr(self,newStatus):
self.statusStr = newStatus
self.statusText.set(self.statusStr)
# model control functions for changing parameters
def changeStepSize(self,val):
self.stepSize = int(val)
def changeStepDelay(self,val):
self.timeInterval= int(val)
def saveParametersCmd(self):
for variableSetter in self.parameterSetters:
variableSetter(float(self.varEntries[variableSetter].get()))
self.setStatusStr("New parameter values have been set")
def saveParametersAndResetCmd(self):
self.saveParametersCmd()
self.resetModel()
# <<<< runEvent >>>>>
    # This event is invoked when the "Run" button is clicked.
def runEvent(self):
self.running = not self.running
if self.running:
self.rootWindow.after(self.timeInterval,self.stepModel)
self.runPauseString.set("Pause")
self.buttonStep.configure(state=DISABLED)
self.buttonReset.configure(state=DISABLED)
if len(self.parameterSetters) > 0:
self.buttonSaveParameters.configure(state=NORMAL)
self.buttonSaveParametersAndReset.configure(state=DISABLED)
else:
self.runPauseString.set("Continue Run")
self.buttonStep.configure(state=NORMAL)
self.buttonReset.configure(state=NORMAL)
if len(self.parameterSetters) > 0:
self.buttonSaveParameters.configure(state=NORMAL)
self.buttonSaveParametersAndReset.configure(state=NORMAL)
def stepModel(self):
if self.running:
self.modelStepFunc()
self.currentStep += 1
self.setStatusStr("Step "+str(self.currentStep))
self.status.configure(foreground='black')
if (self.currentStep) % self.stepSize == 0:
self.drawModel()
self.rootWindow.after(int(self.timeInterval*1.0/self.stepSize),self.stepModel)
def stepOnce(self):
self.running = False
self.runPauseString.set("Continue Run")
self.modelStepFunc()
self.currentStep += 1
self.setStatusStr("Step "+str(self.currentStep))
self.drawModel()
if len(self.parameterSetters) > 0:
self.buttonSaveParameters.configure(state=NORMAL)
def resetModel(self):
self.running = False
self.runPauseString.set("Run")
self.modelInitFunc()
        self.currentStep = 0
self.setStatusStr("Model has been reset")
self.drawModel()
def drawModel(self):
plt.ion() #SM 3/26/2020
if self.modelFigure == None or self.modelFigure.canvas.manager.window == None:
self.modelFigure = plt.figure() #SM 3/26/2020
self.modelDrawFunc()
self.modelFigure.canvas.manager.window.update()
plt.show() # bug fix by Hiroki Sayama in 2016 #SM 3/26/2020
def start(self,func=[]):
if len(func)==3:
self.modelInitFunc = func[0]
self.modelDrawFunc = func[1]
self.modelStepFunc = func[2]
if (self.modelStepFunc.__doc__ != None and len(self.modelStepFunc.__doc__)>0):
self.showHelp(self.buttonStep,self.modelStepFunc.__doc__.strip())
if (self.modelInitFunc.__doc__ != None and len(self.modelInitFunc.__doc__)>0):
self.textInformation.config(state=NORMAL)
self.textInformation.delete(1.0, END)
self.textInformation.insert(END, self.modelInitFunc.__doc__.strip())
self.textInformation.config(state=DISABLED)
self.modelInitFunc()
self.drawModel()
self.rootWindow.mainloop()
def quitGUI(self):
self.running = False # HS 06/29/2020
self.rootWindow.quit()
plt.close('all') # HS 06/29/2020
self.rootWindow.destroy()
def showHelp(self, widget,text):
def setText(self):
self.statusText.set(text)
self.status.configure(foreground='blue')
def showHelpLeave(self):
self.statusText.set(self.statusStr)
self.status.configure(foreground='black')
widget.bind("<Enter>", lambda e : setText(self))
widget.bind("<Leave>", lambda e : showHelpLeave(self))
| 43.676471
| 150
| 0.609502
|
b06d64cfac07e3ef798f197aa90d29e9f650f419
| 384
|
py
|
Python
|
0x06-python-classes/102-main.py
|
gogomillan/holbertonschool-higher_level_programming
|
1549ffc4fdc284271684321ff6edd882a314193a
|
[
"MIT"
] | null | null | null |
0x06-python-classes/102-main.py
|
gogomillan/holbertonschool-higher_level_programming
|
1549ffc4fdc284271684321ff6edd882a314193a
|
[
"MIT"
] | null | null | null |
0x06-python-classes/102-main.py
|
gogomillan/holbertonschool-higher_level_programming
|
1549ffc4fdc284271684321ff6edd882a314193a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
Square = __import__('102-square').Square
s_5 = Square(5)
s_6 = Square(6)
if s_5 < s_6:
print("Square 5 < Square 6")
if s_5 <= s_6:
print("Square 5 <= Square 6")
if s_5 == s_6:
print("Square 5 == Square 6")
if s_5 != s_6:
print("Square 5 != Square 6")
if s_5 > s_6:
print("Square 5 > Square 6")
if s_5 >= s_6:
print("Square 5 >= Square 6")
| 20.210526
| 40
| 0.588542
|
7ced26e6a014b482cbfb42342d6605c24196ad73
| 484
|
py
|
Python
|
p1.py
|
Morwar22/BigData
|
7f97bc50b9566fb3a323e856c677d91fe54fd2ac
|
[
"Apache-2.0"
] | null | null | null |
p1.py
|
Morwar22/BigData
|
7f97bc50b9566fb3a323e856c677d91fe54fd2ac
|
[
"Apache-2.0"
] | null | null | null |
p1.py
|
Morwar22/BigData
|
7f97bc50b9566fb3a323e856c677d91fe54fd2ac
|
[
"Apache-2.0"
] | null | null | null |
import sys
from pyspark import SparkConf, SparkContext
import re
word = sys.argv[1]
conf = SparkConf().setAppName('WordCount')
sc = SparkContext(conf=conf)
def spliter(line):
    """Split a line into words, treating any non-word character as a separator."""
    return re.sub(r'\W+', ' ', line).split()
def searcher(words):
    """Keep only records (word lists) that contain the search word."""
    return word in words
lines = sc.textFile('input.txt')                   # RDD of text lines
tokenized = lines.map(spliter)                     # each line becomes a list of words
matching = tokenized.filter(searcher)              # keep lines containing the search word
counts = (matching.flatMap(lambda words: words)    # flatten back to individual words
          .map(lambda w: (w, 1))
          .reduceByKey(lambda a, b: a + b))        # count occurrences per word
counts.saveAsTextFile("output.txt")
| 17.925926
| 45
| 0.628099
|
05edfe145308bdca1eae849098a222bc20709c49
| 108
|
py
|
Python
|
config.py
|
anserion/pyFlaskNotebook
|
cac9d3615aa264928eb2bed01a3d06657c05e3ee
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
anserion/pyFlaskNotebook
|
cac9d3615aa264928eb2bed01a3d06657c05e3ee
|
[
"Apache-2.0"
] | null | null | null |
config.py
|
anserion/pyFlaskNotebook
|
cac9d3615aa264928eb2bed01a3d06657c05e3ee
|
[
"Apache-2.0"
] | null | null | null |
CSRF_ENABLED = True
SECRET_KEY = '1234567890'
flask_port=8080
good_name = 'yandex'
good_password = '123456'
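# Usage sketch (added for illustration). Flask's config loader only copies
# UPPERCASE names, so CSRF_ENABLED and SECRET_KEY can be loaded via from_object,
# while the lowercase values must be read from the module directly. The app
# module below is hypothetical.
from flask import Flask
import config

app = Flask(__name__)
app.config.from_object('config')      # picks up CSRF_ENABLED and SECRET_KEY only

if __name__ == '__main__':
    app.run(port=config.flask_port)   # lowercase settings are plain module attributes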
| 18
| 25
| 0.777778
|
12fd0487f71feb342ac5c0fcb7c08ab66dd4f7ed
| 2,418
|
py
|
Python
|
pytest_testrail_api_client/test_rail.py
|
Slamnlc/test-rail-client
|
56ed9fb5d6ad0128458709ebebce6b6b51782a9f
|
[
"MIT"
] | null | null | null |
pytest_testrail_api_client/test_rail.py
|
Slamnlc/test-rail-client
|
56ed9fb5d6ad0128458709ebebce6b6b51782a9f
|
[
"MIT"
] | null | null | null |
pytest_testrail_api_client/test_rail.py
|
Slamnlc/test-rail-client
|
56ed9fb5d6ad0128458709ebebce6b6b51782a9f
|
[
"MIT"
] | null | null | null |
from pytest_testrail_api_client.api.cases_api import CasesApi
from pytest_testrail_api_client.api.congif_api import ConfigsApi
from pytest_testrail_api_client.api.milestones_api import MilestonesApi
from pytest_testrail_api_client.api.plans_api import PlansApi
from pytest_testrail_api_client.api.project_api import ProjectApi
from pytest_testrail_api_client.api.results_api import ResultsApi
from pytest_testrail_api_client.api.runs_api import RunsApi
from pytest_testrail_api_client.api.sections_api import SectionsApi
from pytest_testrail_api_client.api.service_api import ServiceApi
from pytest_testrail_api_client.api.small_api import TestsApi, StatusesApi, CaseTypesApi, TemplatesApi, CaseFieldsApi, \
ResultsFieldsApi, PrioritiesApi, SharedStepsApi, ReportsApi
from pytest_testrail_api_client.api.suites_api import SuitesApi
from pytest_testrail_api_client.api.user_api import UsersApi
from pytest_testrail_api_client.modules.session import Session
class TestRail(Session):
@property
def projects(self):
return ProjectApi(self)
@property
def tests(self):
return TestsApi(self)
@property
def cases(self):
return CasesApi(self)
@property
def statuses(self):
return StatusesApi(self)
@property
def users(self):
return UsersApi(self)
@property
def configs(self):
return ConfigsApi(self)
@property
def case_types(self):
return CaseTypesApi(self)
@property
def suites(self):
return SuitesApi(self)
@property
def templates(self):
return TemplatesApi(self)
@property
def case_fields(self):
return CaseFieldsApi(self)
@property
def results_fields(self):
return ResultsFieldsApi(self)
@property
def priorities(self):
return PrioritiesApi(self)
@property
def sections(self):
return SectionsApi(self)
@property
def milestones(self):
return MilestonesApi(self)
@property
def plans(self):
return PlansApi(self)
@property
def results(self):
return ResultsApi(self)
@property
def runs(self):
return RunsApi(self)
@property
def service(self):
return ServiceApi(self)
@property
def shared_steps(self):
return SharedStepsApi(self)
@property
def reports(self):
return ReportsApi(self)
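# Usage sketch (added for illustration). Every property above just binds the
# shared Session to one endpoint-specific API helper. How Session is configured
# (URL, credentials) is not visible in this module, so the bare constructor call
# below is an assumption, as are any methods on the returned helpers.
def _example_usage():  # hypothetical helper, never called here
    client = TestRail()       # assumes Session configures itself (e.g. from a config file)
    cases_api = client.cases  # CasesApi bound to the same authenticated session
    runs_api = client.runs    # RunsApi bound to the same authenticated session
    return cases_api, runs_api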
| 24.673469
| 120
| 0.724152
|
f0561afc1493802722cefe55909fe00579e94918
| 1,291
|
py
|
Python
|
examples/cbc_iris.py
|
ChristophRaab/prototorch_models
|
75a39f5b03110a56b3d93ee00b3397858f4222a3
|
[
"MIT"
] | 4
|
2021-05-05T07:27:11.000Z
|
2021-12-24T08:01:45.000Z
|
examples/cbc_iris.py
|
ChristophRaab/prototorch_models
|
75a39f5b03110a56b3d93ee00b3397858f4222a3
|
[
"MIT"
] | 6
|
2021-05-06T09:49:37.000Z
|
2021-11-15T10:43:09.000Z
|
examples/cbc_iris.py
|
ChristophRaab/prototorch_models
|
75a39f5b03110a56b3d93ee00b3397858f4222a3
|
[
"MIT"
] | 5
|
2021-05-12T14:16:35.000Z
|
2021-10-20T14:21:42.000Z
|
"""CBC example using the Iris dataset."""
import argparse
import prototorch as pt
import pytorch_lightning as pl
import torch
if __name__ == "__main__":
# Command-line arguments
parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
# Dataset
train_ds = pt.datasets.Iris(dims=[0, 2])
# Reproducibility
pl.utilities.seed.seed_everything(seed=42)
# Dataloaders
train_loader = torch.utils.data.DataLoader(train_ds, batch_size=32)
# Hyperparameters
hparams = dict(
distribution=[1, 0, 3],
margin=0.1,
proto_lr=0.01,
bb_lr=0.01,
)
# Initialize the model
model = pt.models.CBC(
hparams,
components_initializer=pt.initializers.SSCI(train_ds, noise=0.01),
        reasonings_initializer=pt.initializers.
PurePositiveReasoningsInitializer(),
)
# Callbacks
vis = pt.models.VisCBC2D(data=train_ds,
title="CBC Iris Example",
resolution=100,
axis_off=True)
# Setup trainer
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=[vis],
)
# Training loop
trainer.fit(model, train_loader)
| 23.907407
| 74
| 0.618125
|
77fe3789578913372badcb0e2b8089644a790749
| 21,489
|
py
|
Python
|
google/cloud/compute_v1/services/region_target_http_proxies/transports/rest.py
|
vam-google/python-compute
|
799f2f55e5e205317862a17ca7ed548ce2ca66e5
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/compute_v1/services/region_target_http_proxies/transports/rest.py
|
vam-google/python-compute
|
799f2f55e5e205317862a17ca7ed548ce2ca66e5
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/compute_v1/services/region_target_http_proxies/transports/rest.py
|
vam-google/python-compute
|
799f2f55e5e205317862a17ca7ed548ce2ca66e5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.auth.transport.requests import AuthorizedSession
from google.cloud.compute_v1.types import compute
from .base import RegionTargetHttpProxiesTransport, DEFAULT_CLIENT_INFO
class RegionTargetHttpProxiesRestTransport(RegionTargetHttpProxiesTransport):
"""REST backend transport for RegionTargetHttpProxies.
The RegionTargetHttpProxies API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._prep_wrapped_messages(client_info)
def delete(
self,
request: compute.DeleteRegionTargetHttpProxyRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteRegionTargetHttpProxyRequest):
The request object. A request message for
RegionTargetHttpProxies.Delete. See the
method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{target_http_proxy}".format(
host=self._host,
project=request.project,
region=request.region,
target_http_proxy=request.target_http_proxy,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.DeleteRegionTargetHttpProxyRequest.request_id in request:
query_params["requestId"] = request.request_id
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.delete(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
def get(
self,
request: compute.GetRegionTargetHttpProxyRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TargetHttpProxy:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetRegionTargetHttpProxyRequest):
The request object. A request message for
RegionTargetHttpProxies.Get. See the
method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.TargetHttpProxy:
Represents a Target HTTP Proxy resource.
Google Compute Engine has two Target HTTP Proxy
resources:
- `Global </compute/docs/reference/rest/{$api_version}/targetHttpProxies>`__
\*
`Regional </compute/docs/reference/rest/{$api_version}/regionTargetHttpProxies>`__
A target HTTP proxy is a component of GCP HTTP load
balancers.
- targetHttpProxies are used by external HTTP load
balancers and Traffic Director. \*
regionTargetHttpProxies are used by internal HTTP
load balancers.
Forwarding rules reference a target HTTP proxy, and the
target proxy then references a URL map. For more
information, read Using Target Proxies and Forwarding
rule concepts. (== resource_for
{$api_version}.targetHttpProxies ==) (== resource_for
{$api_version}.regionTargetHttpProxies ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{target_http_proxy}".format(
host=self._host,
project=request.project,
region=request.region,
target_http_proxy=request.target_http_proxy,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.get(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.TargetHttpProxy.from_json(
response.content, ignore_unknown_fields=True
)
def insert(
self,
request: compute.InsertRegionTargetHttpProxyRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertRegionTargetHttpProxyRequest):
The request object. A request message for
RegionTargetHttpProxies.Insert. See the
method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Jsonify the request body
body = compute.TargetHttpProxy.to_json(
request.target_http_proxy_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
)
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/targetHttpProxies".format(
host=self._host, project=request.project, region=request.region,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.InsertRegionTargetHttpProxyRequest.request_id in request:
query_params["requestId"] = request.request_id
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.post(url, headers=headers, data=body,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
def list(
self,
request: compute.ListRegionTargetHttpProxiesRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.TargetHttpProxyList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListRegionTargetHttpProxiesRequest):
The request object. A request message for
RegionTargetHttpProxies.List. See the
method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.TargetHttpProxyList:
A list of TargetHttpProxy resources.
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/targetHttpProxies".format(
host=self._host, project=request.project, region=request.region,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.ListRegionTargetHttpProxiesRequest.filter in request:
query_params["filter"] = request.filter
if compute.ListRegionTargetHttpProxiesRequest.max_results in request:
query_params["maxResults"] = request.max_results
if compute.ListRegionTargetHttpProxiesRequest.order_by in request:
query_params["orderBy"] = request.order_by
if compute.ListRegionTargetHttpProxiesRequest.page_token in request:
query_params["pageToken"] = request.page_token
if compute.ListRegionTargetHttpProxiesRequest.return_partial_success in request:
query_params["returnPartialSuccess"] = request.return_partial_success
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.get(url, headers=headers,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.TargetHttpProxyList.from_json(
response.content, ignore_unknown_fields=True
)
def set_url_map(
self,
request: compute.SetUrlMapRegionTargetHttpProxyRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the set url map method over HTTP.
Args:
request (~.compute.SetUrlMapRegionTargetHttpProxyRequest):
The request object. A request message for
RegionTargetHttpProxies.SetUrlMap. See
the method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# Jsonify the request body
body = compute.UrlMapReference.to_json(
request.url_map_reference_resource,
including_default_value_fields=False,
use_integers_for_enums=False,
)
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{target_http_proxy}/setUrlMap".format(
host=self._host,
project=request.project,
region=request.region,
target_http_proxy=request.target_http_proxy,
)
        # TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
if compute.SetUrlMapRegionTargetHttpProxyRequest.request_id in request:
query_params["requestId"] = request.request_id
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = ["{k}={v}".format(k=k, v=v) for k, v in query_params.items()]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = self._session.post(url, headers=headers, data=body,)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
__all__ = ("RegionTargetHttpProxiesRestTransport",)
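# Usage sketch (added for illustration). Normally the generated
# RegionTargetHttpProxiesClient wraps this transport; the sketch below drives it
# directly. Credential discovery via google.auth.default() and the project/region
# values are placeholders, not taken from this module.
def _example_list_proxies():  # hypothetical helper
    import google.auth

    credentials, _ = google.auth.default()
    transport = RegionTargetHttpProxiesRestTransport(credentials=credentials)
    request = compute.ListRegionTargetHttpProxiesRequest(
        project="my-project",   # placeholder
        region="us-central1",   # placeholder
    )
    return transport.list(request)  # compute.TargetHttpProxyList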
| 42.636905
| 133
| 0.620457
|
e9a532e1726db68d039d760dd3b54b47eb888e61
| 3,038
|
py
|
Python
|
tfts/models/unet.py
|
leilin-research/Time-series-prediction
|
97ca6a7525e2c6329276b66ece1747124da8ab42
|
[
"MIT"
] | 552
|
2019-07-23T10:17:49.000Z
|
2022-03-23T13:37:46.000Z
|
tfts/models/unet.py
|
leilin-research/Time-series-prediction
|
97ca6a7525e2c6329276b66ece1747124da8ab42
|
[
"MIT"
] | 12
|
2020-05-16T04:22:09.000Z
|
2022-03-23T13:38:45.000Z
|
tfts/models/unet.py
|
leilin-research/Time-series-prediction
|
97ca6a7525e2c6329276b66ece1747124da8ab42
|
[
"MIT"
] | 122
|
2019-09-09T11:34:19.000Z
|
2022-03-16T08:06:24.000Z
|
# -*- coding: utf-8 -*-
# @author: Longxing Tan, tanlongxing888@163.com
# @date: 2020-03
# paper:
# other implementations: https://www.kaggle.com/super13579/u-net-1d-cnn-with-pytorch
# https://www.kaggle.com/kmat2019/u-net-1d-cnn-with-keras
import tensorflow as tf
from tensorflow.keras.layers import (Input, AveragePooling1D, Add, UpSampling1D, Concatenate, Lambda)
from ..layers.unet_layer import *
params = {}
class Unet(object):
def __init__(self, custom_model_params):
        params.update(custom_model_params)  # dict.update() mutates in place and returns None
        self.params = params
self.AvgPool1D1 = AveragePooling1D(pool_size=2)
self.AvgPool1D2 = AveragePooling1D(pool_size=4)
self.encoder = Encoder()
self.decoder = Decoder()
def __call__(self, x, predict_seq_length, training=True):
pool1 = self.AvgPool1D1(x)
pool2 = self.AvgPool1D2(x)
encoder_output = self.encoder([x, pool1, pool2])
decoder_output = self.decoder(encoder_output, predict_seq_length=predict_seq_length)
return decoder_output
class Encoder(object):
def __init__(self):
pass
def __call__(self, input_tensor, units=64, kernel_size=2, depth=2):
x, pool1, pool2 = input_tensor
x = conv_br(x, units, kernel_size, 1, 1) # => batch_size * sequence_length * units
for i in range(depth):
x = re_block(x, units, kernel_size, 1, 1)
out_0 = x # => batch_size * sequence_length * units
x = conv_br(x, units * 2, kernel_size, 2, 1)
for i in range(depth):
x = re_block(x, units * 2, kernel_size, 1,1)
out_1 = x # => batch_size * sequence/2 * units*2
x = Concatenate()([x, pool1])
x = conv_br(x, units * 3, kernel_size, 2, 1)
for i in range(depth):
x = re_block(x, units * 3, kernel_size, 1, 1)
out_2 = x # => batch_size * sequence/2, units*3
x = Concatenate()([x, pool2])
x = conv_br(x, units * 4, kernel_size, 4, 1)
for i in range(depth):
x = re_block(x, units * 4, kernel_size, 1, 1)
return [out_0, out_1, out_2, x]
class Decoder(object):
def __init__(self):
pass
def __call__(self, input_tensor, units=64, kernel_size=2, predict_seq_length=1):
out_0, out_1, out_2, x = input_tensor
x = UpSampling1D(4)(x)
x = Concatenate()([x, out_2])
x = conv_br(x, units * 3, kernel_size, 1, 1)
x = UpSampling1D(2)(x)
x = Concatenate()([x, out_1])
x = conv_br(x, units * 2, kernel_size, 1, 1)
x = UpSampling1D(2)(x)
x = Concatenate()([x, out_0])
x = conv_br(x, units, kernel_size, 1, 1)
# regression
x = Conv1D(1, kernel_size=kernel_size, strides=1, padding="same")(x)
out = Activation("sigmoid")(x)
out = Lambda(lambda x: 12 * x)(out)
out = AveragePooling1D(strides=4)(out) # Todo: just a tricky way to change the batch*input_seq*1 -> batch_out_seq*1, need a more general way
return out
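# Usage sketch (added for illustration). The wrapper above builds the graph
# functionally when called. The sketch assumes an input sequence length divisible
# by 16 (so the strided convolutions, poolings and upsamplings line up) and an
# empty custom-parameter dict; the conv_br/re_block helpers come from
# ..layers.unet_layer and are not shown here.
def _example_build():  # hypothetical helper
    inputs = Input(shape=(64, 1))               # (sequence_length, features)
    net = Unet(custom_model_params={})
    outputs = net(inputs, predict_seq_length=16)
    return tf.keras.Model(inputs=inputs, outputs=outputs)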
| 34.134831
| 149
| 0.608953
|
aa8ec542359b0a0fe2a05427b160679349e8a35c
| 1,532
|
py
|
Python
|
cancontroller/caniot/message/message.py
|
lucasdietrich/caniot-pycontroller
|
c8ec4a9831dc294086ff194bc09a8d9c23758848
|
[
"MIT"
] | null | null | null |
cancontroller/caniot/message/message.py
|
lucasdietrich/caniot-pycontroller
|
c8ec4a9831dc294086ff194bc09a8d9c23758848
|
[
"MIT"
] | null | null | null |
cancontroller/caniot/message/message.py
|
lucasdietrich/caniot-pycontroller
|
c8ec4a9831dc294086ff194bc09a8d9c23758848
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import struct
from cancontroller.caniot.models import MsgId, DeviceId, BufferType
from abc import ABC
import can
class CaniotMessage(ABC):
"""
Represent a caniot message
"""
@property
def arbitration_id(self):
return int(self.msgid)
def __init__(self, msgid: MsgId, buffer: BufferType = None):
if buffer is None:
buffer = []
self.msgid = msgid
assert len(buffer) <= 8
self.buffer = list(buffer)
def __repr__(self):
return f"{self.msgid} : {self.buffer}"
def get_arbitration_id(self):
return self.msgid
@classmethod
def from_can(cls, canmsg: can.Message) -> CaniotMessage:
return CaniotMessage(
msgid=MsgId.from_int(canmsg.arbitration_id, extended=False),
buffer=canmsg.data
)
def can(self) -> can.Message:
return can.Message(arbitration_id=int(self.arbitration_id),
is_extended_id=False,
data=self.buffer)
def is_response_of(self, query: CaniotMessage):
return self.msgid.is_response_of(query.msgid)
class ErrorMessage(CaniotMessage):
def get_error(self) -> int:
if len(self.buffer) < 4:
print("Invalid CANIOT ERROR FRAME")
return 0
else:
err, = struct.unpack("<i", bytearray(self.buffer[:4]))
return err
def __repr__(self):
return f"{self.msgid} : {self.get_error():04X}"
| 24.709677
| 72
| 0.607702
|
e0c8c88c58852d7e53775d211e5ad5d165fed193
| 8,522
|
py
|
Python
|
wagtail/wagtailusers/forms.py
|
edrex/wagtail
|
dc1b51a5be1a57f6cb1b90507eea6ab7f2e1affe
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailusers/forms.py
|
edrex/wagtail
|
dc1b51a5be1a57f6cb1b90507eea6ab7f2e1affe
|
[
"BSD-3-Clause"
] | null | null | null |
wagtail/wagtailusers/forms.py
|
edrex/wagtail
|
dc1b51a5be1a57f6cb1b90507eea6ab7f2e1affe
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group, Permission
from wagtail.wagtailcore import hooks
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import UserPagePermissionsProxy, GroupPagePermission
User = get_user_model()
# extend Django's UserCreationForm with an 'is_superuser' field
class UserCreationForm(BaseUserCreationForm):
required_css_class = "required"
is_superuser = forms.BooleanField(
label=_("Administrator"),
required=False,
help_text=_("If ticked, this user has the ability to manage user accounts.")
)
email = forms.EmailField(required=True, label=_("Email"))
first_name = forms.CharField(required=True, label=_("First Name"))
last_name = forms.CharField(required=True, label=_("Last Name"))
class Meta:
model = User
fields = ("username", "email", "first_name", "last_name", "is_superuser", "groups")
widgets = {
'groups': forms.CheckboxSelectMultiple
}
def clean_username(self):
# Method copied from parent
username = self.cleaned_data["username"]
try:
            # When called from BaseUserCreationForm, the method fails when using a custom AUTH_USER_MODEL,
            # because the following line tries to perform a lookup on
            # the default "auth_user" table.
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(
self.error_messages['duplicate_username'],
code='duplicate_username',
)
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
# users can access django-admin iff they are a superuser
user.is_staff = user.is_superuser
if commit:
user.save()
self.save_m2m()
return user
# Largely the same as django.contrib.auth.forms.UserCreationForm, but with enough subtle changes
# (to make password non-required) that it isn't worth inheriting...
class UserEditForm(forms.ModelForm):
required_css_class = "required"
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.RegexField(
label=_("Username"),
max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and @/./+/-/_ characters.")
})
email = forms.EmailField(required=True, label=_("Email"))
first_name = forms.CharField(required=True, label=_("First Name"))
last_name = forms.CharField(required=True, label=_("Last Name"))
password1 = forms.CharField(
label=_("Password"),
required=False,
widget=forms.PasswordInput,
help_text=_("Leave blank if not changing."))
password2 = forms.CharField(
label=_("Password confirmation"), required=False,
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
is_superuser = forms.BooleanField(
label=_("Administrator"),
required=False,
help_text=_("Administrators have the ability to manage user accounts.")
)
class Meta:
model = User
fields = ("username", "email", "first_name", "last_name", "is_active", "is_superuser", "groups")
widgets = {
'groups': forms.CheckboxSelectMultiple
}
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.exclude(id=self.instance.id).get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
user = super(UserEditForm, self).save(commit=False)
# users can access django-admin iff they are a superuser
user.is_staff = user.is_superuser
if self.cleaned_data["password1"]:
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
self.save_m2m()
return user
class GroupForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(GroupForm, self).__init__(*args, **kwargs)
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.fields['permissions'].queryset = self.registered_permissions
required_css_class = "required"
error_messages = {
'duplicate_name': _("A group with that name already exists."),
}
is_superuser = forms.BooleanField(
label=_("Administrator"),
required=False,
help_text=_("Administrators have the ability to manage user accounts.")
)
class Meta:
model = Group
fields = ("name", "permissions", )
def clean_name(self):
# Since Group.name is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
name = self.cleaned_data["name"]
try:
Group._default_manager.exclude(id=self.instance.id).get(name=name)
except Group.DoesNotExist:
return name
raise forms.ValidationError(self.error_messages['duplicate_name'])
def save(self):
# We go back to the object to read (in order to reapply) the
# permissions which were set on this group, but which are not
# accessible in the wagtail admin interface, as otherwise these would
# be clobbered by this form.
try:
untouchable_permissions = self.instance.permissions.exclude(pk__in=self.registered_permissions)
bool(untouchable_permissions) # force this to be evaluated, as it's about to change
except ValueError:
# this form is not bound; we're probably creating a new group
untouchable_permissions = []
group = super(GroupForm, self).save()
group.permissions.add(*untouchable_permissions)
return group
class GroupPagePermissionForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(GroupPagePermissionForm, self).__init__(*args, **kwargs)
self.fields['page'].widget = forms.HiddenInput()
class Meta:
model = GroupPagePermission
fields = ('page', 'permission_type')
class BaseGroupPagePermissionFormSet(forms.models.BaseInlineFormSet):
def __init__(self, *args, **kwargs):
super(BaseGroupPagePermissionFormSet, self).__init__(*args, **kwargs)
self.form = GroupPagePermissionForm
for form in self.forms:
form.fields['DELETE'].widget = forms.HiddenInput()
@property
def empty_form(self):
empty_form = super(BaseGroupPagePermissionFormSet, self).empty_form
empty_form.fields['DELETE'].widget = forms.HiddenInput()
return empty_form
class NotificationPreferencesForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(NotificationPreferencesForm, self).__init__(*args, **kwargs)
user_perms = UserPagePermissionsProxy(self.instance.user)
if not user_perms.can_publish_pages():
del self.fields['submitted_notifications']
if not user_perms.can_edit_pages():
del self.fields['approved_notifications']
del self.fields['rejected_notifications']
class Meta:
model = UserProfile
fields = ("submitted_notifications", "approved_notifications", "rejected_notifications")
| 37.377193
| 107
| 0.662873
|
18677e029cd98f78fd12118c7aba978ef52b2fa1
| 271
|
py
|
Python
|
1337. The K Weakest Rows in a Matrix/main.py
|
amanchadha/LeetCode
|
20dddf0616351ad399f0fa03cb6a2b5cbdd25279
|
[
"MIT"
] | 1
|
2021-07-18T06:18:40.000Z
|
2021-07-18T06:18:40.000Z
|
1337. The K Weakest Rows in a Matrix/main.py
|
amanchadha/LeetCode
|
20dddf0616351ad399f0fa03cb6a2b5cbdd25279
|
[
"MIT"
] | null | null | null |
1337. The K Weakest Rows in a Matrix/main.py
|
amanchadha/LeetCode
|
20dddf0616351ad399f0fa03cb6a2b5cbdd25279
|
[
"MIT"
] | 3
|
2020-09-27T05:48:30.000Z
|
2021-08-13T10:07:08.000Z
|
from typing import List
class Solution:
def kWeakestRows(self, mat: List[List[int]], k: int) -> List[int]:
res = []
m, n = len(mat), len(mat[0])
for i in range(m):
res.append((mat[i].count(1), i))
res.sort()
return [e[1] for e in res[:k]]
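# Quick check (added for illustration). Soldiers (1s) always precede civilians
# (0s), so counting the 1s per row and sorting (count, row_index) pairs yields
# the k weakest rows in O(m*n + m log m).
if __name__ == "__main__":
    mat = [[1, 1, 0, 0, 0],
           [1, 1, 1, 1, 0],
           [1, 0, 0, 0, 0],
           [1, 1, 0, 0, 0],
           [1, 1, 1, 1, 1]]
    # Rows contain 2, 4, 1, 2 and 5 soldiers; the three weakest are rows 2, 0 and 3.
    print(Solution().kWeakestRows(mat, 3))  # -> [2, 0, 3]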
| 30.111111
| 70
| 0.487085
|
ccc57715c94127b71f63decd96559d708d35027c
| 19,469
|
py
|
Python
|
pymeasure/instruments/keithley/keithley6221.py
|
carez/pymeasure
|
32423d20ac13ee1ae889764ff46ad83271444b93
|
[
"MIT"
] | 153
|
2020-11-12T17:01:31.000Z
|
2022-03-29T09:54:38.000Z
|
pymeasure/instruments/keithley/keithley6221.py
|
carez/pymeasure
|
32423d20ac13ee1ae889764ff46ad83271444b93
|
[
"MIT"
] | 226
|
2020-11-12T07:36:11.000Z
|
2022-03-22T08:18:26.000Z
|
pymeasure/instruments/keithley/keithley6221.py
|
carez/pymeasure
|
32423d20ac13ee1ae889764ff46ad83271444b93
|
[
"MIT"
] | 85
|
2020-11-15T03:41:28.000Z
|
2022-03-25T07:14:18.000Z
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import time
import numpy as np
from pymeasure.instruments import Instrument, RangeException
from pymeasure.instruments.validators import truncated_range, strict_discrete_set
from .buffer import KeithleyBuffer
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class Keithley6221(Instrument, KeithleyBuffer):
""" Represents the Keithely 6221 AC and DC current source and provides a
high-level interface for interacting with the instrument.
.. code-block:: python
keithley = Keithley6221("GPIB::1")
keithley.clear()
# Use the keithley as an AC source
keithley.waveform_function = "square" # Set a square waveform
keithley.waveform_amplitude = 0.05 # Set the amplitude in Amps
keithley.waveform_offset = 0 # Set zero offset
keithley.source_compliance = 10 # Set compliance (limit) in V
keithley.waveform_dutycycle = 50 # Set duty cycle of wave in %
keithley.waveform_frequency = 347 # Set the frequency in Hz
keithley.waveform_ranging = "best" # Set optimal output ranging
keithley.waveform_duration_cycles = 100 # Set duration of the waveform
# Link end of waveform to Service Request status bit
keithley.operation_event_enabled = 128 # OSB listens to end of wave
keithley.srq_event_enabled = 128 # SRQ listens to OSB
keithley.waveform_arm() # Arm (load) the waveform
keithley.waveform_start() # Start the waveform
keithley.adapter.wait_for_srq() # Wait for the pulse to finish
keithley.waveform_abort() # Disarm (unload) the waveform
keithley.shutdown() # Disables output
"""
##########
# OUTPUT #
##########
source_enabled = Instrument.control(
"OUTPut?", "OUTPut %d",
"""A boolean property that controls whether the source is enabled, takes
values True or False. The convenience methods :meth:`~.Keithley6221.enable_source` and
:meth:`~.Keithley6221.disable_source` can also be used.""",
validator=strict_discrete_set,
values={True: 1, False: 0},
map_values=True
)
source_delay = Instrument.control(
":SOUR:DEL?", ":SOUR:DEL %g",
""" A floating point property that sets a manual delay for the source
after the output is turned on before a measurement is taken. When this
property is set, the auto delay is turned off. Valid values are
between 1e-3 [seconds] and 999999.999 [seconds].""",
validator=truncated_range,
values=[1e-3, 999999.999],
)
output_low_grounded = Instrument.control(
":OUTP:LTE?", "OUTP:LTE %d",
""" A boolean property that controls whether the low output of the triax
connection is connected to earth ground (True) or is floating (False). """,
validator=strict_discrete_set,
values={True: 1, False: 0},
map_values=True
)
##########
# SOURCE #
##########
source_current = Instrument.control(
":SOUR:CURR?", ":SOUR:CURR %g",
""" A floating point property that controls the source current
in Amps. """,
validator=truncated_range,
values=[-0.105, 0.105]
)
source_compliance = Instrument.control(
":SOUR:CURR:COMP?", ":SOUR:CURR:COMP %g",
"""A floating point property that controls the compliance of the current
source in Volts. valid values are in range 0.1 [V] to 105 [V].""",
validator=truncated_range,
values=[0.1, 105])
source_range = Instrument.control(
":SOUR:CURR:RANG?", ":SOUR:CURR:RANG:AUTO 0;:SOUR:CURR:RANG %g",
""" A floating point property that controls the source current
range in Amps, which can take values between -0.105 A and +0.105 A.
Auto-range is disabled when this property is set. """,
validator=truncated_range,
values=[-0.105, 0.105]
)
source_auto_range = Instrument.control(
":SOUR:CURR:RANG:AUTO?", ":SOUR:CURR:RANG:AUTO %d",
""" A boolean property that controls the auto range of the current source.
Valid values are True or False. """,
values={True: 1, False: 0},
map_values=True,
)
##################
# WAVE FUNCTIONS #
##################
waveform_function = Instrument.control(
":SOUR:WAVE:FUNC?", ":SOUR:WAVE:FUNC %s",
""" A string property that controls the selected wave function. Valid
values are "sine", "ramp", "square", "arbitrary1", "arbitrary2",
"arbitrary3" and "arbitrary4". """,
values={
"sine": "SIN",
"ramp": "RAMP",
"square": "SQU",
"arbitrary1": "ARB1",
"arbitrary2": "ARB2",
"arbitrary3": "ARB3",
"arbitrary4": "ARB4",
},
map_values=True
)
waveform_frequency = Instrument.control(
":SOUR:WAVE:FREQ?", ":SOUR:WAVE:FREQ %g",
"""A floating point property that controls the frequency of the
waveform in Hertz. Valid values are in range 1e-3 to 1e5. """,
validator=truncated_range,
values=[1e-3, 1e5]
)
waveform_amplitude = Instrument.control(
":SOUR:WAVE:AMPL?", ":SOUR:WAVE:AMPL %g",
"""A floating point property that controls the (peak) amplitude of the
waveform in Amps. Valid values are in range 2e-12 to 0.105. """,
validator=truncated_range,
values=[2e-12, 0.105]
)
waveform_offset = Instrument.control(
":SOUR:WAVE:OFFS?", ":SOUR:WAVE:OFFS %g",
"""A floating point property that controls the offset of the waveform
in Amps. Valid values are in range -0.105 to 0.105. """,
validator=truncated_range,
values=[-0.105, 0.105]
)
waveform_dutycycle = Instrument.control(
":SOUR:WAVE:DCYC?", ":SOUR:WAVE:DCYC %g",
"""A floating point property that controls the duty-cycle of the
waveform in percent for the square and ramp waves. Valid values are in
range 0 to 100. """,
validator=truncated_range,
values=[0, 100]
)
waveform_duration_time = Instrument.control(
":SOUR:WAVE:DUR:TIME?", ":SOUR:WAVE:DUR:TIME %g",
"""A floating point property that controls the duration of the
waveform in seconds. Valid values are in range 100e-9 to 999999.999.
""",
validator=truncated_range,
values=[100e-9, 999999.999]
)
waveform_duration_cycles = Instrument.control(
":SOUR:WAVE:DUR:CYCL?", ":SOUR:WAVE:DUR:CYCL %g",
"""A floating point property that controls the duration of the
waveform in cycles. Valid values are in range 1e-3 to 99999999900.
""",
validator=truncated_range,
values=[1e-3, 99999999900]
)
def waveform_duration_set_infinity(self):
""" Set the waveform duration to infinity.
"""
self.write(":SOUR:WAVE:DUR:TIME INF")
waveform_ranging = Instrument.control(
":SOUR:WAVE:RANG?", ":SOUR:WAVE:RANG %s",
""" A string property that controls the source ranging of the
waveform. Valid values are "best" and "fixed". """,
values={"best": "BEST", "fixed": "FIX"},
map_values=True,
)
waveform_use_phasemarker = Instrument.control(
":SOUR:WAVE:PMAR:STAT?", ":SOUR:WAVE:PMAR:STAT %s",
""" A boolean property that controls whether the phase marker option
        is turned on or off. Valid values are True (on) or False (off). Other
settings for the phase marker have not yet been implemented.""",
values={True: 1, False: 0},
map_values=True,
)
def waveform_arm(self):
""" Arm the current waveform function. """
self.write(":SOUR:WAVE:ARM")
def waveform_start(self):
""" Start the waveform output. Must already be armed """
self.write(":SOUR:WAVE:INIT")
def waveform_abort(self):
""" Abort the waveform output and disarm the waveform function. """
self.write(":SOUR:WAVE:ABOR")
def define_arbitary_waveform(self, datapoints, location=1):
""" Define the data points for the arbitrary waveform and copy the
defined waveform into the given storage location.
:param datapoints: a list (or numpy array) of the data points; all
values have to be between -1 and 1; 100 points maximum.
:param location: integer storage location to store the waveform in.
Value must be in range 1 to 4.
"""
# Check validity of parameters
if not isinstance(datapoints, (list, np.ndarray)):
raise ValueError("datapoints must be a list or numpy array")
elif len(datapoints) > 100:
raise ValueError("datapoints cannot be longer than 100 points")
elif not all([x >= -1 and x <= 1 for x in datapoints]):
raise ValueError("all data points must be between -1 and 1")
if location not in [1, 2, 3, 4]:
raise ValueError("location must be in [1, 2, 3, 4]")
# Make list of strings
datapoints = [str(x) for x in datapoints]
data = ", ".join(datapoints)
# Write the data points to the Keithley 6221
self.write(":SOUR:WAVE:ARB:DATA %s" % data)
# Copy the written data to the specified location
self.write(":SOUR:WAVE:ARB:COPY %d" % location)
# Select the newly made arbitrary waveform as waveform function
self.waveform_function = "arbitrary%d" % location
def __init__(self, adapter, **kwargs):
super(Keithley6221, self).__init__(
adapter, "Keithley 6221 SourceMeter", **kwargs
)
def enable_source(self):
""" Enables the source of current or voltage depending on the
configuration of the instrument. """
self.write("OUTPUT ON")
def disable_source(self):
""" Disables the source of current or voltage depending on the
configuration of the instrument. """
self.write("OUTPUT OFF")
def beep(self, frequency, duration):
""" Sounds a system beep.
:param frequency: A frequency in Hz between 65 Hz and 2 MHz
:param duration: A time in seconds between 0 and 7.9 seconds
"""
self.write(":SYST:BEEP %g, %g" % (frequency, duration))
def triad(self, base_frequency, duration):
""" Sounds a musical triad using the system beep.
:param base_frequency: A frequency in Hz between 65 Hz and 1.3 MHz
:param duration: A time in seconds between 0 and 7.9 seconds
"""
self.beep(base_frequency, duration)
time.sleep(duration)
self.beep(base_frequency * 5.0 / 4.0, duration)
time.sleep(duration)
self.beep(base_frequency * 6.0 / 4.0, duration)
display_enabled = Instrument.control(
":DISP:ENAB?", ":DISP:ENAB %d",
""" A boolean property that controls whether or not the display of the
sourcemeter is enabled. Valid values are True and False. """,
values={True: 1, False: 0},
map_values=True,
)
@property
def error(self):
""" Returns a tuple of an error code and message from a
single error. """
err = self.values(":system:error?")
if len(err) < 2:
err = self.read() # Try reading again
code = err[0]
message = err[1].replace('"', '')
return (code, message)
def check_errors(self):
""" Logs any system errors reported by the instrument.
"""
code, message = self.error
while code != 0:
t = time.time()
log.info("Keithley 6221 reported error: %d, %s" % (code, message))
code, message = self.error
if (time.time() - t) > 10:
log.warning("Timed out for Keithley 6221 error retrieval.")
def reset(self):
""" Resets the instrument and clears the queue. """
self.write("status:queue:clear;*RST;:stat:pres;:*CLS;")
def trigger(self):
""" Executes a bus trigger, which can be used when
:meth:`~.trigger_on_bus` is configured.
"""
return self.write("*TRG")
def trigger_immediately(self):
""" Configures measurements to be taken with the internal
trigger at the maximum sampling rate.
"""
self.write(":ARM:SOUR IMM;:TRIG:SOUR IMM;")
def trigger_on_bus(self):
""" Configures the trigger to detect events based on the bus
trigger, which can be activated by :meth:`~.trigger`.
"""
self.write(":ARM:SOUR BUS;:TRIG:SOUR BUS;")
def set_timed_arm(self, interval):
""" Sets up the measurement to be taken with the internal
trigger at a variable sampling rate defined by the interval
in seconds between sampling points
"""
if interval > 99999.99 or interval < 0.001:
raise RangeException("Keithley 6221 can only be time"
" triggered between 1 mS and 1 Ms")
self.write(":ARM:SOUR TIM;:ARM:TIM %.3f" % interval)
def trigger_on_external(self, line=1):
""" Configures the measurement trigger to be taken from a
specific line of an external trigger
:param line: A trigger line from 1 to 4
"""
cmd = ":ARM:SOUR TLIN;:TRIG:SOUR TLIN;"
cmd += ":ARM:ILIN %d;:TRIG:ILIN %d;" % (line, line)
self.write(cmd)
def output_trigger_on_external(self, line=1, after='DEL'):
""" Configures the output trigger on the specified trigger link
line number, with the option of supplying the part of the
measurement after which the trigger should be generated
(default to delay, which is right before the measurement)
:param line: A trigger line from 1 to 4
:param after: An event string that determines when to trigger
"""
self.write(":TRIG:OUTP %s;:TRIG:OLIN %d;" % (after, line))
def disable_output_trigger(self):
""" Disables the output trigger for the Trigger layer
"""
self.write(":TRIG:OUTP NONE")
def shutdown(self):
""" Disables the output. """
log.info("Shutting down %s." % self.name)
self.disable_source()
###############
# Status bits #
###############
measurement_event_enabled = Instrument.control(
":STAT:MEAS:ENAB?", ":STAT:MEAS:ENAB %d",
""" An integer value that controls which measurement events are
registered in the Measurement Summary Bit (MSB) status bit. Refer to
the Model 6220/6221 Reference Manual for more information about
programming the status bits.
""",
cast=int,
validator=truncated_range,
values=[0, 65535],
)
operation_event_enabled = Instrument.control(
":STAT:OPER:ENAB?", ":STAT:OPER:ENAB %d",
""" An integer value that controls which operation events are
registered in the Operation Summary Bit (OSB) status bit. Refer to
the Model 6220/6221 Reference Manual for more information about
programming the status bits.
""",
cast=int,
validator=truncated_range,
values=[0, 65535],
)
questionable_event_enabled = Instrument.control(
":STAT:QUES:ENAB?", ":STAT:QUES:ENAB %d",
""" An integer value that controls which questionable events are
registered in the Questionable Summary Bit (QSB) status bit. Refer to
the Model 6220/6221 Reference Manual for more information about
programming the status bits.
""",
cast=int,
validator=truncated_range,
values=[0, 65535],
)
standard_event_enabled = Instrument.control(
"ESE?", "ESE %d",
""" An integer value that controls which standard events are
registered in the Event Summary Bit (ESB) status bit. Refer to
the Model 6220/6221 Reference Manual for more information about
programming the status bits.
""",
cast=int,
validator=truncated_range,
values=[0, 65535],
)
srq_event_enabled = Instrument.control(
"*SRE?", "*SRE %d",
""" An integer value that controls which event registers trigger the
Service Request (SRQ) status bit. Refer to the Model 6220/6221
Reference Manual for more information about programming the status
bits.
""",
cast=int,
validator=truncated_range,
values=[0, 255],
)
measurement_events = Instrument.measurement(
":STAT:MEAS?",
""" An integer value that reads which measurement events have been
registered in the Measurement event registers. Refer to the Model
6220/6221 Reference Manual for more information about programming
the status bits. Reading this value clears the register.
""",
cast=int,
)
operation_events = Instrument.measurement(
":STAT:OPER?",
""" An integer value that reads which operation events have been
registered in the Operation event registers. Refer to the Model
6220/6221 Reference Manual for more information about programming
the status bits. Reading this value clears the register.
""",
cast=int,
)
questionable_events = Instrument.measurement(
":STAT:QUES?",
""" An integer value that reads which questionable events have been
registered in the Questionable event registers. Refer to the Model
6220/6221 Reference Manual for more information about programming
the status bits. Reading this value clears the register.
""",
cast=int,
)
standard_events = Instrument.measurement(
"*ESR?",
""" An integer value that reads which standard events have been
registered in the Standard event registers. Refer to the Model
6220/6221 Reference Manual for more information about programming
the status bits. Reading this value clears the register.
""",
cast=int,
)
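# Illustrative usage sketch: `instrument` is assumed to be an already-connected
# instance of the driver class defined above; the interval and trigger-link line
# below are arbitrary example values, not settings required by the instrument.
def _example_timed_measurement(instrument, interval=0.1, line=1):
    """ Arms the instrument for internally timed sampling and routes an output
    trigger to an external trigger-link line after each delay phase.
    """
    instrument.reset()                           # clear state and the error queue
    instrument.set_timed_arm(interval)           # sample every `interval` seconds
    instrument.output_trigger_on_external(line)  # pulse the trigger link (after='DEL')
    # ... run the measurement here ...
    instrument.shutdown()                        # disable the output when finished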
avg_line_length: 38.552475 | max_line_length: 94 | alphanum_fraction: 0.625045

hexsha: 26f3ae91c0ed02d50421d99bad1c94ac935b4564 | size: 76211 | ext: py | lang: Python
repo_path: orttraining/orttraining/test/python/orttraining_test_orttrainer_frontend.py
repo_name: vpisarev/onnxruntime | repo_head_hexsha: bab9b80f1f2330d3a115e0abbb4d8278c2be3f44
licenses: ["MIT"] | stars/issues/forks counts: null | event datetimes: null
from functools import partial
import inspect
import math
from distutils.version import StrictVersion
from numpy.testing import assert_allclose
import onnx
import os
import pytest
import tempfile
import torch
import torch.nn.functional as F
from onnxruntime import set_seed
from onnxruntime.capi.ort_trainer import IODescription as Legacy_IODescription,\
ModelDescription as Legacy_ModelDescription,\
LossScaler as Legacy_LossScaler,\
ORTTrainer as Legacy_ORTTrainer
from onnxruntime.training import _utils, amp, checkpoint, optim, orttrainer, TrainStepInfo,\
model_desc_validation as md_val,\
orttrainer_options as orttrainer_options
import _test_commons,_test_helpers
from onnxruntime import SessionOptions
from onnxruntime.training import PropagateCastOpsStrategy
###############################################################################
# Testing starts here #########################################################
###############################################################################
pytorch_110 = StrictVersion('.'.join(torch.__version__.split('.')[:2])) >= StrictVersion('1.10.0')
def get_model_opset(model_onnx):
for op in model_onnx.opset_import:
if op.domain == '':
return op.version
return None
@pytest.mark.parametrize("test_input", [
({}),
({'batch': {},
'device': {},
'distributed': {},
'mixed_precision': {},
'utils': {},
'_internal_use': {}})
])
def testORTTrainerOptionsDefaultValues(test_input):
''' Test different ways of using default values for incomplete input'''
expected_values = {
'batch': {
'gradient_accumulation_steps': 1
},
'device': {
'id': 'cuda',
'mem_limit': 0
},
'distributed': {
'world_rank': 0,
'world_size': 1,
'local_rank': 0,
'data_parallel_size': 1,
'horizontal_parallel_size': 1,
'pipeline_parallel' : {
'pipeline_parallel_size': 1,
'num_pipeline_micro_batches':1,
'pipeline_cut_info_string': '',
'sliced_schema' : {},
'sliced_axes' : {},
'sliced_tensor_names': []
},
'allreduce_post_accumulation': False,
'data_parallel_size': 1,
'horizontal_parallel_size':1,
'deepspeed_zero_optimization': {
'stage' : 0,
},
'enable_adasum': False,
},
'lr_scheduler': None,
'mixed_precision': {
'enabled': False,
'loss_scaler': None
},
'graph_transformer': {
'attn_dropout_recompute': False,
'gelu_recompute': False,
'transformer_layer_recompute': False,
'number_recompute_layers': 0,
'allow_layer_norm_mod_precision': False,
'propagate_cast_ops_config': {
'strategy': PropagateCastOpsStrategy.FLOOD_FILL,
'level': 1,
'allow': []
}
},
'utils': {
'frozen_weights': [],
'grad_norm_clip': True,
'memory_efficient_gradient': False,
'run_symbolic_shape_infer': False
},
'debug': {
'deterministic_compute': False,
'check_model_export': False,
'graph_save_paths' : {
'model_after_graph_transforms_path': '',
'model_with_gradient_graph_path': '',
'model_with_training_graph_path': '',
'model_with_training_graph_after_optimization_path': ''
}
},
'_internal_use': {
'enable_internal_postprocess': True,
'extra_postprocess': None,
'onnx_opset_version' : 14,
'enable_onnx_contrib_ops': True,
},
'provider_options':{},
'session_options': None,
}
actual_values = orttrainer_options.ORTTrainerOptions(test_input)
assert actual_values._validated_opts == expected_values
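# Illustrative sketch (not exercised by the test runner): user-supplied values are
# merged on top of the defaults asserted above, and the validated options are
# exposed as attributes. The option values below are arbitrary examples.
def _example_options_override():
    opts = orttrainer_options.ORTTrainerOptions({
        'device': {'id': 'cuda'},
        'batch': {'gradient_accumulation_steps': 4},
        'debug': {'deterministic_compute': True}})
    assert opts.device.id == 'cuda'
    assert opts.batch.gradient_accumulation_steps == 4
    # unspecified sections keep the defaults asserted above
    assert opts._validated_opts['mixed_precision']['enabled'] is False
    return opts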
@pytest.mark.parametrize("input,error_msg", [
({'mixed_precision': {'enabled': 1}},\
"Invalid options: {'mixed_precision': [{'enabled': ['must be of boolean type']}]}")
])
def testORTTrainerOptionsInvalidMixedPrecisionEnabledSchema(input, error_msg):
'''Test an invalid input based on schema validation error message'''
with pytest.raises(ValueError) as e:
orttrainer_options.ORTTrainerOptions(input)
assert str(e.value) == error_msg
@pytest.mark.parametrize("input_dict,input_dtype,output_dtype", [
({'inputs': [('in0', [])],
'outputs': [('out0', []), ('out1', [])]},(torch.int,),(torch.float,torch.int32,)),
({'inputs': [('in0', ['batch', 2, 3])],
'outputs': [('out0', [], True)]}, (torch.int8,), (torch.int16,)),
({'inputs': [('in0', []), ('in1', [1]), ('in2', [1, 2]), ('in3', [1000, 'dyn_ax1']), ('in4', ['dyn_ax1', 'dyn_ax2', 'dyn_ax3'])],
'outputs': [('out0', [], True), ('out1', [1], False), ('out2', [1, 'dyn_ax1', 3])]},
(torch.float,torch.uint8,torch.bool,torch.double,torch.half,), (torch.float,torch.float,torch.int64))
])
def testORTTrainerModelDescValidSchemas(input_dict, input_dtype, output_dtype):
    r'''Test valid model_desc schemas and the typed input/output descriptions built from them'''
model_description = md_val._ORTTrainerModelDesc(input_dict)
# Validating hard-coded learning rate description
assert model_description.learning_rate.name == md_val.LEARNING_RATE_IO_DESCRIPTION_NAME
assert model_description.learning_rate.shape == [1]
assert model_description.learning_rate.dtype == torch.float32
# Validating model description from user
for idx, i_desc in enumerate(model_description.inputs):
assert isinstance(i_desc, model_description._InputDescription)
assert len(i_desc) == 2
assert input_dict['inputs'][idx][0] == i_desc.name
assert input_dict['inputs'][idx][1] == i_desc.shape
for idx, o_desc in enumerate(model_description.outputs):
assert isinstance(o_desc, model_description._OutputDescription)
assert len(o_desc) == 3
assert input_dict['outputs'][idx][0] == o_desc.name
assert input_dict['outputs'][idx][1] == o_desc.shape
is_loss = input_dict['outputs'][idx][2] if len(input_dict['outputs'][idx]) == 3 else False
assert is_loss == o_desc.is_loss
# Set all_finite name and check its description
model_description.all_finite = md_val.ALL_FINITE_IO_DESCRIPTION_NAME
assert model_description.all_finite.name == md_val.ALL_FINITE_IO_DESCRIPTION_NAME
assert model_description.all_finite.shape == [1]
assert model_description.all_finite.dtype == torch.bool
# Set loss_scale_input and check its description
model_description.loss_scale_input = md_val.LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME
assert model_description.loss_scale_input.name == md_val.LOSS_SCALE_INPUT_IO_DESCRIPTION_NAME
assert model_description.loss_scale_input.shape == []
assert model_description.loss_scale_input.dtype == torch.float32
# Append type to inputs/outputs tuples
for idx, i_desc in enumerate(model_description.inputs):
model_description.add_type_to_input_description(idx, input_dtype[idx])
for idx, o_desc in enumerate(model_description.outputs):
model_description.add_type_to_output_description(idx, output_dtype[idx])
# Verify inputs/outputs tuples are replaced by the typed counterparts
for idx, i_desc in enumerate(model_description.inputs):
assert isinstance(i_desc, model_description._InputDescriptionTyped)
assert input_dtype[idx] == i_desc.dtype
for idx, o_desc in enumerate(model_description.outputs):
assert isinstance(o_desc, model_description._OutputDescriptionTyped)
assert output_dtype[idx] == o_desc.dtype
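# A concrete model_desc sketch in the format validated above: each input/output is
# a (name, shape) tuple, shapes may mix integers and symbolic axis names, and at
# most one output may be flagged as the loss. The names and sizes are illustrative.
_example_model_desc = {
    'inputs': [('input_ids', ['batch', 'seq_len']),
               ('label', ['batch'])],
    'outputs': [('loss', [], True),            # is_loss=True
                ('logits', ['batch', 'seq_len', 1000])]}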
@pytest.mark.parametrize("input_dict,error_msg", [
({'inputs': [(True, [])],
'outputs': [(True, [])]},
"Invalid model_desc: {'inputs': [{0: ['the first element of the tuple (aka name) must be a string']}], "
"'outputs': [{0: ['the first element of the tuple (aka name) must be a string']}]}"),
({'inputs': [('in1', None)],
'outputs': [('out1', None)]},
"Invalid model_desc: {'inputs': [{0: ['the second element of the tuple (aka shape) must be a list']}], "
"'outputs': [{0: ['the second element of the tuple (aka shape) must be a list']}]}"),
({'inputs': [('in1', [])],
'outputs': [('out1', [], None)]},
"Invalid model_desc: {'outputs': [{0: ['the third element of the tuple (aka is_loss) must be a boolean']}]}"),
({'inputs': [('in1', [True])],
'outputs': [('out1', [True])]},
"Invalid model_desc: {'inputs': [{0: ['each shape must be either a string or integer']}], "
"'outputs': [{0: ['each shape must be either a string or integer']}]}"),
({'inputs': [('in1', [])],
'outputs': [('out1', [], True), ('out2', [], True)]},
"Invalid model_desc: {'outputs': [{1: ['only one is_loss can bet set to True']}]}"),
({'inputz': [('in1', [])],
'outputs': [('out1', [], True)]},
"Invalid model_desc: {'inputs': ['required field'], 'inputz': ['unknown field']}"),
({'inputs': [('in1', [])],
'outputz': [('out1', [], True)]},
"Invalid model_desc: {'outputs': ['required field'], 'outputz': ['unknown field']}"),
])
def testORTTrainerModelDescInvalidSchemas(input_dict, error_msg):
    r'''Test invalid model_desc schemas and the validation error messages they produce'''
with pytest.raises(ValueError) as e:
md_val._ORTTrainerModelDesc(input_dict)
assert str(e.value) == error_msg
def testDynamicLossScaler():
rtol = 1e-7
default_scaler = amp.loss_scaler.DynamicLossScaler()
# Initial state
train_step_info = orttrainer.TrainStepInfo(optim.LambConfig())
assert_allclose(default_scaler.loss_scale, float(1 << 16),
rtol=rtol, err_msg="loss scale mismatch")
assert default_scaler.up_scale_window == 2000
assert_allclose(default_scaler.min_loss_scale, 1.0,
rtol=rtol, err_msg="min loss scale mismatch")
assert_allclose(default_scaler.max_loss_scale, float(
1 << 24), rtol=rtol, err_msg="max loss scale mismatch")
# Performing 9*2000 updates to cover all branches of LossScaler.update(train_step_info.all_finite=True)
loss_scale = float(1 << 16)
for cycles in range(1, 10):
        # 1999 updates without overflow produce 1999 stable steps
for i in range(1, 2000):
new_loss_scale = default_scaler.update(train_step_info)
assert default_scaler._stable_steps_count == i
assert_allclose(new_loss_scale, loss_scale,
rtol=rtol, err_msg=f"loss scale mismatch at update {i}")
        # The 2000th update without overflow doubles the loss scale and resets the stable-step count to zero, until max_loss_scale is reached
new_loss_scale = default_scaler.update(train_step_info)
if cycles <= 8:
loss_scale *= 2
assert default_scaler._stable_steps_count == 0
assert_allclose(new_loss_scale, loss_scale,
rtol=rtol, err_msg="loss scale mismatch")
# After 8 cycles, loss scale should be float(1 << 16)*(2**8)
assert_allclose(new_loss_scale, float(1 << 16)
* (2**8), rtol=rtol, err_msg="loss scale mismatch")
# After 9 cycles, loss scale reaches max_loss_scale and it is not doubled from that point on
loss_scale = float(1 << 16)*(2**8)
for count in range(1, 2050):
new_loss_scale = default_scaler.update(train_step_info)
assert default_scaler._stable_steps_count == (count % 2000)
assert_allclose(new_loss_scale, loss_scale,
rtol=rtol, err_msg="loss scale mismatch")
# Setting train_step_info.all_finite = False to test down scaling
train_step_info.all_finite = False
# Performing 24 updates to half the loss scale each time
loss_scale = float(1 << 16)*(2**8)
for count in range(1, 25):
new_loss_scale = default_scaler.update(train_step_info)
loss_scale /= 2
assert default_scaler._stable_steps_count == 0
assert_allclose(new_loss_scale, loss_scale,
rtol=rtol, err_msg="loss scale mismatch")
# After 24 updates with gradient overflow, loss scale is 1.0
assert_allclose(new_loss_scale, 1.,
rtol=rtol, err_msg="loss scale mismatch")
    # After 25 updates, min_loss_scale is reached and the loss scale is not halved from that point on
for count in range(1, 5):
new_loss_scale = default_scaler.update(train_step_info)
assert default_scaler._stable_steps_count == 0
assert_allclose(new_loss_scale, loss_scale,
rtol=rtol, err_msg="loss scale mismatch")
def testDynamicLossScalerCustomValues():
rtol = 1e-7
scaler = amp.loss_scaler.DynamicLossScaler(automatic_update=False,
loss_scale=3,
up_scale_window=7,
min_loss_scale=5,
max_loss_scale=10)
assert scaler.automatic_update == False
assert_allclose(scaler.loss_scale, 3, rtol=rtol,
err_msg="loss scale mismatch")
assert_allclose(scaler.min_loss_scale, 5, rtol=rtol,
err_msg="min loss scale mismatch")
assert_allclose(scaler.max_loss_scale, 10, rtol=rtol,
err_msg="max loss scale mismatch")
assert scaler.up_scale_window == 7
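# Sketch of how a loss scaler is typically consumed (illustrative): it is handed to
# ORTTrainerOptions under 'mixed_precision', as the fp16 tests further below also do.
def _example_loss_scaler_options():
    scaler = amp.loss_scaler.DynamicLossScaler()
    return orttrainer.ORTTrainerOptions({
        'mixed_precision': {'enabled': True, 'loss_scaler': scaler}})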
def testTrainStepInfo():
'''Test valid initializations of TrainStepInfo'''
optimizer_config = optim.LambConfig()
fetches=['out1','out2']
step_info = orttrainer.TrainStepInfo(optimizer_config=optimizer_config,
all_finite=False,
fetches=fetches,
optimization_step=123,
step=456)
assert step_info.optimizer_config == optimizer_config
assert step_info.all_finite == False
assert step_info.fetches == fetches
assert step_info.optimization_step == 123
assert step_info.step == 456
step_info = orttrainer.TrainStepInfo(optimizer_config)
assert step_info.optimizer_config == optimizer_config
assert step_info.all_finite == True
assert step_info.fetches == []
assert step_info.optimization_step == 0
assert step_info.step == 0
@pytest.mark.parametrize("invalid_input", [
(-1),
('Hello'),
])
def testTrainStepInfoInvalidInput(invalid_input):
'''Test invalid initialization of TrainStepInfo'''
optimizer_config = optim.LambConfig()
with pytest.raises(AssertionError):
orttrainer.TrainStepInfo(optimizer_config=invalid_input)
with pytest.raises(AssertionError):
orttrainer.TrainStepInfo(optimizer_config, all_finite=invalid_input)
with pytest.raises(AssertionError):
orttrainer.TrainStepInfo(optimizer_config, fetches=invalid_input)
with pytest.raises(AssertionError):
orttrainer.TrainStepInfo(optimizer_config, optimization_step=invalid_input)
with pytest.raises(AssertionError):
orttrainer.TrainStepInfo(optimizer_config, step=invalid_input)
@pytest.mark.parametrize("optim_name,lr,alpha,default_alpha", [
('AdamOptimizer', .1, .2, None),
('LambOptimizer', .2, .3, None),
('SGDOptimizer', .3, .4, None),
('SGDOptimizer', .3, .4, .5)
])
def testOptimizerConfig(optim_name, lr, alpha, default_alpha):
'''Test initialization of _OptimizerConfig'''
defaults = {'lr': lr, 'alpha': alpha}
params = [{'params': ['fc1.weight', 'fc2.weight']}]
if default_alpha is not None:
params[0].update({'alpha': default_alpha})
else:
params[0].update({'alpha': alpha})
cfg = optim.config._OptimizerConfig(
name=optim_name, params=params, defaults=defaults)
assert cfg.name == optim_name
rtol = 1e-07
assert_allclose(defaults['lr'],
cfg.lr, rtol=rtol, err_msg="lr mismatch")
    # 1:1 mapping between the defaults and each parameter group's hyper parameters
for param in params:
for k, _ in param.items():
if k != 'params':
                assert k in cfg.defaults, f"hyper parameter {k} not present in the defaults"
for k, _ in cfg.defaults.items():
for param in cfg.params:
            assert k in param, f"hyper parameter {k} not present in one of the parameter groups"
@pytest.mark.parametrize("optim_name,defaults,params", [
('AdamOptimizer', {'lr': -1}, []), # invalid lr
('FooOptimizer', {'lr': 0.001}, []), # invalid name
('SGDOptimizer', [], []), # invalid type(defaults)
(optim.AdamConfig, {'lr': 0.003}, []), # invalid type(name)
('AdamOptimizer', {'lr': None}, []), # missing 'lr' hyper parameter
('SGDOptimizer', {'lr': 0.004}, {}), # invalid type(params)
# invalid type(params[i])
('AdamOptimizer', {'lr': 0.005, 'alpha': 2}, [[]]),
# missing 'params' at 'params'
('AdamOptimizer', {'lr': 0.005, 'alpha': 2}, [{'alpha': 1}]),
# missing 'alpha' at 'defaults'
('AdamOptimizer', {'lr': 0.005}, [{'params': 'param1', 'alpha': 1}]),
])
def testOptimizerConfigInvalidInputs(optim_name, defaults, params):
'''Test invalid initialization of _OptimizerConfig'''
with pytest.raises(AssertionError):
optim.config._OptimizerConfig(
name=optim_name, params=params, defaults=defaults)
def testOptimizerConfigSGD():
'''Test initialization of SGD'''
cfg = optim.SGDConfig()
assert cfg.name == 'SGDOptimizer'
rtol = 1e-07
assert_allclose(0.001, cfg.lr, rtol=rtol, err_msg="lr mismatch")
cfg = optim.SGDConfig(lr=0.002)
assert_allclose(0.002, cfg.lr, rtol=rtol, err_msg="lr mismatch")
# SGD does not support params
with pytest.raises(AssertionError) as e:
params = [{'params': ['layer1.weight'], 'lr': 0.1}]
optim.SGDConfig(params=params, lr=0.002)
assert_allclose(0.002, cfg.lr, rtol=rtol, err_msg="lr mismatch")
assert str(e.value) == "'params' must be an empty list for SGD optimizer"
def testOptimizerConfigAdam():
'''Test initialization of Adam'''
cfg = optim.AdamConfig()
assert cfg.name == 'AdamOptimizer'
rtol = 1e-7
assert_allclose(0.001, cfg.lr, rtol=rtol, err_msg="lr mismatch")
assert_allclose(0.9, cfg.alpha, rtol=rtol, err_msg="alpha mismatch")
assert_allclose(0.999, cfg.beta, rtol=rtol, err_msg="beta mismatch")
assert_allclose(0.0, cfg.lambda_coef, rtol=rtol,
err_msg="lambda_coef mismatch")
assert_allclose(1e-8, cfg.epsilon, rtol=rtol, err_msg="epsilon mismatch")
assert_allclose(1.0, cfg.max_norm_clip, rtol=rtol, err_msg="max_norm_clip mismatch")
    assert cfg.do_bias_correction == True, "do_bias_correction mismatch"
assert cfg.weight_decay_mode == optim.AdamConfig.DecayMode.BEFORE_WEIGHT_UPDATE, "weight_decay_mode mismatch"
def testOptimizerConfigLamb():
'''Test initialization of Lamb'''
cfg = optim.LambConfig()
assert cfg.name == 'LambOptimizer'
rtol = 1e-7
assert_allclose(0.001, cfg.lr, rtol=rtol, err_msg="lr mismatch")
assert_allclose(0.9, cfg.alpha, rtol=rtol, err_msg="alpha mismatch")
assert_allclose(0.999, cfg.beta, rtol=rtol, err_msg="beta mismatch")
assert_allclose(0.0, cfg.lambda_coef, rtol=rtol,
err_msg="lambda_coef mismatch")
assert cfg.ratio_min == float('-inf'), "ratio_min mismatch"
assert cfg.ratio_max == float('inf'), "ratio_max mismatch"
assert_allclose(1e-6, cfg.epsilon, rtol=rtol, err_msg="epsilon mismatch")
assert_allclose(1.0, cfg.max_norm_clip, rtol=rtol, err_msg="max_norm_clip mismatch")
assert cfg.do_bias_correction == False, "do_bias_correction mismatch"
@pytest.mark.parametrize("optim_name", [
('Adam'),
('Lamb')
])
def testOptimizerConfigParams(optim_name):
rtol = 1e-7
params = [{'params': ['layer1.weight'], 'alpha': 0.1}]
if optim_name == 'Adam':
cfg = optim.AdamConfig(params=params, alpha=0.2)
elif optim_name == 'Lamb':
cfg = optim.LambConfig(params=params, alpha=0.2)
else:
raise ValueError('invalid input')
assert len(cfg.params) == 1, "params should have length 1"
assert_allclose(cfg.params[0]['alpha'], 0.1,
rtol=rtol, err_msg="invalid lr on params[0]")
@pytest.mark.parametrize("optim_name", [
('Adam'),
('Lamb')
])
def testOptimizerConfigInvalidParams(optim_name):
# lr is not supported within params
with pytest.raises(AssertionError) as e:
params = [{'params': ['layer1.weight'], 'lr': 0.1}]
if optim_name == 'Adam':
optim.AdamConfig(params=params, lr=0.2)
elif optim_name == 'Lamb':
optim.LambConfig(params=params, lr=0.2)
else:
raise ValueError('invalid input')
assert str(e.value) == "'lr' is not supported inside params"
def testLinearLRSchedulerCreation():
total_steps = 10
warmup = 0.05
lr_scheduler = optim.lr_scheduler.LinearWarmupLRScheduler(total_steps,
warmup)
# Initial state
assert lr_scheduler.total_steps == total_steps
assert lr_scheduler.warmup == warmup
@pytest.mark.parametrize("lr_scheduler,expected_values", [
(optim.lr_scheduler.ConstantWarmupLRScheduler,
[0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.0, 1.0, 1.0, 1.0]),
(optim.lr_scheduler.CosineWarmupLRScheduler,
[0.0, 0.9763960957919413, 0.9059835861602854, 0.7956724530494887, 0.6563036824392345,\
0.5015739416158049, 0.34668951940611276, 0.2068719061737831, 0.09586187986225325, 0.0245691111902418]),
(optim.lr_scheduler.LinearWarmupLRScheduler,
[0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 0.8, 0.6, 0.4, 0.2]),
(optim.lr_scheduler.PolyWarmupLRScheduler,
[0.0, 0.9509018036072144, 0.9008016032064128, 0.8507014028056112, 0.8006012024048097,\
0.750501002004008, 0.7004008016032064, 0.6503006012024048, 0.6002004008016032, 0.5501002004008015])
])
def testLRSchedulerUpdateImpl(lr_scheduler, expected_values):
# Test tolerance
rtol = 1e-03
# Initial state
initial_lr = 1
total_steps = 10
warmup = 0.5
optimizer_config = optim.SGDConfig(lr=initial_lr)
lr_scheduler = lr_scheduler(total_steps, warmup)
# First half is warmup
for optimization_step in range(total_steps):
        # Emulate the ORTTrainer.train_step() call that updates its train_step_info
train_step_info = TrainStepInfo(optimizer_config=optimizer_config, optimization_step=optimization_step)
lr_scheduler._step(train_step_info)
lr_list = lr_scheduler.get_last_lr()
assert len(lr_list) == 1
assert_allclose(lr_list[0],
expected_values[optimization_step], rtol=rtol, err_msg="lr mismatch")
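# Sketch of attaching a scheduler to a trainer (illustrative): the scheduler is passed
# through ORTTrainerOptions and stepped implicitly by train_step, as the instantiation
# test below does; get_last_lr() then exposes the most recent learning rate.
def _example_lr_scheduler_options(total_steps=100):
    scheduler = optim.lr_scheduler.LinearWarmupLRScheduler(total_steps=total_steps,
                                                           warmup=0.1)
    return orttrainer.ORTTrainerOptions({'lr_scheduler': scheduler})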
def testInstantiateORTTrainerOptions():
session_options = SessionOptions()
session_options.enable_mem_pattern = False
provider_options = {'EP1': {'key':'val'}}
opts = {'session_options' : session_options,
'provider_options' : provider_options}
opts = orttrainer.ORTTrainerOptions(opts)
assert(opts.session_options.enable_mem_pattern is False)
assert(opts._validated_opts['provider_options']['EP1']['key'] == 'val')
@pytest.mark.parametrize("step_fn, lr_scheduler, expected_lr_values, device", [
('train_step', None, None, 'cuda'),
('eval_step', None, None, 'cpu'),
('train_step', optim.lr_scheduler.ConstantWarmupLRScheduler,
[0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.0, 1.0, 1.0, 1.0], 'cpu'),
('train_step', optim.lr_scheduler.CosineWarmupLRScheduler,
[0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 0.9045084971874737, 0.6545084971874737, 0.34549150281252633, 0.09549150281252633],
'cuda'),
('train_step', optim.lr_scheduler.LinearWarmupLRScheduler,
[0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 0.8, 0.6, 0.4, 0.2], 'cpu'),
('train_step', optim.lr_scheduler.PolyWarmupLRScheduler,
[0.0, 0.2, 0.4, 0.6, 0.8, 1.0, 0.80000002, 0.60000004, 0.40000006000000005, 0.20000007999999997], 'cuda')
])
def testInstantiateORTTrainer(step_fn, lr_scheduler, expected_lr_values, device):
total_steps = 1
initial_lr = 1.
rtol = 1e-3
# PyTorch Transformer model as example
opts = {'device' : {'id' : device}}
if lr_scheduler:
total_steps = 10
opts.update({'lr_scheduler' : lr_scheduler(total_steps=total_steps, warmup=0.5)})
opts = orttrainer.ORTTrainerOptions(opts)
optim_config = optim.LambConfig(lr=initial_lr)
model, model_desc, my_loss, batcher_fn, train_data, val_data, _ = _test_commons._load_pytorch_transformer_model(device)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts)
# Run a train or evaluation step
if step_fn == 'eval_step':
data, targets = batcher_fn(val_data, 0)
elif step_fn == 'train_step':
data, targets = batcher_fn(train_data, 0)
else:
raise ValueError('Invalid step_fn')
# Export model to ONNX
if step_fn == 'eval_step':
step_fn = trainer.eval_step
output = trainer.eval_step(data, targets)
elif step_fn == 'train_step':
step_fn = trainer.train_step
for i in range(total_steps):
output = trainer.train_step(data, targets)
if lr_scheduler:
lr_list = trainer.options.lr_scheduler.get_last_lr()
assert_allclose(lr_list[0], expected_lr_values[i], rtol=rtol, err_msg="lr mismatch")
else:
raise ValueError('Invalid step_fn')
assert trainer._onnx_model is not None
# Check output shape after train/eval step
for out, desc in zip(output, trainer.model_desc.outputs):
if trainer.loss_fn and desc.is_loss:
continue
assert list(out.size()) == desc.shape
# Check name, shape and dtype of the first len(forward.parameters) ORT graph inputs
sig = inspect.signature(model.forward)
for i in range(len(sig.parameters.keys())):
input_name = trainer.model_desc.inputs[i][0]
input_dim = trainer.model_desc.inputs[i][1]
input_type = trainer.model_desc.inputs[i][2]
assert trainer._onnx_model.graph.input[i].name == input_name
for dim_idx, dim in enumerate(trainer._onnx_model.graph.input[i].type.tensor_type.shape.dim):
assert input_dim[dim_idx] == dim.dim_value
assert input_type == _utils.dtype_onnx_to_torch(
trainer._onnx_model.graph.input[i].type.tensor_type.elem_type)
opset = get_model_opset(trainer._onnx_model)
# Check name, shape and dtype of the ORT graph outputs
for i in range(len(trainer.model_desc.outputs)):
output_name = trainer.model_desc.outputs[i][0]
output_dim = trainer.model_desc.outputs[i][1]
output_type = trainer.model_desc.outputs[i][3]
assert trainer._onnx_model.graph.output[i].name == output_name
for dim_idx, dim in enumerate(trainer._onnx_model.graph.output[i].type.tensor_type.shape.dim):
if opset != 14:
assert output_dim[dim_idx] == dim.dim_value
assert output_type == _utils.dtype_onnx_to_torch(
trainer._onnx_model.graph.output[i].type.tensor_type.elem_type)
# Save current model as ONNX as a file
file_name = os.path.join('_____temp_onnx_model.onnx')
trainer.save_as_onnx(file_name)
assert os.path.exists(file_name)
with open(file_name, "rb") as f:
bin_str = f.read()
reload_onnx_model = onnx.load_model_from_string(bin_str)
os.remove(file_name)
# Create a new trainer from persisted ONNX model and compare with original ONNX model
trainer_from_onnx = orttrainer.ORTTrainer(reload_onnx_model, model_desc, optim_config)
step_fn(data, targets)
assert trainer_from_onnx._onnx_model is not None
assert (id(trainer_from_onnx._onnx_model) != id(trainer._onnx_model))
assert (trainer_from_onnx._onnx_model == trainer._onnx_model)
assert (trainer_from_onnx._onnx_model.graph == trainer._onnx_model.graph)
assert (onnx.helper.printable_graph(trainer_from_onnx._onnx_model.graph) == onnx.helper.printable_graph(trainer._onnx_model.graph))
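# Minimal end-to-end sketch distilled from the test above (illustrative; assumes a
# CUDA device and the transformer helper used throughout this file; the output path
# is hypothetical).
def _example_minimal_training_loop(total_steps=10, device='cuda'):
    model, model_desc, my_loss, batcher_fn, train_data, val_data, _ = \
        _test_commons._load_pytorch_transformer_model(device)
    opts = orttrainer.ORTTrainerOptions({'device': {'id': device}})
    trainer = orttrainer.ORTTrainer(model, model_desc, optim.LambConfig(lr=0.001),
                                    loss_fn=my_loss, options=opts)
    for i in range(total_steps):
        data, targets = batcher_fn(train_data, i)
        loss, _ = trainer.train_step(data, targets)
    data, targets = batcher_fn(val_data, 0)
    eval_loss, _ = trainer.eval_step(data, targets)  # no parameter update
    trainer.save_as_onnx('model.onnx')               # hypothetical output path
    return loss, eval_loss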
@pytest.mark.parametrize("seed, device", [
(0, 'cpu'),
(24, 'cuda')
])
def testORTDeterministicCompute(seed, device):
# Common setup
optim_config = optim.LambConfig()
opts = orttrainer.ORTTrainerOptions({
'debug' : {
'deterministic_compute': True
},
'device' : {
'id' : device,
'mem_limit' : 10*1024*1024
}
})
    # Setup for the first ORTTrainer run
torch.manual_seed(seed)
set_seed(seed)
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
first_trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts)
data, targets = batcher_fn(train_data, 0)
_ = first_trainer.train_step(data, targets)
assert first_trainer._onnx_model is not None
    # Setup for the second ORTTrainer run
torch.manual_seed(seed)
set_seed(seed)
model, _, _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device)
second_trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts)
_ = second_trainer.train_step(data, targets)
assert second_trainer._onnx_model is not None
# Compare two different instances with identical setup
assert id(first_trainer._onnx_model) != id(second_trainer._onnx_model)
_test_helpers.assert_onnx_weights(first_trainer, second_trainer)
@pytest.mark.parametrize("seed,device,expected_loss,fetches", [
(321, 'cuda', [10.5774, 10.4403, 10.4175, 10.2886, 10.2760], False),
(321, 'cuda', [10.5774, 10.4403, 10.4175, 10.2886, 10.2760], True),
])
def testORTTrainerMixedPrecisionLossScaler(seed, device, expected_loss, fetches):
return # TODO: re-enable after nondeterminism on backend is fixed. update numbers
rtol = 1e-3
total_steps = len(expected_loss)
torch.manual_seed(seed)
set_seed(seed)
# Setup ORTTrainer
loss_scaler = amp.DynamicLossScaler()
options = orttrainer.ORTTrainerOptions({'device' : {'id' : device},
'mixed_precision' : {
'enabled' : True,
'loss_scaler' : loss_scaler},
'debug' : {'deterministic_compute' : True}})
model, model_desc, my_loss, batcher_fn, train_data, val_data, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
actual_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
if fetches:
trainer._train_step_info.fetches=['loss']
loss = trainer.train_step(data, targets)
else:
loss, _ = trainer.train_step(data, targets)
actual_loss.append(loss.cpu())
# Eval once just to test fetches in action
val_data, val_targets = batcher_fn(val_data, 0)
if fetches:
trainer._train_step_info.fetches=['loss']
loss = trainer.eval_step(val_data, val_targets)
trainer._train_step_info.fetches=[]
loss, _ = trainer.eval_step(val_data, val_targets)
# Compare loss to ground truth computed from current ORTTrainer API
_test_helpers.assert_model_outputs(expected_loss, actual_loss, True, rtol=rtol)
assert trainer._onnx_model is not None
def _recompute_data():
device_capability_major = torch.cuda.get_device_capability()[0]
if device_capability_major == 7: # V100 for Dev machine
expected_loss = {12: [10.5598, 10.4591, 10.3477, 10.2726, 10.1945],
14: [10.54088, 10.498755, 10.386827, 10.338747, 10.262459]}
return [
(False, False, False, 0, expected_loss), # no recompute
(True, False, False, 0, expected_loss), # attn_dropout recompute
(False, True, False, 0, expected_loss), # gelu recompute
(False, False, True, 0, expected_loss), # transformer_layer recompute
(False, False, True, 1, expected_loss), # transformer_layer recompute with 1 layer
]
elif device_capability_major == 5: # M60 for CI machines
expected_loss = {12: [10.5445, 10.4389, 10.3480, 10.2627, 10.2113],
14: [10.5445, 10.4389, 10.3480, 10.2627, 10.2113]}
return [
(False, False, False, 0, expected_loss), # no recompute
(True, False, False, 0, expected_loss), # attn_dropout recompute
(False, True, False, 0, expected_loss), # gelu recompute
(False, False, True, 0, expected_loss), # transformer_layer recompute
(False, False, True, 1, expected_loss), # transformer_layer recompute with 1 layer
]
@pytest.mark.parametrize("attn_dropout, gelu, transformer_layer, number_layers, expected_loss", _recompute_data())
def testORTTrainerRecompute(attn_dropout, gelu, transformer_layer, number_layers, expected_loss):
seed = 321
device = 'cuda'
rtol = 1e-3
total_steps = len(expected_loss[12])
torch.manual_seed(seed)
set_seed(seed)
# Setup ORTTrainer
options = orttrainer.ORTTrainerOptions({'device' : {'id' : device},
'graph_transformer' : {
'attn_dropout_recompute': attn_dropout,
'gelu_recompute': gelu,
'transformer_layer_recompute': transformer_layer,
'number_recompute_layers': number_layers
},
'debug' : {'deterministic_compute' : True}})
model, model_desc, my_loss, batcher_fn, train_data, val_data, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
actual_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
loss, _ = trainer.train_step(data, targets)
actual_loss.append(loss.cpu())
# Compare loss to ground truth computed from current ORTTrainer API
assert trainer._onnx_model is not None
opset = get_model_opset(trainer._onnx_model)
_test_helpers.assert_model_outputs(expected_loss[opset], actual_loss, True, rtol=rtol)
@pytest.mark.parametrize("seed,device,gradient_accumulation_steps,total_steps,expected_loss", [
(0, 'cuda', 1, 12, [10.5368022919, 10.4146203995, 10.3635568619, 10.2650547028, 10.2284049988, 10.1304626465,\
10.0853414536, 9.9987659454, 9.9472427368, 9.8832416534, 9.8223171234, 9.8222122192]),
(42, 'cuda', 3, 12, [10.6455879211, 10.6247081757, 10.6361322403, 10.5187482834, 10.5345087051, 10.5487670898,\
10.4833698273, 10.4600019455, 10.4535751343, 10.3774127960, 10.4144191742, 10.3757553101]),
(123, 'cuda', 7, 12, [10.5353469849, 10.5261383057, 10.5240392685, 10.5013713837, 10.5678377151, 10.5452117920,\
10.5184345245, 10.4271221161, 10.4458627701, 10.4864749908, 10.4416503906, 10.4467563629]),
(321, 'cuda', 12, 12, [10.5773944855, 10.5428829193, 10.5974750519, 10.5416746140, 10.6009902954, 10.5684127808,\
10.5759754181, 10.5636739731, 10.5613927841, 10.5825119019, 10.6031589508, 10.6199369431]),
])
def testORTTrainerGradientAccumulation(seed, device, gradient_accumulation_steps, total_steps, expected_loss):
return # TODO: re-enable after nondeterminism on backend is fixed. update numbers
rtol = 1e-3
torch.manual_seed(seed)
set_seed(seed)
# Setup ORTTrainer
options = orttrainer.ORTTrainerOptions({'device' : {'id' : device},
'batch' : {'gradient_accumulation_steps' : gradient_accumulation_steps},
'debug' : {'deterministic_compute' : True}})
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
actual_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
loss, _ = trainer.train_step(data, targets)
actual_loss.append(loss.cpu())
# Compare legacy vs experimental APIs
_test_helpers.assert_model_outputs(expected_loss, actual_loss, rtol=rtol)
@pytest.mark.parametrize("dynamic_axes", [
(True),
(False),
])
def testORTTrainerDynamicShape(dynamic_axes):
# Common setup
device = 'cuda'
# Setup ORTTrainer
options = orttrainer.ORTTrainerOptions({})
model, model_desc, my_loss, batcher_fn,\
train_data, _, _ = _test_commons._load_pytorch_transformer_model(device, dynamic_axes=dynamic_axes)
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
total_steps = 10
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
if dynamic_axes:
# Forcing batches with different sizes to exercise dynamic shapes
data = data[:-(i+1)]
targets = targets[:-(i+1)*data.size(1)]
_, _ = trainer.train_step(data, targets)
assert trainer._onnx_model is not None
@pytest.mark.parametrize('enable_onnx_contrib_ops', [
(True),
(False),
])
def testORTTrainerInternalUseContribOps(enable_onnx_contrib_ops):
# Common setup
device = 'cuda'
# Setup ORTTrainer
options = orttrainer.ORTTrainerOptions({"_internal_use": {"enable_onnx_contrib_ops": enable_onnx_contrib_ops}})
model, model_desc, my_loss, batcher_fn,\
train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
data, targets = batcher_fn(train_data, 0)
if not enable_onnx_contrib_ops and not pytorch_110:
with pytest.raises(Exception) as e_info:
_, _ = trainer.train_step(data, targets)
else:
_, _ = trainer.train_step(data, targets)
@pytest.mark.parametrize("model_params", [
(['decoder.weight',
'transformer_encoder.layers.0.linear1.bias',
'transformer_encoder.layers.0.linear2.weight',
'transformer_encoder.layers.1.self_attn.out_proj.weight',
'transformer_encoder.layers.1.self_attn.out_proj.bias']),
])
def testORTTrainerFrozenWeights(model_params):
# Common setup
device = 'cuda'
total_steps = 10
# Setup ORTTrainer WITHOUT frozen weights
options = orttrainer.ORTTrainerOptions({})
model, model_desc, my_loss, batcher_fn,\
train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
_, _ = trainer.train_step(data, targets)
# All model_params must be in the session state
assert trainer._onnx_model is not None
session_state = trainer._training_session.get_state()
assert all([param in session_state for param in model_params])
# Setup ORTTrainer WITH frozen weights
options = orttrainer.ORTTrainerOptions({'utils' : {'frozen_weights' : model_params}})
model, _, _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
_, _ = trainer.train_step(data, targets)
# All model_params CANNOT be in the session state
assert trainer._onnx_model is not None
session_state = trainer._training_session.get_state()
assert not all([param in session_state for param in model_params])
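# Illustrative sketch: freezing weights is purely an options switch; the names must
# match trainable parameter names of the model (the default below is hypothetical).
def _example_frozen_weight_options(frozen=('decoder.weight',)):
    return orttrainer.ORTTrainerOptions({'utils': {'frozen_weights': list(frozen)}})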
@pytest.mark.parametrize("loss_scaler, optimizer_config, gradient_accumulation_steps", [
(None, optim.AdamConfig(), 1),
(None, optim.LambConfig(), 1),
(None, optim.SGDConfig(), 1),
(amp.DynamicLossScaler(), optim.AdamConfig(), 1),
(amp.DynamicLossScaler(), optim.LambConfig(), 5),
    #(amp.DynamicLossScaler(), optim.SGDConfig(), 1), # SGD doesn't support fp16
])
def testORTTrainerStateDictWrapModelLossFn(loss_scaler, optimizer_config, gradient_accumulation_steps):
# Common setup
seed = 1
class LinearModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(2, 4)
def forward(self, y=None, x=None):
if y is not None:
return self.linear(x) + y
else:
return self.linear(x) + torch.ones(2, 4)
model_desc = {'inputs' : [('x', [2, 2]),
('label', [2, ])],
'outputs' : [('loss', [], True),
('output', [2, 4])]}
# Dummy data
data1 = torch.randn(2, 2)
label1 = torch.tensor([0, 1], dtype=torch.int64)
data2 = torch.randn(2, 2)
label2 = torch.tensor([0, 1], dtype=torch.int64)
# Setup training based on test parameters
opts = {'debug' : {'deterministic_compute': True},
'batch' : { 'gradient_accumulation_steps' : gradient_accumulation_steps}}
if loss_scaler:
opts['mixed_precision'] = { 'enabled': True, 'loss_scaler': loss_scaler}
opts = orttrainer.ORTTrainerOptions(opts)
# Training session 1
torch.manual_seed(seed)
set_seed(seed)
pt_model = LinearModel()
def loss_fn(x, label):
return F.nll_loss(F.log_softmax(x, dim=1), label)
trainer = orttrainer.ORTTrainer(pt_model, model_desc, optimizer_config, loss_fn=loss_fn, options=opts)
# Check state_dict keys before train. Must be empty
state_dict = trainer.state_dict()
assert state_dict == {}
# Train once and check initial state
trainer.train_step(x=data1, label=label1)
state_dict = trainer.state_dict()
assert all([weight in state_dict['model']['full_precision'].keys() for weight in ['linear.bias', 'linear.weight']])
# Initialize training session 2 from state of Training 1
torch.manual_seed(seed)
set_seed(seed)
trainer2 = orttrainer.ORTTrainer(pt_model, model_desc, optimizer_config, loss_fn=loss_fn, options=opts)
trainer2.load_state_dict(state_dict)
# Verify state was loaded properly
_test_commons.assert_all_states_close_ort(state_dict, trainer2._load_state_dict.args[0])
# Perform a second step in both training session 1 and 2 and verify they match
trainer.train_step(x=data2, label=label2)
state_dict = trainer.state_dict()
trainer2.train_step(x=data2, label=label2)
state_dict2 = trainer2.state_dict()
_test_commons.assert_all_states_close_ort(state_dict, state_dict2)
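# Checkpointing sketch distilled from the test above (illustrative): state_dict() is
# empty until the first train_step, and load_state_dict() restores a trainer built
# from the same model/optimizer description. The file path is hypothetical.
def _example_checkpoint_roundtrip(trainer, other_trainer, path='checkpoint.pt'):
    state = trainer.state_dict()            # populated after at least one train_step
    torch.save(state, path)                 # persist the nested state dictionary
    other_trainer.load_state_dict(torch.load(path))
    os.remove(path)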
def testORTTrainerNonPickableModel():
# Common setup
import threading
seed = 1
class UnpickableModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(2, 4)
self._lock = threading.Lock()
def forward(self, y=None, x=None):
with self._lock:
if y is not None:
return self.linear(x) + y
else:
return self.linear(x) + torch.ones(2, 4)
model_desc = {'inputs' : [('x', [2, 2]),
('label', [2, ])],
'outputs' : [('loss', [], True),
('output', [2, 4])]}
# Dummy data
data = torch.randn(2, 2)
label = torch.tensor([0, 1], dtype=torch.int64)
# Setup training based on test parameters
opts = orttrainer.ORTTrainerOptions({'debug' : {'deterministic_compute': True}})
# Training session
torch.manual_seed(seed)
set_seed(seed)
pt_model = UnpickableModel()
def loss_fn(x, label):
return F.nll_loss(F.log_softmax(x, dim=1), label)
optim_config = optim.AdamConfig()
trainer = orttrainer.ORTTrainer(pt_model, model_desc, optim_config, loss_fn=loss_fn, options=opts)
# Train must succeed despite warning
_, _ = trainer.train_step(data, label)
###############################################################################
# Temporary tests comparing Legacy vs Experimental ORTTrainer APIs ############
###############################################################################
@pytest.mark.parametrize("seed,device", [
(1234, 'cuda')
])
def testORTTrainerLegacyAndExperimentalWeightsCheck(seed, device):
# Common data
rtol = 1e-7
total_steps = 5
    # Setup for the experimental ORTTrainer run
torch.manual_seed(seed)
set_seed(seed)
optim_config = optim.LambConfig()
opts = orttrainer.ORTTrainerOptions({
'device' : {
'id' : device
},
'debug' : {
'deterministic_compute': True
},
})
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts)
# Training loop
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
_ = trainer.train_step(data, targets)
# Setup for the legacy ORTTrainer run
torch.manual_seed(seed)
set_seed(seed)
model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True)
legacy_trainer = Legacy_ORTTrainer(model, my_loss, model_desc, "LambOptimizer", None, lr_desc,
device, _use_deterministic_compute=True)
# Training loop
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
_, _ = legacy_trainer.train_step(data, targets, torch.tensor([optim_config.lr]))
# Compare legacy vs experimental APIs
_test_helpers.assert_legacy_onnx_weights(trainer, legacy_trainer, rtol=rtol)
@pytest.mark.parametrize("seed,device", [
(321, 'cuda'),
])
def testORTTrainerLegacyAndExperimentalPrecisionLossScaler(seed, device):
# Common data
total_steps = 128
# Setup experimental API
torch.manual_seed(seed)
set_seed(seed)
loss_scaler = amp.DynamicLossScaler()
options = orttrainer.ORTTrainerOptions({'device' : {'id' : device},
'mixed_precision' : {
'enabled' : True,
'loss_scaler' : loss_scaler},
'debug' : {'deterministic_compute' : True,}})
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
experimental_loss = []
experimental_preds_dtype = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
exp_loss, exp_preds = trainer.train_step(data, targets)
experimental_loss.append(exp_loss.cpu())
experimental_preds_dtype.append(exp_preds.dtype)
# Setup legacy API
torch.manual_seed(seed)
set_seed(seed)
model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True)
loss_scaler = Legacy_LossScaler('ort_test_input_loss_scalar', True)
legacy_trainer = Legacy_ORTTrainer(model, my_loss, model_desc, "LambOptimizer",
None, lr_desc, device=device,
_use_deterministic_compute=True,
use_mixed_precision=True,
loss_scaler=loss_scaler)
# Training loop
legacy_loss = []
legacy_preds_dtype = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
leg_loss, leg_preds = legacy_trainer.train_step(data, targets, torch.tensor([optim_config.lr]))
legacy_loss.append(leg_loss.cpu())
legacy_preds_dtype.append(leg_preds.dtype)
# Compare legacy vs experimental APIs
assert experimental_preds_dtype == legacy_preds_dtype
_test_helpers.assert_legacy_onnx_weights(trainer, legacy_trainer)
_test_helpers.assert_model_outputs(legacy_loss, experimental_loss)
@pytest.mark.parametrize("seed,device,gradient_accumulation_steps,total_steps", [
(0, 'cuda', 1, 12),
(42, 'cuda', 3, 12),
(123, 'cuda', 7, 12),
(321, 'cuda', 12, 12),
])
def testORTTrainerLegacyAndExperimentalGradientAccumulation(seed, device, gradient_accumulation_steps, total_steps):
# Common data
torch.set_printoptions(precision=10)
# Setup experimental API
torch.manual_seed(seed)
set_seed(seed)
options = orttrainer.ORTTrainerOptions({'device' : {'id' : device},
'batch' : {'gradient_accumulation_steps' : gradient_accumulation_steps},
'debug' : {'deterministic_compute' : True}})
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
experimental_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
exp_loss, _ = trainer.train_step(data, targets)
experimental_loss.append(exp_loss.cpu())
# Setup legacy API
torch.manual_seed(seed)
set_seed(seed)
model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True)
legacy_trainer = Legacy_ORTTrainer(model, my_loss, model_desc, "LambOptimizer",
None, lr_desc, device=device,
_use_deterministic_compute=True,
gradient_accumulation_steps=gradient_accumulation_steps)
# Training loop
legacy_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
leg_loss, _ = legacy_trainer.train_step(data, targets, torch.tensor([optim_config.lr]))
legacy_loss.append(leg_loss.cpu())
# Compare legacy vs experimental APIs
_test_helpers.assert_model_outputs(legacy_loss, experimental_loss)
@pytest.mark.parametrize("seed,device,optimizer_config,lr_scheduler, get_lr_this_step", [
(0, 'cuda', optim.AdamConfig, optim.lr_scheduler.ConstantWarmupLRScheduler, _test_commons.legacy_constant_lr_scheduler),
(0, 'cuda', optim.LambConfig, optim.lr_scheduler.ConstantWarmupLRScheduler, _test_commons.legacy_constant_lr_scheduler),
(0, 'cuda', optim.SGDConfig, optim.lr_scheduler.ConstantWarmupLRScheduler, _test_commons.legacy_constant_lr_scheduler),
(42, 'cuda', optim.AdamConfig, optim.lr_scheduler.LinearWarmupLRScheduler, _test_commons.legacy_linear_lr_scheduler),
(42, 'cuda', optim.LambConfig, optim.lr_scheduler.LinearWarmupLRScheduler, _test_commons.legacy_linear_lr_scheduler),
(42, 'cuda', optim.SGDConfig, optim.lr_scheduler.LinearWarmupLRScheduler, _test_commons.legacy_linear_lr_scheduler),
(123, 'cuda', optim.AdamConfig, optim.lr_scheduler.CosineWarmupLRScheduler, _test_commons.legacy_cosine_lr_scheduler),
(123, 'cuda', optim.LambConfig, optim.lr_scheduler.CosineWarmupLRScheduler, _test_commons.legacy_cosine_lr_scheduler),
(123, 'cuda', optim.SGDConfig, optim.lr_scheduler.CosineWarmupLRScheduler, _test_commons.legacy_cosine_lr_scheduler),
(321, 'cuda', optim.AdamConfig, optim.lr_scheduler.PolyWarmupLRScheduler, _test_commons.legacy_poly_lr_scheduler),
(321, 'cuda', optim.LambConfig, optim.lr_scheduler.PolyWarmupLRScheduler, _test_commons.legacy_poly_lr_scheduler),
(321, 'cuda', optim.SGDConfig, optim.lr_scheduler.PolyWarmupLRScheduler, _test_commons.legacy_poly_lr_scheduler),
])
def testORTTrainerLegacyAndExperimentalLRScheduler(seed, device, optimizer_config, lr_scheduler, get_lr_this_step):
# Common data
total_steps = 10
lr = 0.001
warmup = 0.5
cycles = 0.5
power = 1.
lr_end = 1e-7
torch.set_printoptions(precision=10)
# Setup experimental API
torch.manual_seed(seed)
set_seed(seed)
if lr_scheduler == optim.lr_scheduler.ConstantWarmupLRScheduler or lr_scheduler == optim.lr_scheduler.LinearWarmupLRScheduler:
lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup)
elif lr_scheduler == optim.lr_scheduler.CosineWarmupLRScheduler:
lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup, cycles=cycles)
elif lr_scheduler == optim.lr_scheduler.PolyWarmupLRScheduler:
lr_scheduler = lr_scheduler(total_steps=total_steps, warmup=warmup, power=power, lr_end=lr_end)
else:
raise RuntimeError("Invalid lr_scheduler")
options = orttrainer.ORTTrainerOptions({'device' : {'id' : device},
'debug' : {'deterministic_compute' : True},
'lr_scheduler' : lr_scheduler})
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optimizer_config(lr=lr)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
experimental_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
exp_loss, exp_preds = trainer.train_step(data, targets)
experimental_loss.append(exp_loss.cpu())
# Setup legacy API
torch.manual_seed(seed)
set_seed(seed)
if optimizer_config == optim.AdamConfig:
legacy_optimizer_config = 'AdamOptimizer'
elif optimizer_config == optim.LambConfig:
legacy_optimizer_config = 'LambOptimizer'
elif optimizer_config == optim.SGDConfig:
legacy_optimizer_config = 'SGDOptimizer'
else:
raise RuntimeError("Invalid optimizer_config")
if get_lr_this_step == _test_commons.legacy_constant_lr_scheduler or get_lr_this_step == _test_commons.legacy_linear_lr_scheduler:
get_lr_this_step = partial(get_lr_this_step, initial_lr=lr, total_steps=total_steps, warmup=warmup)
elif get_lr_this_step == _test_commons.legacy_cosine_lr_scheduler:
get_lr_this_step = partial(get_lr_this_step, initial_lr=lr, total_steps=total_steps, warmup=warmup, cycles=cycles)
elif get_lr_this_step == _test_commons.legacy_poly_lr_scheduler:
get_lr_this_step = partial(get_lr_this_step, initial_lr=lr, total_steps=total_steps, warmup=warmup, power=power, lr_end=lr_end)
else:
raise RuntimeError("Invalid get_lr_this_step")
model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True)
legacy_trainer = Legacy_ORTTrainer(model, my_loss, model_desc, legacy_optimizer_config,
None, lr_desc, device=device,
_use_deterministic_compute=True,
get_lr_this_step=get_lr_this_step)
# Training loop
legacy_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
leg_loss, leg_preds = legacy_trainer.train_step(data, targets)
legacy_loss.append(leg_loss.cpu())
# Compare legacy vs experimental APIs
_test_helpers.assert_model_outputs(legacy_loss, experimental_loss)
def testLossScalerLegacyAndExperimentalFullCycle():
info = orttrainer.TrainStepInfo(optimizer_config=optim.LambConfig(lr=0.001), all_finite=True, fetches=[], optimization_step=0, step=0)
new_ls = amp.DynamicLossScaler()
old_ls = Legacy_LossScaler("ort_test_input_loss_scaler", True)
# Initial state
train_step_info = orttrainer.TrainStepInfo(optim.LambConfig())
assert_allclose(new_ls.loss_scale, old_ls.loss_scale_)
assert new_ls.up_scale_window == old_ls.up_scale_window_
assert_allclose(new_ls.min_loss_scale, old_ls.min_loss_scale_)
assert_allclose(new_ls.max_loss_scale, old_ls.max_loss_scale_)
# Performing 9*2000 updates to cover all branches of LossScaler.update(train_step_info.all_finite=True)
for cycles in range(1, 10):
        # 1999 updates without overflow produce 1999 stable steps
for i in range(1, 2000):
new_loss_scale = new_ls.update(train_step_info)
old_ls.update_loss_scale(train_step_info.all_finite)
old_loss_scale = old_ls.loss_scale_
assert new_ls._stable_steps_count == old_ls.stable_steps_
assert_allclose(new_loss_scale, old_loss_scale)
        # The 2000th update without overflow doubles the loss scale and resets the stable-step count to zero, until max_loss_scale is reached
new_loss_scale = new_ls.update(train_step_info)
old_ls.update_loss_scale(train_step_info.all_finite)
old_loss_scale = old_ls.loss_scale_
assert new_ls._stable_steps_count == old_ls.stable_steps_
assert_allclose(new_loss_scale, old_loss_scale)
# After 8 cycles, loss scale should be float(1 << 16)*(2**8)
assert_allclose(new_loss_scale, old_loss_scale)
# After 9 cycles, loss scale reaches max_loss_scale and it is not doubled from that point on
for count in range(1, 2050):
new_loss_scale = new_ls.update(train_step_info)
old_ls.update_loss_scale(train_step_info.all_finite)
old_loss_scale = old_ls.loss_scale_
assert new_ls._stable_steps_count == old_ls.stable_steps_
assert_allclose(new_loss_scale, old_loss_scale)
# Setting train_step_info.all_finite = False to test down scaling
train_step_info.all_finite = False
# Performing 24 updates to half the loss scale each time
for count in range(1, 25):
new_loss_scale = new_ls.update(train_step_info)
old_ls.update_loss_scale(train_step_info.all_finite)
old_loss_scale = old_ls.loss_scale_
assert new_ls._stable_steps_count == old_ls.stable_steps_
assert_allclose(new_loss_scale, old_loss_scale)
# After 24 updates with gradient overflow, loss scale is 1.0
assert_allclose(new_loss_scale, old_loss_scale)
    # After 25 updates, min_loss_scale is reached and the loss scale is not halved from that point on
for count in range(1, 5):
new_loss_scale = new_ls.update(train_step_info)
old_ls.update_loss_scale(train_step_info.all_finite)
old_loss_scale = old_ls.loss_scale_
assert new_ls._stable_steps_count == old_ls.stable_steps_
assert_allclose(new_loss_scale, old_loss_scale)
def testLossScalerLegacyAndExperimentalRandomAllFinite():
new_ls = amp.DynamicLossScaler()
old_ls = Legacy_LossScaler("ort_test_input_loss_scaler", True)
# Initial state
train_step_info = orttrainer.TrainStepInfo(optim.LambConfig())
assert_allclose(new_ls.loss_scale, old_ls.loss_scale_)
assert new_ls.up_scale_window == old_ls.up_scale_window_
assert_allclose(new_ls.min_loss_scale, old_ls.min_loss_scale_)
assert_allclose(new_ls.max_loss_scale, old_ls.max_loss_scale_)
import random
out = []
for _ in range(1, 64):
train_step_info.all_finite = bool(random.getrandbits(1))
new_loss_scale = new_ls.update(train_step_info)
old_ls.update_loss_scale(train_step_info.all_finite)
old_loss_scale = old_ls.loss_scale_
assert new_ls._stable_steps_count == old_ls.stable_steps_
assert_allclose(new_loss_scale, old_loss_scale)
out.append(new_loss_scale)
assert new_loss_scale > 1e-7
def testORTTrainerRunSymbolicShapeInfer():
# Common data
seed = 0
total_steps = 12
device = 'cuda'
torch.set_printoptions(precision=10)
# Setup without symbolic shape inference
torch.manual_seed(seed)
set_seed(seed)
options = orttrainer.ORTTrainerOptions({'device' : {'id' : device},
'debug' : {'deterministic_compute' : True}})
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
expected_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
loss, _ = trainer.train_step(data, targets)
expected_loss.append(loss.cpu())
# Setup with symbolic shape inference
torch.manual_seed(seed)
set_seed(seed)
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001)
options.utils.run_symbolic_shape_infer = True
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
new_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
loss, _ = trainer.train_step(data, targets)
new_loss.append(loss.cpu())
# Setup with symbolic shape inference in legacy API
torch.manual_seed(seed)
set_seed(seed)
model, (model_desc, lr_desc), _, _, _, _, _ = _test_commons._load_pytorch_transformer_model(device, legacy_api=True)
legacy_trainer = Legacy_ORTTrainer(model, my_loss, model_desc, "LambOptimizer",
None, lr_desc, device=device,
run_symbolic_shape_infer=True,
_use_deterministic_compute=True)
# Training loop
legacy_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
loss, _ = legacy_trainer.train_step(data, targets, torch.tensor([optim_config.lr]))
legacy_loss.append(loss.cpu())
# Compare losses
_test_helpers.assert_model_outputs(new_loss, expected_loss)
_test_helpers.assert_model_outputs(legacy_loss, expected_loss)
@pytest.mark.parametrize("test_input", [
({
'distributed': {'enable_adasum': True},
})
])
def testORTTrainerOptionsEnabledAdasumFlag(test_input):
    '''Test the enable_adasum flag when it is set to True'''
actual_values = orttrainer_options.ORTTrainerOptions(test_input)
assert actual_values.distributed.enable_adasum == True
@pytest.mark.parametrize("test_input", [
({
'distributed': {'enable_adasum': False},
})
])
def testORTTrainerOptionsDisabledAdasumFlag(test_input):
    '''Test the enable_adasum flag value when it is set to disabled.'''
actual_values = orttrainer_options.ORTTrainerOptions(test_input)
assert actual_values.distributed.enable_adasum == False
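# --- Illustrative sketch added by the editor (not part of the original test suite) ---
# The two flag tests above can equivalently be folded into a single parametrized test
# that pairs each options dict with the expected value of the flag:
@pytest.mark.parametrize("test_input,expected", [
    ({'distributed': {'enable_adasum': True}}, True),
    ({'distributed': {'enable_adasum': False}}, False),
])
def testORTTrainerOptionsAdasumFlagSketch(test_input, expected):
    actual_values = orttrainer_options.ORTTrainerOptions(test_input)
    assert actual_values.distributed.enable_adasum == expected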
def testORTTrainerUnusedInput():
class UnusedInputModel(torch.nn.Module):
def __init__(self):
super(UnusedInputModel, self).__init__()
def forward(self, x, y):
return torch.mean(x)
model = UnusedInputModel()
model_desc = {'inputs': [('x', [1]), ('y', [1])], 'outputs': [('loss', [], True)]}
optim_config = optim.LambConfig(lr=0.001)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config)
# Run just one step to make sure there are no iobinding errors for the unused input.
try:
trainer.train_step(torch.FloatTensor([1.0]), torch.FloatTensor([1.0]))
except RuntimeError:
pytest.fail("RuntimeError doing train_step with an unused input.")
@pytest.mark.parametrize("debug_files", [
{'model_after_graph_transforms_path': 'transformed.onnx',
'model_with_gradient_graph_path': 'transformed_grad.onnx',
'model_with_training_graph_path': 'training.onnx',
'model_with_training_graph_after_optimization_path': 'training_optimized.onnx'
},
{'model_after_graph_transforms_path': 'transformed.onnx',
'model_with_training_graph_path': ''
},
])
def testTrainingGraphExport(debug_files):
device = 'cuda'
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
with tempfile.TemporaryDirectory() as tempdir:
debug_paths = {}
for k,v in debug_files.items():
debug_paths[k] = os.path.join(tempdir, v)
opts = orttrainer.ORTTrainerOptions(
{
"device": {"id": device},
"debug": {"graph_save_paths": debug_paths}
}
)
optim_config = optim.AdamConfig()
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=opts)
data, targets = batcher_fn(train_data, 0)
trainer.train_step(data, targets)
for k,v in debug_files.items():
path = debug_paths[k]
if len(v) > 0:
assert os.path.isfile(path)
saved_graph = onnx.load(path).graph
if k == 'model_with_training_graph_path':
assert any("AdamOptimizer" in n.op_type for n in saved_graph.node)
elif k == 'model_with_gradient_graph_path':
assert any("Grad" in n.name for n in saved_graph.node)
elif k == 'model_after_graph_transforms_path':
assert any("LayerNormalization" in n.op_type for n in saved_graph.node)
elif k == 'model_with_training_graph_after_optimization_path':
assert any("FusedMatMul" in n.op_type for n in saved_graph.node)
# remove saved file
os.remove(path)
else:
assert not os.path.isfile(path)
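# --- Illustrative sketch added by the editor (not part of the original test suite) ---
# When the saved debug graphs above need closer inspection, a quick histogram of op
# types (using only onnx.load, exactly as the test does) is often enough to confirm that
# a transform such as LayerNormalization fusion or optimizer insertion took place:
def _count_op_types(onnx_path):
    from collections import Counter
    graph = onnx.load(onnx_path).graph
    return Counter(node.op_type for node in graph.node)
# e.g. _count_op_types(some_training_graph_path)['AdamOptimizer'] > 0  (path is hypothetical)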
def _adam_max_norm_clip_data():
device_capability_major = torch.cuda.get_device_capability()[0]
if device_capability_major == 7: # V100 for Dev machine
return [
(0, 'cuda', 1.0, 1, 12, {
12: [10.592951, 10.067989, 9.619152, 9.245731, 8.881137,
8.578644, 8.280573, 8.063023, 7.797933, 7.486215, 7.233806, 7.011791],
14: [10.584141, 10.068119, 9.581743, 9.191472, 8.880169, 8.5352,
8.311425, 8.061202, 7.773032, 7.523009, 7.258711, 7.02805]}),
(0, 'cuda', 0.1, 1, 12, {
12: [10.592951, 10.068722, 9.620503, 9.247791, 8.883972,
8.582286, 8.285027, 8.068308, 7.803638, 7.492318, 7.240352, 7.018665],
14: [10.584141, 10.068845, 9.583107, 9.193537, 8.882966, 8.538839,
8.315872, 8.066408, 7.778978, 7.529708, 7.265849, 7.035439]}),
(42, 'cuda', 1.0, 1, 12, {
12: [10.647908, 10.144501, 9.672352, 9.306980, 8.956026,
8.602655, 8.351079, 8.088144, 7.867220, 7.564082, 7.289846, 7.073726],
14: [10.697515, 10.229034, 9.765422, 9.428294, 9.080612, 8.715208,
8.459574, 8.169073, 7.940211, 7.654147, 7.390446, 7.166227]}),
(42, 'cuda', 0.1, 1, 12, {
12: [10.647908, 10.145191, 9.673690, 9.309031, 8.959020,
8.606632, 8.355836, 8.093478, 7.873327, 7.570731, 7.296772, 7.0809422],
14: [10.697515, 10.22967, 9.766556, 9.430037, 9.083106, 8.718601,
8.463726, 8.17396, 7.945755, 7.660188, 7.396963, 7.172944]})
]
elif device_capability_major == 5: # M60 for CI machines (Python Packaging Pipeline)
return [
(0, 'cuda', 1.0, 1, 12, {
12: [10.618382, 10.08292 , 9.603334, 9.258133, 8.917768, 8.591574,
8.318401, 8.042292, 7.783608, 7.50226 , 7.236041, 7.035602],
14: [10.618382, 10.08292 , 9.603334, 9.258133, 8.917768, 8.591574,
8.318401, 8.042292, 7.783608, 7.50226 , 7.236041, 7.035602]}),
(0, 'cuda', 0.1, 1, 12, {
12: [10.618382, 10.083632, 9.604639, 9.260109, 8.920504, 8.595082,
8.322799, 8.047493, 7.78929 , 7.508382, 7.242587, 7.042367],
14: [10.618382, 10.083632, 9.604639, 9.260109, 8.920504, 8.595082,
8.322799, 8.047493, 7.78929 , 7.508382, 7.242587, 7.042367]}),
(42, 'cuda', 1.0, 1, 12, {
12: [10.68639 , 10.102986, 9.647681, 9.293091, 8.958928, 8.625297,
8.351107, 8.079577, 7.840723, 7.543044, 7.284141, 7.072688],
14: [10.68639 , 10.102986, 9.647681, 9.293091, 8.958928, 8.625297,
8.351107, 8.079577, 7.840723, 7.543044, 7.284141, 7.072688]}),
(42, 'cuda', 0.1, 1, 12, {
12: [10.68639 , 10.103672, 9.649025, 9.295167, 8.961777, 8.629059,
8.355571, 8.084871, 7.846589, 7.549438, 7.290722, 7.079446],
14: [10.697515, 10.22967, 9.766556, 9.430037, 9.083106, 8.718601,
8.463726, 8.17396, 7.945755, 7.660188, 7.396963, 7.172944]}),
]
@pytest.mark.parametrize("seed,device,max_norm_clip,gradient_accumulation_steps,total_steps,expected_loss", _adam_max_norm_clip_data())
def testORTTrainerAdamMaxNormClip(seed, device, max_norm_clip, gradient_accumulation_steps, total_steps, expected_loss):
rtol = 1e-5
torch.manual_seed(seed)
set_seed(seed)
# Setup ORTTrainer
options = orttrainer.ORTTrainerOptions({'device' : {'id' : device},
'batch' : {'gradient_accumulation_steps' : gradient_accumulation_steps},
'debug' : {'deterministic_compute' : True}})
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.AdamConfig(lr=0.001, max_norm_clip=max_norm_clip)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
actual_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
loss, _ = trainer.train_step(data, targets)
actual_loss.append(loss.cpu().item())
    # Compare the losses against the precomputed baseline for this model's opset
assert trainer._onnx_model is not None
opset = get_model_opset(trainer._onnx_model)
_test_helpers.assert_model_outputs(expected_loss[opset], actual_loss, rtol=rtol)
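# --- Illustrative sketch added by the editor (not part of the original test suite) ---
# The `max_norm_clip` setting exercised above is analogous to global gradient-norm
# clipping: when the gradient norm exceeds the threshold, gradients are rescaled by
# threshold / norm. The plain-PyTorch counterpart (shown only for intuition, not as the
# ORT implementation) is:
def _clip_like_max_norm(parameters, max_norm):
    return torch.nn.utils.clip_grad_norm_(parameters, max_norm)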
def _lamb_max_norm_clip_data():
device_capability_major = torch.cuda.get_device_capability()[0]
if device_capability_major == 7: # V100 for Dev machine
return [
(0, 'cuda', 1.0, 1, 12, {
12: [10.592951, 10.487728, 10.422251, 10.350913, 10.244248, 10.213003,
10.129222, 10.095112, 10.035983, 9.974586, 9.909771, 9.874278],
14: [10.584141, 10.497192, 10.389251, 10.286045, 10.231354, 10.17018,
10.066779, 10.048138, 9.958029, 9.8908, 9.82965, 9.755484]}),
(0, 'cuda', 0.1, 1, 12, {
12: [10.592951, 10.452503, 10.349832, 10.245314, 10.106587, 10.046009,
9.934781, 9.875164, 9.792067, 9.704592, 9.617104, 9.563070],
14: [10.584141, 10.461154, 10.315399, 10.178979, 10.092329, 9.999928,
9.869949, 9.824564, 9.707565, 9.61643, 9.532847, 9.439593]}),
(42, 'cuda', 1.0, 1, 12, {
12: [10.647908, 10.566276, 10.476154, 10.406275, 10.311079, 10.240053,
10.196469, 10.113955, 10.117376, 10.013077, 9.930301, 9.893368],
14: [10.697515, 10.631279, 10.528757, 10.496689, 10.411219, 10.322109,
10.297314, 10.215549, 10.149698, 10.087336, 10.010884, 9.934544]}),
(42, 'cuda', 0.1, 1, 12, {
12: [10.647908, 10.531957, 10.405246, 10.302971, 10.176583, 10.075583,
10.005772, 9.897825, 9.875748, 9.748932, 9.642885, 9.586762],
14: [10.697515, 10.596729, 10.457815, 10.393475, 10.277581, 10.158909,
10.108126, 10.000326, 9.912526, 9.826057, 9.727899, 9.633768]})
]
elif device_capability_major == 5: # M60 for CI machines (Python Packaging Pipeline)
return [
(0, 'cuda', 1.0, 1, 12, {
12: [10.618382, 10.50222, 10.403347, 10.35298, 10.288447, 10.237399,
10.184225, 10.089048, 10.008952, 9.972644, 9.897674, 9.84524],
14: [0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4]}),
(0, 'cuda', 0.1, 1, 12, {
12: [10.618382, 10.466732, 10.330871, 10.24715 , 10.150972, 10.069127,
9.98974 , 9.870169, 9.763693, 9.704323, 9.605957, 9.533117],
14: [1, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4]}),
(42, 'cuda', 1.0, 1, 12, {
12: [10.68639 , 10.511692, 10.447308, 10.405255, 10.334866, 10.261473,
10.169422, 10.107138, 10.069889, 9.97798, 9.928105, 9.896435],
14: [2, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4]}),
(42, 'cuda', 0.1, 1, 12, {
12: [10.68639 , 10.477489, 10.376671, 10.301725, 10.200718, 10.098477,
9.97995 , 9.890104, 9.828899, 9.713555, 9.639567, 9.589856],
14: [3, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4]}),
]
@pytest.mark.parametrize("seed,device,max_norm_clip, gradient_accumulation_steps,total_steps,expected_loss", _lamb_max_norm_clip_data())
def testORTTrainerLambMaxNormClip(seed, device, max_norm_clip, gradient_accumulation_steps, total_steps, expected_loss):
rtol = 1e-3
torch.manual_seed(seed)
set_seed(seed)
# Setup ORTTrainer
options = orttrainer.ORTTrainerOptions({'device' : {'id' : device},
'batch' : {'gradient_accumulation_steps' : gradient_accumulation_steps},
'debug' : {'deterministic_compute' : True}})
model, model_desc, my_loss, batcher_fn, train_data, _, _ = _test_commons._load_pytorch_transformer_model(device)
optim_config = optim.LambConfig(lr=0.001, max_norm_clip=max_norm_clip)
trainer = orttrainer.ORTTrainer(model, model_desc, optim_config, loss_fn=my_loss, options=options)
# Training loop
actual_loss = []
for i in range(total_steps):
data, targets = batcher_fn(train_data, i)
loss, _ = trainer.train_step(data, targets)
actual_loss.append(loss.cpu().item())
    # Compare the losses against the precomputed baseline for this model's opset
opset = get_model_opset(trainer._onnx_model)
_test_helpers.assert_model_outputs(expected_loss[opset], actual_loss, rtol=rtol)
| 45.993361 | 138 | 0.649473 |
a55506b69ec3f4c998c3ff47161bed66b5f12003 | 357 | py | Python
scripts/remove_alpha.py | OpenVoiceOS/ovos-intent-plugin-regex | 43979c767ebdb1ffc065f76cfed4c4a2f331e805 | ["Apache-2.0"] | null | null | null
scripts/remove_alpha.py | OpenVoiceOS/ovos-intent-plugin-regex | 43979c767ebdb1ffc065f76cfed4c4a2f331e805 | ["Apache-2.0"] | null | null | null
scripts/remove_alpha.py | OpenVoiceOS/ovos-intent-plugin-regex | 43979c767ebdb1ffc065f76cfed4c4a2f331e805 | ["Apache-2.0"] | null | null | null
import fileinput
from os.path import join, dirname
version_file = join(dirname(dirname(__file__)), "ovos_intent_plugin_regex", "version.py")
alpha_var_name = "VERSION_ALPHA"
for line in fileinput.input(version_file, inplace=True):
if line.startswith(alpha_var_name):
print(f"{alpha_var_name} = 0")
else:
print(line.rstrip('\n'))
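# Illustrative note added by the editor (not part of the original script): given a
# version.py containing, for example,
#   VERSION_MAJOR = 0
#   VERSION_ALPHA = 5
# running this script rewrites the file in place so the alpha counter is reset:
#   VERSION_MAJOR = 0
#   VERSION_ALPHA = 0
# Every other line is echoed back unchanged because fileinput(..., inplace=True)
# redirects print() into the file being edited.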
| 25.5 | 89 | 0.719888 |
62cf086f3d2c0bdef38b598f1e7ab8ac2985bd12 | 1,298 | py | Python
maindatabase/urls.py | dragonman164/Database-for-Covid-19-Vaccination-with-API-support | b34c29414967fcdf2d9fb34ed815f5c2752694b6 | ["MIT"] | 1 | 2020-12-30T16:31:14.000Z | 2020-12-30T16:31:14.000Z
maindatabase/urls.py | rishusingh022/Database-for-Covid-19-Vaccination-with-API-support | 8d2137f06d10ca620a4fe24ffc34c410b8a78ad6 | ["MIT"] | null | null | null
maindatabase/urls.py | rishusingh022/Database-for-Covid-19-Vaccination-with-API-support | 8d2137f06d10ca620a4fe24ffc34c410b8a78ad6 | ["MIT"] | 2 | 2020-12-19T19:27:08.000Z | 2021-01-02T07:43:39.000Z
"""databaseforvaccination URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from django.conf.urls import url,include
from .views import index,license,table,PersonList,ReportList,ManagementList,Person_without_aadhar_viewer
from rest_framework.routers import DefaultRouter
urlpatterns = [
path('',index,name="Index"),
path('license/',license,name="license"),
path('table/',table,name="table"),
path('api/',PersonList.as_view(),name="API"),
path('api1/',ReportList.as_view(),name="API1"),
path('api2/',ManagementList.as_view(),name="API2"),
url('api3/',Person_without_aadhar_viewer.as_view(),name="API3"),
# path('api3/',Person_without_aadhar_viewer,name="API3")
]
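# Illustrative note added by the editor (not part of the original project): the
# DefaultRouter imported above is never registered. If router-based viewsets were added,
# the "Including another URLconf" case from the module docstring would typically be
# wired as, e.g.:
#   router = DefaultRouter()
#   router.register(r'persons', PersonViewSet)  # PersonViewSet is hypothetical
#   urlpatterns += [path('router/', include(router.urls))]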
| 37.085714 | 104 | 0.718798 |
d53444326b83734e65f82dda14860b6949caea4a | 3,228 | py | Python
backend/testScripy.py | SWTangKai/Graph2vec | 4df0fa9b933eedbf87bde844151b8b1fec79f8c0 | ["MIT"] | 1 | 2019-02-11T07:21:56.000Z | 2019-02-11T07:21:56.000Z
backend/testScripy.py | SWTangKai/Graph2vec | 4df0fa9b933eedbf87bde844151b8b1fec79f8c0 | ["MIT"] | null | null | null
backend/testScripy.py | SWTangKai/Graph2vec | 4df0fa9b933eedbf87bde844151b8b1fec79f8c0 | ["MIT"] | null | null | null
from nelib.graph import Graph
from nelib.node2vec import Node2vec
from nelib.forceatlas2 import ForceAtlas2
import networkx as nx
import matplotlib.pyplot as plt
GRAPH_PATH = './data/cora/cora_edgelist.txt'
G = Graph()
G.read_edgelist(GRAPH_PATH)
model1 = Node2vec(G, 10, 10, 2, p=1.0, q=1.0)
# model2 = Node2vec(G, 10, 10, 2, p=1.0, q=3.0)
# model3 = Node2vec(G, 10, 10, 2, p=3.0, q=1.0)
# plt.figure()
# plt.title('model1')
# nx.draw(G.G, pos=model1.vectors, with_labels=True)
# plt.figure()
# plt.title('model2')
# nx.draw(G.G, pos=model2.vectors, with_labels=True)
# plt.figure()
# plt.title('model3')
# nx.draw(G.G, pos=model3.vectors, with_labels=True)
# plt.figure()
plt.title('origin')
nx.draw(G.G, node_size=5, with_labels=False)
plt.show()
# vec = model1.vectors
# x = np.array([vec[k] for k in vec])
# plt.scatter(x[:, 0], x[:, 1])
# plt.show()
# G = nx.karate_club_graph()
forceatlas2 = ForceAtlas2(
# Behavior alternatives
outboundAttractionDistribution=False, # Dissuade hubs
linLogMode=False, # NOT IMPLEMENTED
# Prevent overlap (NOT IMPLEMENTED)
adjustSizes=False,
edgeWeightInfluence=1.0,
# Performance
jitterTolerance=1.0, # Tolerance
barnesHutOptimize=True,
barnesHutTheta=1.2,
multiThreaded=False, # NOT IMPLEMENTED
# Tuning
scalingRatio=2.0,
strongGravityMode=False,
gravity=1.0,
# Log
verbose=False)
# positions = forceatlas2.forceatlas2_networkx_layout(
# G, pos=None, iterations=2000)
# nx.draw_networkx(G, positions, cmap=plt.get_cmap(
# 'jet'), node_size=50, with_labels=True)
# plt.show()
# from matplotlib import animation
# fig = plt.figure(figsize=(10, 10))
# def init():
# model1 = Node2vec(G, 10, 10, 2, p=1.0, q=1.0)
# global pos
# pos = model1.vectors
# nx.draw(G.G, pos=pos, with_labels=True)
# return pos
# def animate(i):
# global pos
# fig = plt.figure(figsize=(10, 10))
# pos = forceatlas2.forceatlas2_networkx_layout(
# G.G, pos=model1.vectors, iterations=1)
# nx.draw(G.G, pos=pos, with_labels=True)
# return fig
# ani = animation.FuncAnimation(
# fig=fig, func=animate, frames=100, init_func=init, interval=20, repeat=False, blit=True)
# #!/usr/bin/env python
# import random
# import pylab
# from matplotlib.pyplot import pause
# import networkx as nx
# pylab.ion()
# # graph = nx.Graph()
# # node_number = 0
# # graph.add_node(node_number, Position=(random.randrange(0, 100), random.randrange(0, 100)))
# def get_fig():
# global pos
# # node_number += 1
# # graph.add_node(node_number, Position=(random.randrange(0, 100), random.randrange(0, 100)))
# # graph.add_edge(node_number, random.choice(graph.nodes()))
# # nx.draw(graph, pos=nx.get_node_attributes(graph,'Position'))
# pos = forceatlas2.forceatlas2_networkx_layout(
# G.G, pos=pos, iterations=1)
# nx.draw(G.G, pos=pos, with_labels=True)
# pos = model1.vectors
# num_plots = 50
# pylab.show()
# for i in range(num_plots):
# # plt.clf()
# get_fig()
# pylab.draw()
# pause(0.1)
# print(i)
#%%
print(1)
| 25.619048 | 98 | 0.641264 |
9f56225c11ed3de6fdc88bde3a0fd444f7f98829 | 1,291 | py | Python
python/example_code/acm/list_certificates.py | AkhmadRiswanda/aws-doc-sdk-examples | 46dbd6e1002f4d5c056df3eb478c318501782a17 | ["Apache-2.0"] | null | null | null
python/example_code/acm/list_certificates.py | AkhmadRiswanda/aws-doc-sdk-examples | 46dbd6e1002f4d5c056df3eb478c318501782a17 | ["Apache-2.0"] | null | null | null
python/example_code/acm/list_certificates.py | AkhmadRiswanda/aws-doc-sdk-examples | 46dbd6e1002f4d5c056df3eb478c318501782a17 | ["Apache-2.0"] | null | null | null
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create ACM client
acm = boto3.client('acm')
# List certificates with the pagination interface
paginator = acm.get_paginator('list_certificates')
for response in paginator.paginate():
for certificate in response['CertificateSummaryList']:
print(certificate)
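# Illustrative note added by the editor (not part of the original sample): the same
# paginator also accepts server-side filters, e.g. restricting the listing to issued
# certificates via the optional CertificateStatuses parameter of ListCertificates:
#   for response in acm.get_paginator('list_certificates').paginate(
#           CertificateStatuses=['ISSUED']):
#       for certificate in response['CertificateSummaryList']:
#           print(certificate['DomainName'], certificate['CertificateArn'])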
#snippet-sourcedescription:[list_certificates.py demonstrates how to retrieve a list of certificate ARNs and domain names.]
#snippet-keyword:[Python]
#snippet-keyword:[AWS SDK for Python (Boto3)]
#snippet-keyword:[Code Sample]
#snippet-keyword:[AWS Certificate Manager]
#snippet-service:[acm]
#snippet-sourcetype:[full-example]
#snippet-sourcedate:[2018-09-05]
#snippet-sourceauthor:[walkerk1980]
| 34.891892 | 124 | 0.747483 |
aabe70edcb52d64c5942e94abef1cf4d421bbf38 | 237 | py | Python
apps/frontend/routes.py | ilya-goldin/aiohttp-mvtiles | 8e1a2c4acba8ee77caf91e191b637b1776bceee6 | ["MIT"] | null | null | null
apps/frontend/routes.py | ilya-goldin/aiohttp-mvtiles | 8e1a2c4acba8ee77caf91e191b637b1776bceee6 | ["MIT"] | null | null | null
apps/frontend/routes.py | ilya-goldin/aiohttp-mvtiles | 8e1a2c4acba8ee77caf91e191b637b1776bceee6 | ["MIT"] | null | null | null
from .views import Handler
def frontend_routes(application):
"""
Append frontend routes and middleware
:param application:
:type application: aiohttp.web.Application
"""
application.router.add_get("/", Handler)
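# Illustrative usage sketch added by the editor (not part of the original module),
# assuming the standard aiohttp entry point:
#   from aiohttp import web
#   app = web.Application()
#   frontend_routes(app)
#   web.run_app(app)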
| 21.545455 | 46 | 0.704641 |
0ba0feb540574a8a383ea960a5015d6e40b19be6 | 2,398 | py | Python
nova/tests/scheduler/test_rpcapi.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | ["Apache-2.0"] | 2 | 2016-04-19T08:20:39.000Z | 2021-10-03T16:00:37.000Z
nova/tests/scheduler/test_rpcapi.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | ["Apache-2.0"] | 9 | 2015-05-20T11:20:17.000Z | 2017-07-27T08:21:33.000Z
nova/tests/scheduler/test_rpcapi.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | ["Apache-2.0"] | 13 | 2015-05-05T09:34:04.000Z | 2017-11-08T02:03:46.000Z
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.scheduler.rpcapi
"""
import mox
from oslo.config import cfg
from nova import context
from nova.scheduler import rpcapi as scheduler_rpcapi
from nova import test
CONF = cfg.CONF
class SchedulerRpcAPITestCase(test.NoDBTestCase):
def _test_scheduler_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = scheduler_rpcapi.SchedulerAPI()
self.assertIsNotNone(rpcapi.client)
self.assertEqual(rpcapi.client.target.topic, CONF.scheduler_topic)
expected_retval = 'foo' if rpc_method == 'call' else None
expected_version = kwargs.pop('version', None)
expected_fanout = kwargs.pop('fanout', None)
expected_kwargs = kwargs.copy()
self.mox.StubOutWithMock(rpcapi, 'client')
rpcapi.client.can_send_version(
mox.IsA(str)).MultipleTimes().AndReturn(True)
prepare_kwargs = {}
if expected_fanout:
prepare_kwargs['fanout'] = True
if expected_version:
prepare_kwargs['version'] = expected_version
rpcapi.client.prepare(**prepare_kwargs).AndReturn(rpcapi.client)
rpc_method = getattr(rpcapi.client, rpc_method)
rpc_method(ctxt, method, **expected_kwargs).AndReturn('foo')
self.mox.ReplayAll()
# NOTE(markmc): MultipleTimes() is OnceOrMore() not ZeroOrMore()
rpcapi.client.can_send_version('I fool you mox')
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
def test_select_destinations(self):
self._test_scheduler_api('select_destinations', rpc_method='call',
request_spec='fake_request_spec',
filter_properties='fake_prop')
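    # Illustrative note added by the editor (not part of the original tests): additional
    # scheduler RPC methods would be exercised through the same helper, e.g. a
    # hypothetical cast-style method would be tested as
    #   self._test_scheduler_api('some_method', rpc_method='cast', arg='value')
    # with _test_scheduler_api stubbing rpcapi.client via mox and replaying the expected
    # prepare()/call() or prepare()/cast() sequence.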
| 34.257143 | 78 | 0.69141 |
acac657276ab8951d297529eee32493b186e59e9 | 8,501 | py | Python
desktop/libs/indexer/src/indexer/indexers/sql.py | sandeepreddy3647/hue-1 | 03981c392ef35ae52052fb94549ec3a7b7074b8d | ["Apache-2.0"] | null | null | null
desktop/libs/indexer/src/indexer/indexers/sql.py | sandeepreddy3647/hue-1 | 03981c392ef35ae52052fb94549ec3a7b7074b8d | ["Apache-2.0"] | null | null | null
desktop/libs/indexer/src/indexer/indexers/sql.py | sandeepreddy3647/hue-1 | 03981c392ef35ae52052fb94549ec3a7b7074b8d | ["Apache-2.0"] | null | null | null
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
from builtins import object
import logging
import sys
import urllib.request, urllib.error
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.translation import ugettext as _
from desktop.lib import django_mako
from notebook.models import make_notebook
if sys.version_info[0] > 2:
from urllib.parse import unquote as urllib_unquote
else:
from urllib import unquote as urllib_unquote
LOG = logging.getLogger(__name__)
try:
from beeswax.server import dbms
except ImportError as e:
LOG.warn('Hive and HiveServer2 interfaces are not enabled')
class SQLIndexer(object):
def __init__(self, user, fs):
self.fs = fs
self.user = user
def create_table_from_a_file(self, source, destination, start_time=-1):
if '.' in destination['name']:
database, table_name = destination['name'].split('.', 1)
else:
database = 'default'
table_name = destination['name']
final_table_name = table_name
table_format = destination['tableFormat']
source_type = source['sourceType']
columns = destination['columns']
partition_columns = destination['partitionColumns']
kudu_partition_columns = destination['kuduPartitionColumns']
comment = destination['description']
source_path = urllib_unquote(source['path'])
external = not destination['useDefaultLocation']
external_path = urllib_unquote(destination['nonDefaultLocation'])
load_data = destination['importData']
skip_header = destination['hasHeader']
primary_keys = destination['primaryKeys']
if destination['useCustomDelimiters']:
field_delimiter = destination['customFieldDelimiter']
collection_delimiter = destination['customCollectionDelimiter']
map_delimiter = destination['customMapDelimiter']
else:
field_delimiter = ','
collection_delimiter = r'\002'
map_delimiter = r'\003'
regexp_delimiter = destination['customRegexp']
file_format = 'TextFile'
row_format = 'Delimited'
serde_name = ''
serde_properties = ''
extra_create_properties = ''
sql = ''
if source['inputFormat'] == 'manual':
load_data = False
source['format'] = {
'quoteChar': '"',
'fieldSeparator': ','
}
if table_format == 'json':
row_format = 'serde'
serde_name = 'org.apache.hive.hcatalog.data.JsonSerDe'
elif table_format == 'regexp':
row_format = 'serde'
serde_name = 'org.apache.hadoop.hive.serde2.RegexSerDe'
serde_properties = '"input.regex" = "%s"' % regexp_delimiter
elif table_format == 'csv':
if source['format']['quoteChar'] == '"':
source['format']['quoteChar'] = '\\"'
row_format = 'serde'
serde_name = 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
serde_properties = '''"separatorChar" = "%(fieldSeparator)s",
"quoteChar" = "%(quoteChar)s",
"escapeChar" = "\\\\"
''' % source['format']
if table_format in ('parquet', 'kudu'):
if load_data:
table_name, final_table_name = 'hue__tmp_%s' % table_name, table_name
sql += '\n\nDROP TABLE IF EXISTS `%(database)s`.`%(table_name)s`;\n' % {
'database': database,
'table_name': table_name
}
else: # Manual
row_format = ''
file_format = table_format
skip_header = False
if table_format == 'kudu':
columns = [col for col in columns if col['name'] in primary_keys] + [col for col in columns if col['name'] not in primary_keys]
if table_format == 'kudu':
collection_delimiter = None
map_delimiter = None
if external or (load_data and table_format in ('parquet', 'kudu')):
if not self.fs.isdir(external_path): # File selected
external_path, external_file_name = self.fs.split(external_path)
if len(self.fs.listdir(external_path)) > 1:
external_path = external_path + '/%s_table' % external_file_name # If dir not just the file, create data dir and move file there.
self.fs.mkdir(external_path)
self.fs.rename(source_path, external_path)
sql += django_mako.render_to_string("gen/create_table_statement.mako", {
'table': {
'name': table_name,
'comment': comment,
'row_format': row_format,
'field_terminator': field_delimiter,
'collection_terminator': collection_delimiter if source_type == 'hive' else None,
'map_key_terminator': map_delimiter if source_type == 'hive' else None,
'serde_name': serde_name,
'serde_properties': serde_properties,
'file_format': file_format,
'external': external or load_data and table_format in ('parquet', 'kudu'),
'path': external_path,
'skip_header': skip_header,
'primary_keys': primary_keys if table_format == 'kudu' and not load_data else [],
},
'columns': columns,
'partition_columns': partition_columns,
'kudu_partition_columns': kudu_partition_columns,
'database': database
}
)
if table_format in ('text', 'json', 'csv', 'regexp') and not external and load_data:
form_data = {
'path': source_path,
'overwrite': False,
'partition_columns': [(partition['name'], partition['partitionValue']) for partition in partition_columns],
}
query_server_config = dbms.get_query_server_config(name=source_type)
db = dbms.get(self.user, query_server=query_server_config)
sql += "\n\n%s;" % db.load_data(database, table_name, form_data, None, generate_ddl_only=True)
if load_data and table_format in ('parquet', 'kudu'):
file_format = table_format
if table_format == 'kudu':
columns_list = ['`%s`' % col for col in primary_keys + [col['name'] for col in destination['columns'] if col['name'] not in primary_keys and col['keep']]]
extra_create_properties = """PRIMARY KEY (%(primary_keys)s)
PARTITION BY HASH PARTITIONS 16
STORED AS %(file_format)s
TBLPROPERTIES(
'kudu.num_tablet_replicas' = '1'
)""" % {
'file_format': file_format,
'primary_keys': ', '.join(primary_keys)
}
else:
columns_list = ['*']
extra_create_properties = 'STORED AS %(file_format)s' % {'file_format': file_format}
sql += '''\n\nCREATE TABLE `%(database)s`.`%(final_table_name)s`%(comment)s
%(extra_create_properties)s
AS SELECT %(columns_list)s
FROM `%(database)s`.`%(table_name)s`;''' % {
'database': database,
'final_table_name': final_table_name,
'table_name': table_name,
'extra_create_properties': extra_create_properties,
'columns_list': ', '.join(columns_list),
'comment': ' COMMENT "%s"' % comment if comment else ''
}
sql += '\n\nDROP TABLE IF EXISTS `%(database)s`.`%(table_name)s`;\n' % {
'database': database,
'table_name': table_name
}
editor_type = 'impala' if table_format == 'kudu' else destination['sourceType']
on_success_url = reverse('metastore:describe_table', kwargs={'database': database, 'table': final_table_name}) + '?source_type=' + source_type
return make_notebook(
name=_('Creating table %(database)s.%(table)s') % {'database': database, 'table': final_table_name},
editor_type=editor_type,
statement=sql.strip(),
status='ready',
database=database,
on_success_url=on_success_url,
last_executed=start_time,
is_task=True
)
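# Illustrative note added by the editor (not part of the original module): callers are
# expected to pass dictionaries shaped roughly like the following (all values below are
# hypothetical; only the keys read in create_table_from_a_file matter):
#   source = {'sourceType': 'hive', 'inputFormat': 'file', 'path': '/user/demo/data.csv',
#             'format': {'quoteChar': '"', 'fieldSeparator': ','}}
#   destination = {'name': 'default.demo', 'tableFormat': 'text', 'columns': [...],
#                  'partitionColumns': [], 'kuduPartitionColumns': [], 'description': '',
#                  'useDefaultLocation': True, 'nonDefaultLocation': '', 'importData': True,
#                  'hasHeader': True, 'primaryKeys': [], 'useCustomDelimiters': False,
#                  'customFieldDelimiter': ',', 'customCollectionDelimiter': '\\002',
#                  'customMapDelimiter': '\\003', 'customRegexp': '', 'sourceType': 'hive'}
#   notebook = SQLIndexer(user=request.user, fs=request.fs).create_table_from_a_file(
#       source, destination)
# The returned make_notebook(...) object is what the import wizard then executes.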
| 37.782222 | 162 | 0.65804 |
4568f8779aa9e9969e9e3d6ba2b40799469fbe49 | 1,414 | py | Python
lexical_field_method.py | margauxschmied/TATIA | a0a9fa027c33aeac962d30dca48f358f4f34e114 | ["MIT"] | 2 | 2022-01-15T17:43:25.000Z | 2022-01-15T17:55:33.000Z
lexical_field_method.py | margauxschmied/TATIA | a0a9fa027c33aeac962d30dca48f358f4f34e114 | ["MIT"] | null | null | null
lexical_field_method.py | margauxschmied/TATIA | a0a9fa027c33aeac962d30dca48f358f4f34e114 | ["MIT"] | null | null | null
import pandas as pd
from tqdm import trange
from classifier import clean_text
def clean_lexical(df):
lexical = df["lexical_field"]
# new_df = pd.DataFrame()
for i in range(len(df)):
splited = lexical[i].split(" ")
l = clean_text(" ".join(set(splited)))
df["lexical_field"][i] = l
df.to_csv("archive/lexical_field_clean.csv")
def similar(summary, lexical_field):
summary = summary.split(" ")
lexical_field = lexical_field.split(" ")
ret = 0
for s in summary:
if s in lexical_field:
ret += 1
return ret
# partially working version
def get_similarities_counter(df, text):
lexical = df["lexical_field"]
genre = ""
note = 0
clean_test = clean_text(text)
for i in range(len(df)):
tmp = similar(clean_test, lexical[i])
if note < tmp:
note = tmp
genre = df["genre"][i]
# print(genre)
return genre
df = pd.read_csv("archive/lexical_field_clean.csv")
# clean_lexical(df)
data = pd.read_csv("archive/dataset_csv/test_data_solution.csv")
sentences = data["description"].values
genrePredit = ""
error = 0
for i in trange(len(sentences)):
genrePredit = get_similarities_counter(df, sentences[i])
if genrePredit != data["genre"][i]:
error += 1
# print(genrePredit+" "+data["genre"][i])
error = (error / len(sentences)) * 100
print(error)
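# Illustrative note added by the editor (not part of the original script): similar()
# simply counts word overlap, e.g.
#   similar("alien space ship", "space alien war") == 2
# and the figure printed above is the error rate in percent, i.e.
# (misclassified / total) * 100, so 300 wrong predictions out of 1000 would print 30.0.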
| 21.753846 | 64 | 0.628713 |
1fa01e469e92735696385d18fc8df5e43d6df6f1 | 1,333 | py | Python
octavia/tests/unit/controller/healthmanager/health_drivers/test_update_base.py | sajuptpm/octavia | fde4ebe822072a79bb74497b504ca3f0a6a6518d | ["Apache-2.0"] | null | null | null
octavia/tests/unit/controller/healthmanager/health_drivers/test_update_base.py | sajuptpm/octavia | fde4ebe822072a79bb74497b504ca3f0a6a6518d | ["Apache-2.0"] | null | null | null
octavia/tests/unit/controller/healthmanager/health_drivers/test_update_base.py | sajuptpm/octavia | fde4ebe822072a79bb74497b504ca3f0a6a6518d | ["Apache-2.0"] | null | null | null
# Copyright 2018 GoDaddy
# Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from octavia.controller.healthmanager.health_drivers import update_base
from octavia.tests.unit import base
class TestHealthUpdateBase(base.TestCase):
def setUp(self):
super(TestHealthUpdateBase, self).setUp()
self.logger = update_base.HealthUpdateBase()
def test_update_health(self):
self.assertRaises(NotImplementedError,
self.logger.update_health, {'id': 1})
class TestStatsUpdateBase(base.TestCase):
def setUp(self):
super(TestStatsUpdateBase, self).setUp()
self.logger = update_base.StatsUpdateBase()
def test_update_stats(self):
self.assertRaises(NotImplementedError,
self.logger.update_stats, {'id': 1})
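    # Illustrative sketch added by the editor (not part of the original tests): a concrete
    # driver is expected to subclass the base class and override the hook, e.g.
    #   class PrintingHealthUpdate(update_base.HealthUpdateBase):
    #       def update_health(self, health):
    #           print('amphora health message: %s' % health)
    # The assertions above only pin down that the un-overridden base methods raise
    # NotImplementedError.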
| 34.179487 | 75 | 0.72093 |
a5154f38b17d43531fc2cd715a7fd0861a4ece97 | 885 | py | Python
setup.py | Serhiy-Shekhovtsov/clasp | d20024304c8fe5dc285b2b35eab9e7135a1163c3 | ["MIT"] | 110 | 2021-04-02T11:37:21.000Z | 2022-03-28T05:58:21.000Z
setup.py | Serhiy-Shekhovtsov/clasp | d20024304c8fe5dc285b2b35eab9e7135a1163c3 | ["MIT"] | 6 | 2021-04-06T22:42:08.000Z | 2021-08-23T18:13:54.000Z
setup.py | Bovey0809/clasp | a92acd8ed33f9b4cc53ed99501d66e41a3a0d4f6 | ["MIT"] | 18 | 2021-04-02T13:59:30.000Z | 2022-03-08T02:39:27.000Z
from setuptools import setup, find_packages
setup(
name = 'bioseq-clasp',
packages = find_packages(),
version = '0.0.1',
license='MIT',
description = 'CLASP - CLIP for biosequences and their annotation data',
author = 'MicPie',
author_email = '',
url = 'https://github.com/MicPie/clasp',
keywords = [
'artificial intelligence',
'deep learning',
'contrastive learning',
'proteomics'
],
install_requires=[
'einops>=0.3',
'torch>=1.6',
'ftfy',
'regex',
'requests',
'matplotlib'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
package_data={'': ['data/bpe_simple_vocab_16e6.txt']},
include_package_data=True
)
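# Illustrative note added by the editor (not part of the original file): with this
# configuration a local editable install is simply `pip install -e .`, and the
# package_data / include_package_data entries are what make
# data/bpe_simple_vocab_16e6.txt available alongside the installed package.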
| 24.583333 | 74 | 0.636158 |
691843a9dee40f33db5df7dbbc03fe0fd0a4087d | 61,596 | py | Python
tensorflow/python/framework/func_graph.py | Alavandar08/tensorflow | e0d69364ec2ad40ac97b7d41683264080f07c582 | ["Apache-2.0"] | 1 | 2021-04-28T08:36:49.000Z | 2021-04-28T08:36:49.000Z
tensorflow/python/framework/func_graph.py | Alavandar08/tensorflow | e0d69364ec2ad40ac97b7d41683264080f07c582 | ["Apache-2.0"] | null | null | null
tensorflow/python/framework/func_graph.py | Alavandar08/tensorflow | e0d69364ec2ad40ac97b7d41683264080f07c582 | ["Apache-2.0"] | null | null | null
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""FuncGraph and related functionality."""
import collections as py_collections
import itertools
import traceback
import weakref
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager import execute
from tensorflow.python.eager import tape
from tensorflow.python.eager.graph_only_ops import graph_placeholder
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import handle_data_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.saved_model import save_context
from tensorflow.python.util import compat
from tensorflow.python.util import memory
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export
ALLOWLIST_COLLECTIONS = [
ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES,
ops.GraphKeys.TRAINABLE_VARIABLES,
variable_scope._VARSTORE_KEY, # pylint: disable=protected-access
variable_scope._VARSCOPESTORE_KEY # pylint: disable=protected-access
]
_EAGER_CONST_THRESHOLD = 128
class UnknownArgument(object):
"""Signifies an argument which is not currently handled."""
pass
def convert_structure_to_signature(structure, arg_names=None):
"""Convert a potentially nested structure to a signature.
Args:
structure: Structure to convert, where top level collection is a list or a
tuple.
arg_names: Optional list of arguments that has equal number of elements as
`structure` and is used for naming corresponding TensorSpecs.
Returns:
Identical structure that has TensorSpec objects instead of Tensors and
UnknownArgument instead of any unsupported types.
"""
def encode_arg(arg, path):
"""A representation for this argument, for converting into signatures."""
if isinstance(arg, ops.Tensor):
user_specified_name = None
try:
user_specified_name = compat.as_str(
arg.op.get_attr("_user_specified_name"))
except ValueError:
pass
if path and user_specified_name and user_specified_name != path[0]:
# The user has explicitly named the argument differently than the name
# of the function argument.
name = user_specified_name
else:
name = "/".join(str(p) for p in path)
return tensor_spec.TensorSpec(arg.shape, arg.dtype, name)
if isinstance(arg, composite_tensor.CompositeTensor):
# TODO(b/133606651) Do we need to inject arg_name?
return arg._type_spec # pylint: disable=protected-access
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
name = "/".join(str(p) for p in path)
return resource_variable_ops.VariableSpec(arg.shape, arg.dtype, name,
trainable=arg.trainable)
if isinstance(arg, (
int,
float,
bool,
str,
type(None),
dtypes.DType,
tensor_spec.TensorSpec,
type_spec.TypeSpec,
)):
return arg
return UnknownArgument()
# We are using the flattened paths to name the TensorSpecs. We need an
# explicit name for them downstream.
flattened = nest.flatten_with_tuple_paths(structure)
if arg_names:
if len(arg_names) != len(structure):
raise ValueError(
"Passed in arg_names don't match actual signature (%s)." % arg_names)
# Replace all top-level names with their actual arg_names. If a path before
# was "(2,'a',1)", it will become "(arg_names[2],'a',1)".
flattened = [
((arg_names[path[0]],) + path[1:], arg) for path, arg in flattened
]
mapped = [encode_arg(arg, path) for path, arg in flattened]
return nest.pack_sequence_as(structure, mapped)
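# Editor's illustrative note (not part of the original module): for intuition,
# TensorSpec/TypeSpec objects and plain Python scalars pass through encode_arg
# unchanged, ops.Tensor inputs become TensorSpec objects named after their path (or
# their user-specified name), and unsupported objects are replaced by UnknownArgument,
# e.g. (values are hypothetical):
#   convert_structure_to_signature(([tensor_spec.TensorSpec([None, 3]), 42, object()],))
#   -> ([TensorSpec(shape=(None, 3), dtype=tf.float32), 42, UnknownArgument()],)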
@tf_export("__internal__.FuncGraph", v1=[])
class FuncGraph(ops.Graph):
"""Graph representing a function body.
Attributes:
name: The name of the function.
inputs: Placeholder tensors representing the inputs to this function. The
tensors are in this FuncGraph. This represents "regular" inputs as well as
captured inputs (i.e. the values of self.captures), with the regular
inputs coming first.
outputs: Tensors that will be returned by this function. The tensors are in
this FuncGraph.
control_outputs: Operations that must be executed before the function
represented by this graph can be said to have been executed.
structured_input_signature: A tuple of (args, kwargs), which are both
possibly-nested python objects that were received by this function. Note
that these structures might contain Python `None`s.
structured_outputs: A possibly-nested python object which will be returned
by this function. The Tensors in this structure are the same as those of
self.outputs. Note that this structure might contain Python `None`s.
variables: Variables that should be watched during function execution.
outer_graph: The graph this function is defined in. May be another FuncGraph
or the global default Graph.
captures: Maps external tensor -> internal tensor (i.e. input placeholder).
The entries are in the order they were captured.
control_captures: Set of external ops on which this graph has a control
dependency.
seed: The graph-level random seed.
capture_by_value: If True, the func graph will capture Variables by value
instead of reference.
"""
def __init__(self, name, collections=None, capture_by_value=None):
"""Construct a new FuncGraph.
The graph will inherit its graph key, collections, seed, and distribution
strategy stack from the current context or graph.
Args:
name: the name of the function.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write
to) the outer graph's collections that are not allowlisted, and both
read and write to the outer graph's collections that are allowlisted.
The current allowlisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will
capture Variables by value instead of reference. By default inherit
from outer graphs, and failing that will default to False.
"""
super(FuncGraph, self).__init__()
self.name = name
self.inputs = []
self.outputs = []
self.control_outputs = []
self.control_captures = object_identity.ObjectIdentitySet()
self.structured_input_signature = None
self.structured_outputs = None
self._weak_variables = []
self._watched_variables = object_identity.ObjectIdentityWeakSet()
self.is_control_flow_graph = False
outer_graph = ops.get_default_graph()
self._weak_outer_graph = weakref.ref(outer_graph)
while outer_graph.building_function:
outer_graph = outer_graph.outer_graph
# If self._weak_outer_graph is deleted, we revert to the outermost Graph
# active when the FuncGraph was traced. This will not be a FuncGraph.
self._fallback_outer_graph = outer_graph
self._captures = py_collections.OrderedDict()
# If not None, records the names of output args of this function. Used to
# preserve the output names in the signature of a serialized+deserialized
# function. Private at the moment mostly because it's often out of date.
self._output_names = None
# Maps arbitrary key -> (closure, nest of placeholders), where at function
# call time the value of closure() will be used to feed the nest of
# placeholders.
self._deferred_captures = py_collections.OrderedDict()
# Inherit capture-by-value from outer graph.
if capture_by_value is not None:
self.capture_by_value = capture_by_value
elif self.outer_graph is not None and isinstance(
self.outer_graph, FuncGraph):
self.capture_by_value = self.outer_graph.capture_by_value
else:
self.capture_by_value = False
self._building_function = True
# Map from resource tensor name to last op (in program order) which uses
# this tensor. Used to enforce that execution order matches program order
# for resource tensors.
self._last_op_using_resource_tensor = {}
graph = self.outer_graph
if context.executing_eagerly():
self.seed = context.global_seed()
# [for tf-data user migration from TF1.0 to 2.0] seed_used keep track of
# any None op_seed for random_op in the function, in which case we end up
# using function seed, which could be unintended behavior for the op.
self._seed_used = False
else:
self.seed = graph.seed
self._seed_used = False
# TODO(allenl): Figure out if we can remove colocation stack
# specialization (currently used in cond_v2), here and in the cache key.
self._colocation_stack = graph._colocation_stack.copy() # pylint: disable=protected-access
if collections is None:
for collection_name in graph.get_all_collection_keys():
if collection_name not in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection(
collection_name)
for collection_name in ALLOWLIST_COLLECTIONS:
self._collections[collection_name] = graph.get_collection_ref(
collection_name)
else:
self._collections = collections
# Keep track of whether this FuncGraph is exportable to SavedModel. Use
# `graph.mark_as_unsaveable(reason)` to mark this FuncGraph and any
# dependent functions as unsaveable.
self._saveable = True
self._saving_errors = set()
# Keep track of callbacks to run when this graph exits default scope
self._scope_exit_callbacks = None
def __str__(self):
return "FuncGraph(name=%s, id=%s)" % (self.name, id(self))
def watch_variable(self, v):
"""Marks the variable v as accessed while building this graph."""
while self is not None and isinstance(self, FuncGraph):
self._watched_variables.add(v)
self = self.outer_graph
def capture_call_time_value(self,
closure,
spec,
key=None,
default_value=None,
placeholder=None):
"""Returns a placeholder which at call time has the value closure().
The `tf.function` supports the notion of captures, that is, it allows Python
functions to have closure variables, which bind over some value outside the
function. However, this name binding is "early binding" performed before the
program is run, i.e.,
```
@tf.function
def f():
return x
x = tf.constant(1)
f() # returns 1
x = tf.constant(2)
f() # still returns 1!
```
while in Python, name binding is performed as the program is running.
```
def f():
return x
x = 1
f() # returns 1
x = 2
f() # returns 2
```
`capture_call_time_value` allows tf.function to mimic late binding as a
Python function does, by passing in a `closure` callable argument to be
executed when the tf.function is invoked eagerly. E.g.
```
@tf.function
def f():
return ops.get_default_graph.capture_call_time_value(lambda: x)
x = tf.constant(1)
f() # returns 1
x = tf.constant(2)
f() # returns 2
```
Note that a `capture_call_time_value` function itself does not work well in
the saving process (since the tf.function in which it's called is not
invoked eagerly) unless passed a `default_value` argument. At saving time,
the `default_value` argument is returned instead.
Args:
closure: function which takes no arguments, to be evaluated at function
call time, returning a nest of tensors compatible with `spec`.
spec: nest of TypeSpec for the value to capture.
key: optional. If not None, multiple calls to lazy_capture with the same
key in the same graph will return the same placeholder, and the
first closure will be used at function call time.
default_value: optional value to return in environments that cannot safely
evaluate closure.
placeholder: optional. If not None, the graph will take the passed-in
`placeholder` as the internal capture instead of creating a new one.
This is useful when loading from a SavedModel.
Returns:
Nest of placeholders which, at function call time, will be fed with the
result of calling closure().
Raises:
ValueError: at function call time, if the return value of closure() is
not compatible with `spec`.
"""
if key is None:
key = object()
if key not in self._deferred_captures:
if placeholder is None:
def convert_to_placeholder(s):
if not isinstance(s, tensor_spec.DenseSpec):
raise TypeError(
"Expected a nest of `TypeSpec` objects, found %s of type %s." %
(s, type(s)))
return array_ops.placeholder(dtype=s.dtype, shape=s.shape)
placeholder = nest.map_structure(
convert_to_placeholder, spec, expand_composites=True)
def wrapped_closure():
# One major case requiring returning a `default_value` is when passing a
# concrete function to `save`, i.e.
# serving_fn = serve_fn.get_concrete_function(...)
# model.save(save_dir, signatures={"serving_default": serving_fn})
# `serving_fn` has deferred captures added through
# `capture_call_time_value`. It can't be saved correctly since
# `wrapped_closure` will end up executing under a default Graph instead
# of FuncGraph. The user of `capture_call_time_value` also cannot
# conditionally avoid this call since presence of `save_context` when
# executing `wrapped_closure` is not known at tracing time of
# `serving_fn`.
if save_context.in_save_context() and default_value is not None:
return default_value
# TODO(wxinyi): raise an error if in save context but no default value.
if not context.executing_eagerly():
graph = ops.get_default_graph()
# In the case of control flow, we need to capture the
# external_captures (deferred or not) of the body_graph (i.e.
# `WhileBodyFuncGraph) in `cond_graph` (i.e. WhileCondFuncGraph) and
# create the corresponding placeholders in `cond_graph` so that it
# expects to receive these as arguments. However, doing so requires
# having evaluated the call_time_value already (and maybe repeatedly),
# so we skip adding deferred_captures to the control flow graph but
# add it to its outer graph.
while graph.is_control_flow_graph:
graph = graph.outer_graph
with graph.as_default():
ret_nest = graph.capture_call_time_value(
closure, spec, key=key, default_value=default_value)
else:
ret_nest = closure()
nest.assert_same_structure(spec, ret_nest, expand_composites=True)
# This uses the tensor dtype defined in `spec` when converting values
# in `ret_nest` to tensors.
# pylint: disable=protected-access
y = nest.map_structure(lambda s, r: s._to_components(r), spec, ret_nest,
expand_composites=False)
# pylint: enable=protected-access
return nest.flatten(y, expand_composites=True)
wrapped_closure.output_spec = spec
self._deferred_captures[key] = (wrapped_closure, placeholder)
return self._deferred_captures[key][1]
def control_dependencies(self, control_inputs):
"""Handles control dependencies.
FuncGraph wraps Graph's control_dependencies logic by first filtering out
any external tensors / operations and storing them in the graph's
control_captures member. Any consumers of this function graph must then
decide how to handle the control captures.
Args:
control_inputs: A list of `Operation` or `Tensor` objects which
must be executed or computed before running the operations
defined in the context. Can also be `None` to clear the control
dependencies.
Returns:
A context manager that specifies control dependencies for all
operations constructed within the context.
Raises:
TypeError: If `control_inputs` is not a list of `Operation` or
`Tensor` objects.
"""
if control_inputs is None:
return super(FuncGraph, self).control_dependencies(control_inputs)
filtered_control_inputs = []
for c in control_inputs:
# Check for _UnreadVariable
if (isinstance(c, indexed_slices.IndexedSlices) or
(hasattr(c, "_handle") and hasattr(c, "op"))):
c = c.op
graph_element = ops._as_graph_element(c) # pylint: disable=protected-access
if graph_element is None:
graph_element = c
if graph_element is not None and getattr(
graph_element, "graph", None) is not self:
self.control_captures.add(graph_element)
else:
filtered_control_inputs.append(graph_element)
return super(FuncGraph, self).control_dependencies(filtered_control_inputs)
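  # Editor's illustrative note (not part of the original module): inside a FuncGraph,
  #   with func_graph.as_default():
  #     with func_graph.control_dependencies([outer_tensor]):  # tensor from the outer graph
  #       y = array_ops.identity(x)
  # records `outer_tensor` in func_graph.control_captures instead of wiring the control
  # edge directly, while dependencies on ops created inside this graph behave as in
  # ordinary Graph.control_dependencies.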
def as_default(self):
outer_cm = super(FuncGraph, self).as_default()
@tf_contextlib.contextmanager
def inner_cm():
"""Context manager for copying distribute.Strategy scope information."""
# pylint: disable=protected-access
# TODO(b/112906995, nareshmodi): distribution strategy depends on
# inheriting this stack from the default graph even in eager mode. Maybe
# it should be part of the eager context? This would also allow us to
# remove a get_default_graph() call from the function cache lookup.
graph = ops.get_default_graph()
old_strategy_stack = self._distribution_strategy_stack
self._distribution_strategy_stack = list(
graph._distribution_strategy_stack)
# We ignore device placements from any outer scopes while tracing the
# function when possible, to avoid hard-coding them in the function
# graph. "Default" placements come from the PartitionedCallOp's placement,
# so that the same trace of the Python function may be placed on several
# different devices and saved functions may be placed on new devices when
# restored.
# However, we need to preserve the outer device stack in the following
# cases in non eager context:
# 1. device stack is callable
# 2. When using distribution strategy with legacy graph mode.
old_device_stack = self._device_function_stack
if (not context.executing_eagerly() and
(device_stack_has_callable(graph._device_function_stack) or
(self._distribution_strategy_stack and
not ops.executing_eagerly_outside_functions()))):
# Hard-code devices from device functions in the function body
self._device_function_stack = graph._device_function_stack.copy()
old_creator_stack = self._variable_creator_stack
self._variable_creator_stack = graph._variable_creator_stack
# Inherit the graph key, since this is used for matching variables in
# optimizers.
old_graph_key = self._graph_key
self._graph_key = graph._graph_key
# pylint: enable=protected-access
old_scope_exit_callbacks = self._scope_exit_callbacks
self._scope_exit_callbacks = []
with outer_cm as g:
try:
yield g
finally:
try:
for fn in self._scope_exit_callbacks:
fn()
finally:
self._scope_exit_callbacks = old_scope_exit_callbacks
self._distribution_strategy_stack = old_strategy_stack
self._device_function_stack = old_device_stack
self._variable_creator_stack = old_creator_stack
self._graph_key = old_graph_key
return inner_cm()
@property
def outer_graph(self):
"""The Graph this FuncGraph is nested in.
Functions may capture Tensors from graphs they are nested in (transitive).
Returns:
A Graph object. Initially set to the current default graph when the
FuncGraph was created. If the previous `outer_graph` was deleted because
the function that owns it was deleted, `outer_graph` is reset to the
outermost default graph active when the FuncGraph was created. This
FuncGraph won't have captured anything from the new `outer_graph` (and
likely not from the previous setting, since that would have created a
strong reference), but it is returned so that FuncGraphs always have a
parent.
"""
current = self._weak_outer_graph()
if current is None:
return self._fallback_outer_graph
return current
@outer_graph.setter
def outer_graph(self, new_outer_graph):
"""Sets `outer_graph` to `new_outer_graph`."""
self._weak_outer_graph = weakref.ref(new_outer_graph)
@property
def output_types(self):
return [t.dtype for t in self.outputs]
@property
def output_shapes(self):
return [t.shape for t in self.outputs]
@property
def trainable_variables(self):
"""A sequence of trainable variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Returns:
Sequence of trainable variables for this func graph.
"""
return tuple(v for v in self.variables if v.trainable)
@property
def variables(self):
"""A sequence of variables accessed by this FuncGraph.
Note that functions keep only weak references to variables. Calling the
function after a variable it accesses has been deleted is an error.
Returns:
Sequence of variables for this func graph.
"""
def deref(weak_v):
v = weak_v()
if v is None:
raise AssertionError(
"Called a function referencing variables which have been deleted. "
"This likely means that function-local variables were created and "
"not referenced elsewhere in the program. This is generally a "
"mistake; consider storing variables in an object attribute on "
"first call.")
return v
return tuple(deref(v) for v in self._weak_variables)
@variables.setter
def variables(self, var_list):
self._weak_variables = [weakref.ref(v) for v in var_list]
def _capture_by_value(
self,
op_type,
inputs,
dtypes, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
# When capturing by value, do the read outside
reverse_captures = dict((id(v), k) for k, v in self.captures)
uncaptured_inputs = [reverse_captures.get(id(t), t) for t in inputs]
with ops.init_scope():
if context.executing_eagerly():
attr_list = ("dtype", int(attrs["dtype"].type))
value, = execute.execute(
compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
context.context())
else:
op = ops.get_default_graph()._create_op_internal( # pylint: disable=protected-access
op_type,
uncaptured_inputs,
dtypes,
input_types,
name,
attrs,
op_def,
compute_device)
value = op.outputs[0]
captured_value = self.capture(value)
return captured_value.op
def _create_op_internal(
self,
op_type,
inputs,
dtypes=None, # pylint: disable=redefined-outer-name
input_types=None,
name=None,
attrs=None,
op_def=None,
compute_device=True):
"""Like Graph.create_op, except handles external input tensors.
This overload adds functionality to create_op to "capture" any external
input tensors, i.e. tensors from the eager context or outer function graphs
if this is a nested function. See `capture` for more information.
Args:
op_type: The `Operation` type to create. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
inputs: A list of `Tensor` objects that will be inputs to the `Operation`.
dtypes: (Optional) A list of `DType` objects that will be the types of the
tensors that the operation produces.
input_types: (Optional.) A list of `DType`s that will be the types of
the tensors that the operation consumes. By default, uses the base
`DType` of each input in `inputs`. Operations that expect
reference-typed inputs must specify `input_types` explicitly.
name: (Optional.) A string name for the operation. If not specified, a
name is generated based on `op_type`.
attrs: (Optional.) A dictionary where the key is the attribute name (a
string) and the value is the respective `attr` attribute of the
`NodeDef` proto that will represent the operation (an `AttrValue`
proto).
op_def: (Optional.) The `OpDef` proto that describes the `op_type` that
the operation will have.
compute_device: (Optional.) If True, device functions will be executed
to compute the device property of the Operation.
Returns:
An `Operation` object.
"""
if self.capture_by_value and op_type in ["ReadVariableOp",
"ResourceGather"]:
return self._capture_by_value(op_type, inputs, dtypes, input_types, name,
attrs, op_def, compute_device)
# This capturing logic interacts poorly with control flow contexts which
# want to replace inputs of ops far too late in the process. This can lead
# the context to get confused and try to create an Enter for an Enter. We
# can detect this here and skip the additional Enter which can confuse loop
# validation logic.
if op_type == "Enter" and inputs[0].op.type == "Enter":
if inputs[0].op.get_attr("frame_name") == attrs["frame_name"].s:
return inputs[0].op
# Calling AddValue on the control flow contexts to force creation of the
# backward accumulators in the original graph before we create placeholders
# to capture the inputs.
ctxt = ops.get_default_graph()._control_flow_context # pylint: disable=protected-access
# Use a different list to avoid modifying the original inputs list.
captured_inputs = []
for inp in inputs:
# TPU Estimator defines a control flow context with no AddValue method.
if ctxt is not None and hasattr(ctxt, "AddValue"):
inp = ctxt.AddValue(inp)
inp = self.capture(inp)
captured_inputs.append(inp)
return super(FuncGraph, self)._create_op_internal( # pylint: disable=protected-access
op_type, captured_inputs, dtypes, input_types, name, attrs, op_def,
compute_device)
def capture(self, tensor, name=None, shape=None):
"""Captures `tensor` if it's external to this graph.
If `tensor` is from a different graph, returns a placeholder for it.
`tensor` and the placeholder will appear in self.captures, and the
placeholder will appear in self.inputs. Multiple calls to this method with
the same `tensor` argument will return the same placeholder. If `tensor` is
from this graph, returns `tensor`.
Args:
tensor: Tensor. May be from this FuncGraph or a different graph.
name: Optional name if a placeholder is created.
shape: Optional shape if a placeholder is created.
Returns:
Tensor from this FuncGraph.
Raises:
InaccessibleTensorError: if any tensors are accessed in a manner that
bypasses the mechanisms required for the data dependencies to be correctly
wired.
"""
if isinstance(tensor, ops.EagerTensor):
if name is None:
name = str(ops.uid())
# Small EagerTensors are captured with Const ops
if (tensor.dtype in dtypes.TF_VALUE_DTYPES and
np.prod(tensor.shape) <= _EAGER_CONST_THRESHOLD):
return self.capture_eager_tensor(tensor, name)
# Large EagerTensors and resources are captured with Placeholder ops
return self._capture_helper(tensor, name, shape)
if tensor.graph is not self:
if name is None:
name = tensor.op.name
inner_graph = tensor.graph
while inner_graph is not None and isinstance(inner_graph, FuncGraph):
if inner_graph is self:
try:
tb = tensor.op.traceback
except AttributeError:
tensor_traceback = "<unknown>"
else:
tensor_traceback_list = []
for frame in traceback.format_list(tb.get_user_frames()):
tensor_traceback_list.extend(
[f" {line}" for line in frame.split("\n") if line.strip()])
tensor_traceback = "\n".join(tensor_traceback_list)
# Keep in sync with tfe_wrapper.cc.
# TODO(b/200991648): Unify those two paths.
raise errors.InaccessibleTensorError(
f"{tensor!r} is out of scope and cannot be used here. Use return "
"values, explicit Python locals or TensorFlow collections to "
"access it.\n"
"Please see https://www.tensorflow.org/guide/function#all_outputs_of_a_tffunction_must_be_return_values "
"for more information.\n\n"
f"{tensor!r} was defined here:\n{tensor_traceback}\n\n"
f"The tensor {tensor!r} cannot be accessed from {self}, because "
f"it was defined in {tensor.graph}, which is out of scope.")
inner_graph = inner_graph.outer_graph
return self._capture_helper(tensor, name)
return tensor
def _capture_helper(self, tensor, name, shape=None):
capture = self._captures.get(id(tensor))
if capture is None:
placeholder = _create_substitute_placeholder(
tensor, name=name, dtype=tensor.dtype, shape=shape)
# Record the composite device as an attribute to the placeholder.
      # This attribute will be propagated into the arg_attr of the FunctionDef.
# Currently, a packed eager tensor is always placed on a CompositeDevice.
if isinstance(tensor, ops.EagerTensor) and tensor.is_packed:
placeholder.op._set_attr( # pylint: disable=protected-access
"_composite_device",
attr_value_pb2.AttrValue(s=compat.as_bytes(tensor.device)))
self.add_capture(tensor, placeholder)
else:
placeholder = capture[1]
tape.record_operation("captured_value", [placeholder], [tensor],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
return placeholder
@property
def captures(self):
"""Order list of tuples containing external and internal captures."""
return self._captures.values()
def add_capture(self, tensor, placeholder):
"""Capture a specific tensor and utilize the provided placeholder.
Args:
      tensor: Tensor to capture.
placeholder: Provided placeholder for the tensor.
"""
self._captures[id(tensor)] = (tensor, placeholder)
self.inputs.append(placeholder)
def replace_capture(self, tensor, placeholder):
"""Replace already existing capture."""
self._captures[id(tensor)] = (tensor, placeholder)
def replace_capture_with_deferred_capture(self,
tensor,
closure,
spec,
placeholder,
default_value=None):
"""Replaces existing capture `tensor` with a deferred capture `closure`.
Caution: It is the caller's responsibility to make sure that, after calling
this function, the TypeSpec of the `inputs` (i.e. internal placeholders) and
the `_captured_inputs` (i.e. external captures) of a concrete function that
    wraps this function graph are still compatible. Thus the caller should pair
    usage of this function with `ConcreteFunction.set_external_captures` to
    make sure the order still matches. For example,
```
# concrete_fn._captured_inputs == [tensor1, tensor2, tensor3]
# concrete_fn.inputs == [placeholder1, placeholder2, placeholder3]
# replace external capture `tensor2` with a deferred_capture, i.e., a
# closure, `closure2`
concrete_fn.graph.replace_capture_with_deferred_capture(tensor2,
closure2,
placeholder2,
some_spec,
some_default)
concrete_fn.set_external_captures([tensor1, closure2, tensor3])
```
Args:
tensor: Tensor already captured.
closure: function which takes no arguments, to be evaluated at function
call time, returning a nest of tensors compatible with `spec`.
spec: nest of TypeSpec for the value to capture.
placeholder: the internal placeholder corresponding to the captured
`tensor`.
default_value: optional value to use in environments that cannot safely
evaluate closure.
"""
if id(tensor) in self._captures:
self.pop_capture(tensor)
self.capture_call_time_value(
closure,
spec,
key=id(tensor),
default_value=default_value,
placeholder=placeholder)
def reset_captures(self, capture_list):
"""Set the captures with the provided list of captures & placeholder."""
self._captures = py_collections.OrderedDict()
for tensor, placeholder in capture_list:
self._captures[id(tensor)] = (tensor, placeholder)
def pop_capture(self, tensor):
"""Remove the capture and return the generated placeholder."""
capture = self._captures.pop(id(tensor), None)
if capture is None:
return None
return capture[1]
def clear_captures(self):
# TODO(b/115366440): Delete this method when a custom OrderedDict is added.
# Clearing captures using clear() leaves some cycles around.
while self._captures:
self._captures.popitem()
memory.dismantle_ordered_dict(self._captures)
while self._deferred_captures:
self._deferred_captures.popitem()
memory.dismantle_ordered_dict(self._deferred_captures)
def capture_distributed_variable(self, variable, placeholder):
"""Add given distributed variable to captures with given placeholder."""
self._captures[id(variable)] = (variable, placeholder)
tape.record_operation("captured_value", [placeholder], [variable],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
def capture_eager_tensor(self, tensor, name):
capture = self._captures.get(id(tensor))
if capture is None:
with ops.control_dependencies(None):
constant_value = tensor_util.constant_value(tensor)
if constant_value is None:
# Some eager tensors, e.g. parallel tensors, are not convertible to a
# single constant. We'll use a placeholder for this case.
return self._capture_helper(tensor, name)
graph_const = constant_op.constant(constant_value, dtype=tensor.dtype,
shape=tensor.shape, name=name)
self.add_capture(tensor, graph_const)
else:
graph_const = capture[1]
tape.record_operation("captured_value", [graph_const], [tensor],
backward_function=lambda x: [x],
forward_function=lambda x: [x])
return graph_const
def captured(self, tensor):
"""Check if the specified tensor has been captured."""
return id(tensor) in self._captures
@property
def external_captures(self):
"""External tensors captured by this function."""
return [c[0] for c in self._captures.values()]
@property
def internal_captures(self):
"""Placeholders in this function corresponding captured tensors."""
return [c[1] for c in self._captures.values()]
@property
def deferred_external_captures(self):
"""Ordered nest of tensors whose placeholders will be fed at call time."""
return [c[0] for c in self._deferred_captures.values()]
@property
def deferred_internal_captures(self):
"""List of nest of placeholders which at call time will be fed."""
return [c[1] for c in self._deferred_captures.values()]
@property
def variable_captures(self):
"""Map of python object ids of variables to variables which are captured."""
return {
id(self._captures[id(v)][1]): v
for v in self.variables
if id(v) in self._captures
}
def mark_as_unsaveable(self, error_message):
"""Marks this FuncGraph as unsaveable.
Any attempts to export this FuncGraph will raise an error with the specified
message.
Args:
error_message: List or string containing the error message to be raised
when saving this FuncGraph to SavedModel.
"""
self._saveable = False
if isinstance(error_message, str):
error_message = [error_message]
self._saving_errors.update(error_message)
@property
def saveable(self):
"""Returns whether this FuncGraph is saveable."""
return self._saveable
@property
def saving_errors(self):
"""Returns set of errors preventing this FuncGraph from being saved."""
return self._saving_errors
def _add_scope_exit_callback(self, fn):
"""Add a function to call when this graph exits the default scope."""
if not callable(fn):
raise TypeError("fn is not callable: {}".format(fn))
if self._scope_exit_callbacks is None:
raise RuntimeError(
"Attempting to add a scope exit callback, but the default graph is "
"not the context scope graph. Did you forget to call "
"'with graph.as_default(): ...'?")
self._scope_exit_callbacks.append(fn)
# TODO(mdan): Too many threaded arguments. Accept an ACD ctx manager instead.
def func_graph_from_py_func(name,
python_func,
args,
kwargs,
signature=None,
func_graph=None,
autograph=False,
autograph_options=None,
add_control_dependencies=True,
arg_names=None,
op_return_value=None,
collections=None,
capture_by_value=None,
override_flat_arg_shapes=None,
acd_record_initial_resource_uses=False):
"""Returns a `FuncGraph` generated from `python_func`.
Args:
name: an identifier for the function.
python_func: the Python function to trace.
args: the positional args with which the Python function should be called;
ignored if a signature is provided.
kwargs: the keyword args with which the Python function should be called;
ignored if a signature is provided.
signature: a possibly nested sequence of `TensorSpecs` specifying the shapes
and dtypes of the arguments. When a signature is provided, `args` and
`kwargs` are ignored, and `python_func` is traced with Tensors conforming
to `signature`. If `None`, the shapes and dtypes are inferred from the
inputs.
func_graph: Optional. An instance of FuncGraph. If provided, we will use
this graph else a new one is built and returned.
autograph: whether to use autograph to compile `python_func`.
See https://www.tensorflow.org/guide/autograph for more information.
autograph_options: additional knobs to control when `autograph=True`.
See https://www.tensorflow.org/guide/autograph for more information.
add_control_dependencies: If True, automatically adds control dependencies
to ensure program order matches execution order and stateful ops always
execute.
arg_names: Optional list of argument names, used to give input placeholders
recognizable names.
op_return_value: Optional. A Tensor. If set and `python_func` returns
Operations, those return values will be replaced with this value. If not
set, returning an Operation triggers an error.
collections: a dictionary of collections this FuncGraph should start
with. If not specified (None), the FuncGraph will read (but not write to)
the outer graph's collections that are not allowlisted, and both
read and write to the outer graph's collections that are allowlisted.
The current allowlisted collections are the global variables, the
local variables, and the trainable variables.
Defaults to None.
capture_by_value: An optional boolean. If True, the func graph will capture
Variables by value instead of reference. By default inherit from outer
graphs, and failing that will default to False.
override_flat_arg_shapes: An optional list of instances that are either
`None` or `TensorShape`. The length must match that of
`nest.flatten((args, kwargs), expand_composites=True)`. The entries
containing value `None` must match entries in flattened arguments
containing non-tensors, while entries containing a `TensorShape` must
match entries in the flattened arguments containing tensors.
acd_record_initial_resource_uses: If `True` and `add_control_dependencies`
is enabled, the results (those marked with
AutomaticControlDependencies.mark_result) will be annotated with a private
attribute, "_res_first_used_by", which points to the first nodes which
      used any of the resources that the result op is using.
Returns:
A FuncGraph.
Raises:
TypeError: If any of `python_func`'s return values is neither `None`, a
`Tensor` or a `tf.experimental.ExtensionType`.
ValueError: If both `signature` and `override_flat_arg_shapes` are
passed in.
"""
if op_return_value is not None:
assert isinstance(op_return_value, ops.Tensor), op_return_value
if func_graph is None:
func_graph = FuncGraph(name, collections=collections,
capture_by_value=capture_by_value)
assert isinstance(func_graph, FuncGraph)
if add_control_dependencies:
deps_control_manager = auto_control_deps.AutomaticControlDependencies(
record_initial_resource_uses=acd_record_initial_resource_uses)
else:
deps_control_manager = ops.NullContextmanager()
with func_graph.as_default(), deps_control_manager as deps_ctx:
current_scope = variable_scope.get_variable_scope()
default_use_resource = current_scope.use_resource
current_scope.set_use_resource(True)
if signature is not None and override_flat_arg_shapes is not None:
raise ValueError(
"Passed both signature and override_flat_arg_shapes: %s and %s."
% (signature, override_flat_arg_shapes))
if signature is not None:
args = signature
kwargs = {}
# Creates and names placeholders for all arguments.
if override_flat_arg_shapes is not None:
flat_args = nest.flatten(args, expand_composites=True)
arg_shapes = override_flat_arg_shapes[:len(flat_args)]
kwarg_shapes = override_flat_arg_shapes[len(flat_args):]
else:
arg_shapes = None
kwarg_shapes = None
func_args = _get_defun_inputs_from_args(
args, arg_names, flat_shapes=arg_shapes)
func_kwargs = _get_defun_inputs_from_kwargs(
kwargs, flat_shapes=kwarg_shapes)
# Convert all Tensors into TensorSpecs before saving the structured inputs.
# If storing pure concrete functions that are not called through polymorphic
# functions, we don't have access to FunctionSpec, so we need to call the
# TensorSpecs by their `arg_names` for later binding.
func_graph.structured_input_signature = (
convert_structure_to_signature(func_args, arg_names),
convert_structure_to_signature(func_kwargs))
flat_func_args = nest.flatten(func_args, expand_composites=True)
flat_func_kwargs = nest.flatten(func_kwargs, expand_composites=True)
# Temporarily set inputs to allow graph building code to inspect
# them. Reassigned below.
func_graph.inputs = [arg for arg in flat_func_args + flat_func_kwargs
if isinstance(arg, ops.Tensor)]
# Note: `nest.flatten` sorts by keys, as does `_deterministic_dict_values`.
# Variables to help check whether mutation happens in calling the function
# Copy the recursive list, tuple and map structure, but not base objects
func_args_before = nest.pack_sequence_as(func_args, flat_func_args,
expand_composites=True)
func_kwargs_before = nest.pack_sequence_as(
func_kwargs, flat_func_kwargs, expand_composites=True)
def convert(x):
"""Converts a function output to a Tensor."""
if x is None:
return None
if op_return_value is not None and isinstance(x, ops.Operation):
# TODO(b/79881896): we currently can't capture external control deps, so
# this won't work if x needs to be captured (i.e. if python_func returns
# captured Operations).
with ops.control_dependencies([x]):
x = array_ops.identity(op_return_value)
elif not isinstance(x, tensor_array_ops.TensorArray):
try:
x = ops.convert_to_tensor_or_composite(x)
except (ValueError, TypeError):
raise TypeError(
"To be compatible with tf.function, Python functions "
"must return zero or more Tensors or ExtensionTypes or None "
f"values; in compilation of {str(python_func)}, found return "
f"value of type {type(x).__name__}, which is not a Tensor or "
"ExtensionType.")
if add_control_dependencies:
x = deps_ctx.mark_as_return(x)
return x
try:
if autograph:
from tensorflow.python import autograph # pylint: disable=g-import-not-at-top
_, original_func = tf_decorator.unwrap(python_func)
def autograph_handler(*args, **kwargs):
"""Calls a converted version of original_func."""
# TODO(mdan): Push this block higher in tf.function's call stack.
try:
return autograph.converted_call(
original_func,
args,
kwargs,
options=autograph.ConversionOptions(
recursive=True,
optional_features=autograph_options,
user_requested=True,
))
except Exception as e: # pylint:disable=broad-except
if hasattr(e, "ag_error_metadata"):
raise e.ag_error_metadata.to_exception(e)
else:
raise
# Wrapping around a decorator allows checks like tf_inspect.getargspec
# to be accurate.
converted_func = tf_decorator.make_decorator(
original_func, autograph_handler)
python_func = tf_decorator.rewrap(python_func, original_func,
converted_func)
else:
_, original_func = tf_decorator.unwrap(python_func)
func_outputs = python_func(*func_args, **func_kwargs)
# invariant: `func_outputs` contains only Tensors, CompositeTensors,
# TensorArrays and `None`s.
func_outputs = nest.map_structure(convert, func_outputs,
expand_composites=True)
check_mutation(func_args_before, func_args, original_func)
check_mutation(func_kwargs_before, func_kwargs, original_func)
finally:
current_scope.set_use_resource(default_use_resource)
# Variables in `func_args`, `func_kwargs` should be explicit inputs
# to the function, not captured inputs.
graph_variables = list(func_graph._watched_variables) # pylint: disable=protected-access
arg_variables = object_identity.ObjectIdentitySet()
inputs = []
for arg in (nest.flatten(func_args, expand_composites=True) +
nest.flatten(func_kwargs, expand_composites=True)):
if isinstance(arg, resource_variable_ops.BaseResourceVariable):
# Even if an argument variable was not used in the function, we've
# already manually captured the resource Tensor when creating argument
# placeholders.
resource_placeholder = func_graph.pop_capture(arg.handle)
if resource_placeholder is None:
continue
arg_variables.add(arg)
inputs.append(resource_placeholder)
elif isinstance(arg, ops.Tensor):
inputs.append(arg)
variables = [v for v in graph_variables if v not in arg_variables]
func_graph.inputs = (
inputs + func_graph.internal_captures + nest.flatten(
func_graph.deferred_internal_captures, expand_composites=True))
func_graph.structured_outputs = func_outputs
# Returning a closed-over tensor does not trigger convert_to_tensor.
func_graph.outputs.extend(
func_graph.capture(x)
for x in flatten(func_graph.structured_outputs)
if x is not None)
func_graph.variables = variables
if add_control_dependencies:
func_graph.control_outputs.extend(deps_control_manager.ops_which_must_run)
func_graph.collective_manager_ids_used = (
deps_control_manager.collective_manager_ids_used)
return func_graph
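# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hedged example of how `func_graph_from_py_func` (defined above)
# can be used to trace a plain Python function into a FuncGraph. It is wrapped
# in a helper that is never called here, so importing the module stays
# side-effect free.
def _example_trace_square():  # illustrative only
  def square(x):
    return x * x
  graph = func_graph_from_py_func(
      "square", square, args=(constant_op.constant(2.0),), kwargs={})
  # graph.inputs holds the placeholder created for `x`; graph.outputs holds
  # the traced result tensor.
  return graph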
def maybe_captured(tensor):
"""If t is a captured value placeholder, returns the original captured value.
Args:
tensor: Tensor.
Returns:
A tensor, potentially from a different Graph/FuncGraph.
"""
if (not isinstance(tensor, ops.EagerTensor) and
tensor.op.graph.building_function and tensor.op.type == "Placeholder"):
for input_t, placeholder_t in tensor.op.graph.captures:
if tensor == placeholder_t:
return maybe_captured(input_t)
# pylint: enable=protected-access
return tensor
def device_stack_has_callable(device_stack):
"""Checks whether a device stack contains a callable."""
return any(callable(spec._device_name_or_function) # pylint: disable=protected-access
for spec in device_stack.peek_objs())
def check_mutation(n1, n2, func):
"""Check if two list of arguments are exactly the same."""
func_name = getattr(func, "__name__", func)
errmsg = ("{}() should not modify its Python input arguments."
" Check if it modifies any lists or dicts passed as"
" arguments. Modifying a copy is allowed.".format(func_name))
try:
# TODO(mdan): Compare more robustly so that argument names can be reported.
nest.assert_same_structure(n1, n2, expand_composites=True)
except ValueError:
raise ValueError(errmsg)
for arg1, arg2 in zip(nest.flatten(n1, expand_composites=True),
nest.flatten(n2, expand_composites=True)):
if arg1 is not arg2:
raise ValueError(errmsg)
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def flatten(sequence):
"""Like nest.flatten w/ expand_composites, but returns flow for TensorArrays.
Args:
sequence: A nested structure of Tensors, CompositeTensors, and
TensorArrays.
Returns:
A list of tensors.
"""
flat_sequence = nest.flatten(sequence, expand_composites=True)
return [
item.flow if isinstance(item, tensor_array_ops.TensorArray) else item
for item in flat_sequence]
# TODO(edloper): If TensorArray becomes a CompositeTensor, then delete this.
def pack_sequence_as(structure, flat_sequence):
"""Like `nest.pack_sequence_as` but also builds TensorArrays from flows.
Args:
structure: The structure to pack into. May contain Tensors,
CompositeTensors, or TensorArrays.
flat_sequence: An iterable containing tensors.
Returns:
A nested structure.
Raises:
AssertionError if `structure` and `flat_sequence` are not compatible.
"""
flat_sequence = list(flat_sequence)
flattened_structure = nest.flatten(structure, expand_composites=True)
if len(flattened_structure) != len(flat_sequence):
raise ValueError("Mismatch in element count")
for i in range(len(flat_sequence)):
if isinstance(flattened_structure[i], tensor_array_ops.TensorArray):
flat_sequence[i] = tensor_array_ops.build_ta_with_new_flow(
old_ta=flattened_structure[i], flow=flat_sequence[i])
return nest.pack_sequence_as(structure, flat_sequence, expand_composites=True)
def _create_substitute_placeholder(value, name=None, dtype=None, shape=None):
"""Creates a placeholder for `value` and propagates shape info to it."""
# Note: setting ops.control_dependencies(None) ensures we always put
# capturing placeholders outside of any control flow context.
if shape is None:
shape = value.shape
with ops.control_dependencies(None):
placeholder = graph_placeholder(
dtype=dtype or value.dtype, shape=shape, name=name)
handle_data_util.copy_handle_data(value, placeholder)
return placeholder
def _get_defun_inputs_from_args(args, names, flat_shapes=None):
"""Maps Python function positional args to graph-construction inputs."""
return _get_defun_inputs(
args, names, structure=args, flat_shapes=flat_shapes)
def _get_composite_tensor_spec(x):
"""Returns the TypeSpec for x if it's a composite tensor, or x otherwise."""
return (x._type_spec # pylint: disable=protected-access
if isinstance(x, composite_tensor.CompositeTensor) else x)
def _get_defun_inputs(args, names, structure, flat_shapes=None):
"""Maps python function args to graph-construction inputs.
Args:
args: A flat list of user-specified arguments.
names: A list of strings with user-specified argument names, same length as
`args`. May be `None`, in which case a generic name is used.
structure: The original argument list or dictionary.
flat_shapes: A flat list of values that are either `None` or
instances of `TensorShape`. If provided, then length must match
that of `nest.flatten(args, expand_composites=True)`; and locations where
`args` are instances of `Tensor` must have a corresponding `TensorShape`
in `flat_shapes`. May be `None`, in which case exact shapes are read
directly from the args.
Returns:
Placeholders with the same structure as `structure`.
Raises:
RuntimeError: if `flat_shapes` is provided, but
`len(flat_shapes) != len(nest.flatten(args, expand_composites=True))`.
RuntimeError: if a shape from `flat_shapes` is not None
for an argument that is not a `Tensor`, `TensorSpec`,
or `ResourceVariable`.
"""
func_graph = ops.get_default_graph()
function_inputs = []
if names is None:
names = [None] * len(args)
if flat_shapes is None:
shapes_iter = itertools.repeat(None)
else:
len_flat_args = len(nest.flatten(args, expand_composites=True))
if len_flat_args != len(flat_shapes):
raise RuntimeError(
"Length of fully flat shapes (%d) must match that of "
"flatten(args) (%d). args: %s, flat_shapes: %s"
% (len(flat_shapes),
len_flat_args,
args,
flat_shapes))
shapes_iter = iter(flat_shapes)
for arg_value, name in zip(args, names):
# Replace any composite tensors with their TypeSpecs. This is important
# for ensuring that shape information that's not preserved by the TypeSpec
# (such as the number of values in a SparseTensor) gets properly masked.
arg_value = nest.map_structure(_get_composite_tensor_spec, arg_value)
flattened = nest.flatten(arg_value, expand_composites=True)
for arg in flattened:
# We have a shape entry for each arg, regardless of whether it's a real
# Tensor or not. For non-tensor entries it should be None.
shape = next(shapes_iter)
if isinstance(arg, (ops.Tensor, tensor_spec.TensorSpec)):
arg_is_spec = isinstance(arg, tensor_spec.TensorSpec)
if arg_is_spec and arg.name:
requested_name = arg.name
else:
requested_name = name
placeholder_shape = shape if shape is not None else arg.shape
try:
placeholder = graph_placeholder(
arg.dtype, placeholder_shape,
name=requested_name)
except ValueError:
# Sometimes parameter names are not valid op names, so fall back to
# unnamed placeholders.
placeholder = graph_placeholder(arg.dtype, placeholder_shape)
if not arg_is_spec:
handle_data_util.copy_handle_data(arg, placeholder)
if name is not None:
# Record the requested/user-specified name in case it's different than
# the uniquified name, for validation when exporting signatures.
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(requested_name)))
function_inputs.append(placeholder)
elif isinstance(arg, (resource_variable_ops.BaseResourceVariable,
resource_variable_ops.VariableSpec)):
if isinstance(arg, resource_variable_ops.VariableSpec):
name = arg.name or name
with func_graph.outer_graph.as_default():
placeholder = graph_placeholder(dtypes.resource, arg.shape,
name=name)
arg = resource_variable_ops.BaseResourceVariable(
name=name,
shape=arg.shape,
dtype=arg.dtype,
handle=placeholder,
handle_name=name,
trainable=arg.trainable)
# Capture arg variables to create placeholders for them. These will be
# removed as captures after the function is traced (since otherwise we'd
# just add it back with a new placeholder when the variable was
# referenced).
placeholder = func_graph.capture(arg.handle, name=name)
placeholder.op._set_attr( # pylint: disable=protected-access
"_user_specified_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(name)))
function_inputs.append(arg)
else:
if shape is not None:
raise RuntimeError(
"Expected provided shape override to be None for arg that isn't "
"a Tensor, but saw arg: '%s', shape: '%s'. args: %s"
% (arg, shape, args))
function_inputs.append(arg)
return nest.pack_sequence_as(structure, function_inputs,
expand_composites=True)
def _get_defun_inputs_from_kwargs(kwargs, flat_shapes):
"""Maps Python function keyword args to graph-construction inputs."""
if kwargs:
names, args = zip(*sorted(kwargs.items()))
else:
names = []
args = []
return _get_defun_inputs(
args, names, structure=kwargs, flat_shapes=flat_shapes)
def dismantle_func_graph(func_graph):
"""Removes reference cycles in `func_graph` FuncGraph.
Helpful for making sure the garbage collector doesn't need to run when
the FuncGraph goes out of scope, e.g. in tests using defun with
@test_util.run_in_graph_and_eager_modes(assert_no_eager_garbage=True).
Args:
func_graph: A `FuncGraph` object to destroy. `func_graph` is unusable
after this function.
"""
func_graph.clear_captures()
ops.dismantle_graph(func_graph)
def override_func_graph_name_scope(func_graph, name_scope):
func_graph._name_stack = name_scope # pylint: disable=protected-access
| 42.217958 | 119 | 0.685759 |
f98dd2e8b9d01113d626f6ef200ebc97c991b309 | 11,435 | py | Python | scipy/misc/common.py | jiffyclub/scipy | e346aa55c0416b915148c35cc200a0ed74f85c0a | ["BSD-3-Clause"] | 1 | 2020-11-07T04:53:55.000Z | 2020-11-07T04:53:55.000Z | scipy/misc/common.py | jiffyclub/scipy | e346aa55c0416b915148c35cc200a0ed74f85c0a | ["BSD-3-Clause"] | null | null | null | scipy/misc/common.py | jiffyclub/scipy | e346aa55c0416b915148c35cc200a0ed74f85c0a | ["BSD-3-Clause"] | 1 | 2019-08-13T21:23:57.000Z | 2019-08-13T21:23:57.000Z |
"""
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from __future__ import division, print_function, absolute_import
import numpy
from numpy import (exp, log, asarray, arange, newaxis, hstack, product, array,
zeros, eye, poly1d, r_, rollaxis, sum, fromstring, isfinite,
squeeze, amax, reshape)
from scipy.lib._version import NumpyVersion
__all__ = ['logsumexp', 'central_diff_weights', 'derivative', 'pade', 'lena',
'ascent', 'face']
_NUMPY_170 = (NumpyVersion(numpy.__version__) >= NumpyVersion('1.7.0'))
def logsumexp(a, axis=None, b=None, keepdims=False):
"""Compute the log of the sum of exponentials of input elements.
Parameters
----------
a : array_like
Input array.
axis : None or int or tuple of ints, optional
Axis or axes over which the sum is taken. By default `axis` is None,
and all elements are summed. Tuple of ints is not accepted if NumPy
version is lower than 1.7.0.
.. versionadded:: 0.11.0
    b : array_like, optional
        Scaling factor for exp(`a`); must be of the same shape as `a` or
        broadcastable to `a`.
        .. versionadded:: 0.12.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array.
        .. versionadded:: 0.15.0
Returns
-------
res : ndarray
The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
is returned.
See Also
--------
numpy.logaddexp, numpy.logaddexp2
Notes
-----
Numpy has a logaddexp function which is very similar to `logsumexp`, but
only handles two arguments. `logaddexp.reduce` is similar to this
function, but may be less stable.
Examples
--------
>>> from scipy.misc import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
With weights
>>> a = np.arange(10)
>>> b = np.arange(10, 0, -1)
>>> logsumexp(a, b=b)
9.9170178533034665
>>> np.log(np.sum(b*np.exp(a)))
9.9170178533034647
"""
a = asarray(a)
# keepdims is available in numpy.sum and numpy.amax since NumPy 1.7.0
#
# Because SciPy supports versions earlier than 1.7.0, we have to handle
# those old versions differently
if not _NUMPY_170:
# When support for Numpy < 1.7.0 is dropped, this implementation can be
# removed. This implementation is a bit hacky. Similarly to old NumPy's
# sum and amax functions, 'axis' must be an integer or None, tuples and
# lists are not supported. Although 'keepdims' is not supported by these
        # old NumPy functions, this function supports it.
# Solve the shape of the reduced array
if axis is None:
sh_keepdims = (1,) * a.ndim
else:
sh_keepdims = list(a.shape)
sh_keepdims[axis] = 1
a_max = amax(a, axis=axis)
if a_max.ndim > 0:
a_max[~isfinite(a_max)] = 0
elif not isfinite(a_max):
a_max = 0
if b is not None:
b = asarray(b)
out = log(sum(b * exp(a - reshape(a_max, sh_keepdims)), axis=axis))
else:
out = log(sum(exp(a - reshape(a_max, sh_keepdims)), axis=axis))
out += a_max
if keepdims:
# Put back the reduced axes with size one
out = reshape(out, sh_keepdims)
else:
# This is a more elegant implementation, requiring NumPy >= 1.7.0
a_max = amax(a, axis=axis, keepdims=True)
if a_max.ndim > 0:
a_max[~isfinite(a_max)] = 0
elif not isfinite(a_max):
a_max = 0
if b is not None:
b = asarray(b)
out = log(sum(b * exp(a - a_max), axis=axis, keepdims=keepdims))
else:
out = log(sum(exp(a - a_max), axis=axis, keepdims=keepdims))
if not keepdims:
a_max = squeeze(a_max, axis=axis)
out += a_max
return out
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative.
Assumes equally-spaced function points.
If weights are in the vector w, then
derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)
Parameters
----------
Np : int
Number of points for the central derivative.
ndiv : int, optional
Number of divisions. Default is 1.
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
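# Illustrative example (not part of the original module): the 3-point
# central-difference weights are [-1/2, 0, 1/2], so the dot product of the
# weights with samples of f at x0-dx, x0, x0+dx approximates dx * f'(x0).
def _central_diff_weights_example():
    w = central_diff_weights(3)           # array([-0.5,  0. ,  0.5])
    samples = array([0.9, 1.0, 1.1])**2   # f(x) = x**2 sampled around x0=1, dx=0.1
    return numpy.dot(w, samples) / 0.1    # ~2.0, i.e. f'(1)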
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at a point.
Given a function, use a central difference formula with spacing `dx` to
compute the `n`-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which `n`-th derivative is found.
dx : int, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
    Decreasing the step size too far can result in round-off error.
Examples
--------
>>> def f(x):
... return x**3 + x**2
...
>>> derivative(f, 1.0, dx=1e-6)
4.9999999999217337
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n == 1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n == 2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
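# Illustrative example (not part of the original module): the second
# derivative of f(x) = x**3 + x**2 at x0 = 1 is 6*x0 + 2 = 8.
def _derivative_example():
    def f(x):
        return x**3 + x**2
    return derivative(f, 1.0, dx=1e-3, n=2, order=5)   # ~8.0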
def pade(an, m):
"""
Return Pade approximation to a polynomial as the ratio of two polynomials.
Parameters
----------
an : (N,) array_like
Taylor series coefficients.
m : int
The order of the returned approximating polynomials.
Returns
-------
p, q : Polynomial class
The pade approximation of the polynomial defined by `an` is
`p(x)/q(x)`.
Examples
--------
>>> from scipy import misc
>>> e_exp = [1.0, 1.0, 1.0/2.0, 1.0/6.0, 1.0/24.0, 1.0/120.0]
>>> p, q = misc.pade(e_exp, 2)
>>> e_exp.reverse()
>>> e_poly = np.poly1d(e_exp)
Compare ``e_poly(x)`` and the pade approximation ``p(x)/q(x)``
>>> e_poly(1)
2.7166666666666668
>>> p(1)/q(1)
2.7179487179487181
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N - m
if n < 0:
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1, n+1)
Bkj = zeros((N+1, m), 'd')
for row in range(1, m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1, N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj, Bkj))
pq = linalg.solve(C, an)
p = pq[:n+1]
q = r_[1.0, pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
def lena():
"""
Get classic image processing example image, Lena, at 8-bit grayscale
bit-depth, 512 x 512 size.
Parameters
----------
None
Returns
-------
lena : ndarray
Lena image
Examples
--------
>>> import scipy.misc
>>> lena = scipy.misc.lena()
>>> lena.shape
(512, 512)
>>> lena.max()
245
>>> lena.dtype
dtype('int32')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(lena)
>>> plt.show()
"""
import pickle
import os
    fname = os.path.join(os.path.dirname(__file__), 'lena.dat')
    with open(fname, 'rb') as f:
        lena = array(pickle.load(f))
    return lena
def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent
def face(gray=False):
"""
Get a 1024 x 768, color image of a raccoon face.
    The image is derived from raccoon-procyon-lotor.jpg at
    http://www.public-domain-image.com
Parameters
----------
gray : bool, optional
        If True then return an 8-bit grey-scale image, otherwise return a color image
Returns
-------
face : ndarray
        image of a raccoon face
Examples
--------
>>> import scipy.misc
>>> face = scipy.misc.face()
>>> face.shape
(768, 1024, 3)
>>> face.max()
230
>>> face.dtype
dtype('uint8')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(face)
>>> plt.show()
"""
import bz2
import os
with open(os.path.join(os.path.dirname(__file__), 'face.dat'), 'rb') as f:
rawdata = f.read()
data = bz2.decompress(rawdata)
face = fromstring(data, dtype='uint8')
face.shape = (768, 1024, 3)
if gray is True:
face = (0.21 * face[:,:,0] + 0.71 * face[:,:,1] + 0.07 * face[:,:,2]).astype('uint8')
return face
| 26.655012 | 93 | 0.562221 |
a49bb07d9f8d9630e4bb14589ed99d907bf87ff7 | 33,780 | py | Python | gempy/library/astrotools.py | DBerke/DRAGONS | cecf9a03970af95126bd17a227bd5214a5d6c64b | ["BSD-3-Clause"] | 19 | 2017-10-23T14:52:51.000Z | 2022-03-28T04:49:00.000Z | gempy/library/astrotools.py | DBerke/DRAGONS | cecf9a03970af95126bd17a227bd5214a5d6c64b | ["BSD-3-Clause"] | 194 | 2017-11-01T17:32:45.000Z | 2022-03-31T21:32:59.000Z | gempy/library/astrotools.py | DBerke/DRAGONS | cecf9a03970af95126bd17a227bd5214a5d6c64b | ["BSD-3-Clause"] | 16 | 2017-11-01T05:18:04.000Z | 2021-12-14T23:08:57.000Z |
# Copyright(c) 2006,2016-2020 Association of Universities for Research in Astronomy, Inc.
"""
The astroTools module contains astronomy specific utility functions
"""
import os
import re
import numpy as np
from astropy import units as u
def array_from_list(list_of_quantities, unit=None):
"""
    Convert a list of Quantity objects to a numpy array. The elements of the
    input list must all be convertible to the same unit.
    Parameters
    ----------
    list_of_quantities : list
        Quantity objects that all have equivalent units
    unit : Unit, optional
        unit for the output array; defaults to the unit of the first element
Returns
-------
array: array representation of this list
"""
if unit is None:
unit = list_of_quantities[0].unit
values = [x.to(unit).value for x in list_of_quantities]
# subok=True is needed to handle magnitude/log units
return u.Quantity(np.array(values), unit, subok=True)
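# Illustrative example (not part of the original module): mixed-unit lengths
# are converted to a single Quantity array in the unit of the first element.
def _array_from_list_example():
    lengths = [1 * u.m, 250 * u.cm, 3000 * u.mm]
    return array_from_list(lengths)        # <Quantity [1., 2.5, 3.] m>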
def boxcar(data, operation=np.ma.median, size=1):
"""
"Smooth" a 1D array by applying a boxcar filter along it. Any operation
can be performed, as long as it can take a sequence and return a single
value.
Parameters
----------
data: 1D ndarray
        the data to be manipulated
operation: callable
function for the boxcar to use
size: int
the boxcar width will be 2*size+1
Returns
-------
1D ndarray: same shape as data, after the boxcar operation
"""
try:
boxarray = np.array([operation(data[max(i-size, 0):i+size+1])
for i in range(len(data))])
except (ValueError, TypeError): # Handle things like np.logical_and
boxarray = np.array([operation.reduce(data[max(i-size, 0):i+size+1])
for i in range(len(data))])
return boxarray
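# Illustrative example (not part of the original module): a running median
# with a window of 2*size+1 = 3 pixels.
def _boxcar_example():
    data = np.array([1.0, 9.0, 1.0, 1.0, 1.0])
    return boxcar(data, operation=np.median, size=1)   # [5., 1., 1., 1., 1.]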
def divide0(numerator, denominator):
"""
    Perform division, replacing division by zero with zero. This expands on
    np.divide() by also handling cases where the numerator and/or denominator
    are scalars rather than arrays, and cases where they are of integer type.
Parameters
----------
numerator: float/array-like
the numerator for the division
denominator: float/array-like
the denominator for the division
Returns
-------
    The quotient, with instances where the denominator is zero replaced by zero
"""
try:
is_int = np.issubdtype(denominator.dtype, np.integer)
except AttributeError:
# denominator is a scalar
if denominator == 0:
try:
return np.zeros(numerator.shape)
except AttributeError:
# numerator is also a scalar
return 0
else:
return numerator / denominator
else:
dtype = np.float32 if is_int else denominator.dtype
try:
out_shape = numerator.shape
        except AttributeError:  # numerator is a scalar
out_shape = denominator.shape
else:
# both are arrays so the final shape will be the one with the
# higher dimensionality (if they're broadcastable)
if len(out_shape) < len(denominator.shape):
out_shape = denominator.shape
return np.divide(numerator, denominator, out=np.zeros(out_shape, dtype=dtype),
where=abs(denominator) > np.finfo(dtype).tiny)
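# Illustrative example (not part of the original module): element-wise
# division where zeros in the denominator yield zero instead of inf/nan.
def _divide0_example():
    num = np.array([1.0, 2.0, 3.0])
    den = np.array([2.0, 0.0, 4.0])
    return divide0(num, den)               # array([0.5 , 0.  , 0.75])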
def cartesian_regions_to_slices(regions):
"""
Convert a sample region(s) string, consisting of a comma-separated list
of (colon-or-hyphen-separated) pixel ranges into Python slice objects.
These ranges may describe either multiple 1D regions or a single higher-
dimensional region (with one range per axis), a distinction which is not
important here. The ranges are specified in 1-indexed Cartesian pixel
co-ordinates, inclusive of the upper limit, and get converted to a tuple
of Python slice objects (in reverse order), suitable for indexing a
:mod:`numpy` array. If regions is None or empty, the resulting slice will
select the entire array. Single indices may be used (eg. '1,2'), or '*'
for the whole axis, lower and/or upper limits may be omitted to use the
remainder of the range (eg. '10:,:') and/or an optional step may be
specified using colon syntax (eg. '1:10:2' or '1-10:2').
"""
origin = 1
slices = []
ranges = parse_user_regions(regions, allow_step=True)
for limits in ranges[::-1]: # reverse Cartesian order for Python
nlim = len(limits)
if nlim == 1:
lim = int(limits[0])-origin
sliceobj = slice(lim, lim+1)
else:
# Adjust only the lower limit for 1-based indexing since Python
# ranges are exclusive:
sliceobj = slice(*(int(lim)-adj if lim else None
for lim, adj in zip(limits, (origin, 0, 0))))
slices.append(sliceobj)
return tuple(slices)
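# Illustrative example (not part of the original module): a 1-indexed,
# inclusive Cartesian region string becomes Python slices in reversed
# (numpy) axis order.
def _regions_to_slices_example():
    return cartesian_regions_to_slices("1:10,5")
    # -> (slice(4, 5, None), slice(0, 10, None))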
def parse_user_regions(regions, dtype=int, allow_step=False):
"""
Parse a string containing a list of sections into a list of tuples
containing the same information
Parameters
----------
regions : str
comma-separated list of regions of form start:stop:step
dtype : dtype
string values will be coerced into this dtype, raising an error if
this is not possible
allow_step : bool
allow a step value in the ranges?
Returns
-------
list of slice-like tuples with 2 or 3 values per tuple
"""
if not regions:
return [(None, None)]
elif not isinstance(regions, str):
raise TypeError(f"regions must be a string or None, not '{regions}'")
if isinstance(dtype, np.dtype):
dtype = getattr(np, dtype.name)
ranges = []
for range_ in regions.strip("[]").split(","):
range_ = range_.strip()
if range_ == "*":
ranges.append((None, None))
continue
try:
values = [dtype(x) if x else None
for x in range_.replace("-", ":", 1).split(":")]
assert len(values) in (1, 2, 2+allow_step)
if len(values) > 1 and values[0] is not None and values[1] is not None and values[0] > values[1]:
values[0], values[1] = values[1], values[0]
except (ValueError, AssertionError):
raise ValueError(f"Failed to parse sample regions '{regions}'")
ranges.append(tuple(values))
return ranges
def create_mask_from_regions(points, regions=None):
"""
Produce a boolean mask given an array of x-values and a list of unmasked
regions. The regions can be specified either as slice objects (interpreted
as pixel indices in the standard python sense) or (start, end) tuples with
inclusive boundaries.
Parameters
----------
points : `numpy.ndarray`
Input array
regions : list, optional
valid regions, either (start, end) or slice objects
Returns
-------
mask : boolean `numpy.ndarray`
"""
mask = np.ones_like(points, dtype=bool)
if regions:
for region in regions:
if isinstance(region, slice):
mask[region] = False
else:
x1 = min(points) if region[0] is None else region[0]
x2 = max(points) if region[1] is None else region[1]
if x1 > x2:
x1, x2 = x2, x1
mask[np.logical_and(points >= x1, points <= x2)] = False
return mask
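# Illustrative example (not part of the original module): points between 2
# and 4 (inclusive) are unmasked (False); everything else stays masked (True).
def _mask_from_regions_example():
    points = np.arange(6)                  # 0..5
    return create_mask_from_regions(points, regions=[(2, 4)])
    # -> array([ True,  True, False, False, False,  True])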
def get_corners(shape):
"""
This is a recursive function to calculate the corner indices
of an array of the specified shape.
:param shape: length of the dimensions of the array
:type shape: tuple of ints, one for each dimension
"""
    if not isinstance(shape, tuple):
        raise TypeError('get_corners argument is non-tuple')
if len(shape) == 1:
corners = [(0,), (shape[0]-1,)]
else:
shape_less1 = shape[1:len(shape)]
corners_less1 = get_corners(shape_less1)
corners = []
for corner in corners_less1:
newcorner = (0,) + corner
corners.append(newcorner)
newcorner = (shape[0]-1,) + corner
corners.append(newcorner)
return corners
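# Illustrative example (not part of the original module): the four corner
# indices of a 2 x 3 array.
def _get_corners_example():
    return get_corners((2, 3))
    # -> [(0, 0), (1, 0), (0, 2), (1, 2)]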
def get_spline3_extrema(spline):
"""
Find the locations of the minima and maxima of a cubic spline.
Parameters
----------
spline: a callable spline object
Returns
-------
minima, maxima: 1D arrays
"""
derivative = spline.derivative()
try:
knots = derivative.get_knots()
    except AttributeError:  # BSpline objects keep their knot vector in .t
        knots = derivative.t
minima, maxima = [], []
# We take each pair of knots and map to interval [-1,1]
for xm, xp in zip(knots[:-1], knots[1:]):
ym, y0, yp = derivative([xm, 0.5*(xm+xp), xp])
# a*x^2 + b*x + c
a = 0.5 * (ym+yp) - y0
b = 0.5 * (yp-ym)
c = y0
for root in np.roots([a, b, c]):
if np.isreal(root) and abs(root) <= 1:
x = 0.5 * (root * (xp-xm) + (xp+xm)) # unmapped from [-1, 1]
if 2*a*root + b > 0:
minima.append(x)
else:
maxima.append(x)
return np.array(minima), np.array(maxima)
def transpose_if_needed(*args, transpose=False, section=slice(None)):
"""
This function takes a list of arrays and returns them (or a section of them),
either untouched, or transposed, according to the parameter.
Parameters
----------
args : sequence of arrays
The input arrays.
transpose : bool
If True, return transposed versions.
section : slice object
Section of output data to return.
Returns
-------
list of arrays
The input arrays, or their transposed versions.
"""
return list(None if arg is None
else arg.T[section] if transpose else arg[section] for arg in args)
def rotate_2d(degs):
"""
Little helper function to return a basic 2-D rotation matrix.
:param degs: rotation amount, in degrees
:type degs: float
"""
rads = np.radians(degs)
sine = np.sin(rads)
cosine = np.cos(rads)
return np.array([[cosine, -sine], [sine, cosine]])
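# Illustrative example (not part of the original module): rotating the unit
# x vector by 90 degrees gives (approximately) the unit y vector.
def _rotate_2d_example():
    return np.dot(rotate_2d(90), np.array([1.0, 0.0]))   # ~array([0., 1.])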
def clipped_mean(data):
num_total = len(data)
mean = data.mean()
sigma = data.std()
if num_total < 3:
return mean, sigma
num = num_total
clipped_data = data
clip = 0
while num > 0.5 * num_total:
# CJS: edited this as upper limit was mean+1*sigma => bias
clipped_data = data[(data < mean + 3*sigma) & (data > mean - 3*sigma)]
num = len(clipped_data)
if num > 0:
mean = clipped_data.mean()
sigma = clipped_data.std()
elif clip == 0:
return mean, sigma
else:
break
clip += 1
if clip > 10:
break
return mean, sigma
def rasextodec(string):
"""
Convert hh:mm:ss.sss to decimal degrees
"""
match_ra = re.match(r"(\d+):(\d+):(\d+\.\d+)", string)
if match_ra:
hours = float(match_ra.group(1))
minutes = float(match_ra.group(2))
secs = float(match_ra.group(3))
minutes += (secs/60.0)
hours += (minutes/60.0)
degrees = hours * 15.0
else:
raise ValueError('Invalid RA string')
return degrees
def degsextodec(string):
"""
Convert [-]dd:mm:ss.sss to decimal degrees
"""
match_dec = re.match(r"(-*)(\d+):(\d+):(\d+\.\d+)", string)
if match_dec:
sign = match_dec.group(1)
if sign == '-':
sign = -1.0
else:
sign = +1.0
degs = float(match_dec.group(2))
minutes = float(match_dec.group(3))
secs = float(match_dec.group(4))
minutes += (secs/60.0)
degs += (minutes/60.0)
degs *= sign
else:
raise ValueError('Invalid Dec string')
return degs
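# Illustrative example (not part of the original module): sexagesimal
# coordinates converted to decimal degrees.
def _sexagesimal_example():
    ra = rasextodec("10:30:00.00")     # 10.5 h * 15 deg/h = 157.5
    dec = degsextodec("-12:30:00.00")  # -(12 + 30/60) = -12.5
    return ra, dec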
# The following functions and classes were borrowed from STSCI's spectools
# package, currently under development. They might be able to be
# replaced with a direct import of spectools.util if/when it is available
IRAF_MODELS_MAP = {1.: 'chebyshev',
2.: 'legendre',
3.: 'spline3',
4.: 'spline1'}
INVERSE_IRAF_MODELS_MAP = {'chebyshev': 1.,
'legendre': 2.,
'spline3': 3.,
'spline1': 4.}
def get_records(fname):
"""
    Read the records of an IRAF database file into a Python list
Parameters
----------
fname: string
name of an IRAF database file
Returns
-------
A list of records
"""
    with open(fname) as filehandle:
        dtb = filehandle.read()
    recs = dtb.split('begin')[1:]
    records = [Record(r) for r in recs]
    return records
def get_database_string(fname):
"""
Read an IRAF database file
Parameters
----------
fname: string
name of an IRAF database file
Returns
-------
the database file as a string
"""
    with open(fname) as f:
        dtb = f.read()
    return dtb
class Record:
"""
A base class for all records - represents an IRAF database record
Attributes
----------
recstr: string
the record as a string
fields: dict
the fields in the record
taskname: string
the name of the task which created the database file
"""
def __init__(self, recstr):
self.recstr = recstr
self.fields = self.get_fields()
self.taskname = self.get_task_name()
def aslist(self):
        reclist = [l.strip() for l in self.recstr.split('\n')]
        # drop empty lines
        return [l for l in reclist if l]
def get_fields(self):
# read record fields as an array
fields = {}
flist = self.aslist()
numfields = len(flist)
for i in range(numfields):
line = flist[i]
if line and line[0].isalpha():
field = line.split()
if i+1 < numfields:
if not flist[i+1][0].isalpha():
fields[field[0]] = self.read_array_field(
flist[i:i+int(field[1])+1])
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
fields[field[0]] = " ".join(s for s in field[1:])
else:
continue
return fields
def get_task_name(self):
try:
return self.fields['task']
except KeyError:
return None
def read_array_field(self, fieldlist):
# Turn an iraf record array field into a numpy array
fieldline = [l.split() for l in fieldlist[1:]]
# take only the first 3 columns
# identify writes also strings at the end of some field lines
xyz = [l[:3] for l in fieldline]
        try:
            farr = np.array(xyz)
        except ValueError:
            raise ValueError("Could not read array field %s"
                             % fieldlist[0].split()[0])
return farr.astype(np.float64)
class IdentifyRecord(Record):
"""
Represents a database record for the longslit.identify task
Attributes
----------
x: array
the X values of the identified features
this represents values on axis1 (image rows)
y: int
the Y values of the identified features
(image columns)
z: array
the values which X maps into
modelname: string
the function used to fit the data
nterms: int
degree of the polynomial which was fit to the data
in IRAF this is the number of coefficients, not the order
mrange: list
the range of the data
coeff: array
function (modelname) coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._flatcoeff = self.fields['coefficients'].flatten()
self.x = self.fields['features'][:, 0]
self.y = self.get_ydata()
self.z = self.fields['features'][:, 1]
        # reference wavelengths for the identified features
self.zref = self.fields['features'][:, 2]
self.modelname = self.get_model_name()
self.nterms = self.get_nterms()
self.mrange = self.get_range()
self.coeff = self.get_coeff()
def get_model_name(self):
return IRAF_MODELS_MAP[self._flatcoeff[0]]
def get_nterms(self):
return self._flatcoeff[1]
def get_range(self):
low = self._flatcoeff[2]
high = self._flatcoeff[3]
return [low, high]
def get_coeff(self):
return self._flatcoeff[4:]
def get_ydata(self):
image = self.fields['image']
left = image.find('[')+1
right = image.find(']')
section = image[left:right]
if ',' in section:
yind = image.find(',')+1
return int(image[yind:-1])
else:
return int(section)
#xind = image.find('[')+1
#yind = image.find(',')+1
#return int(image[yind:-1])
class FitcoordsRecord(Record):
"""
    Represents a database record for the longslit.fitcoords task
Attributes
----------
modelname: string
the function used to fit the data
xorder: int
number of terms in x
yorder: int
number of terms in y
xbounds: list
data range in x
ybounds: list
data range in y
coeff: array
function coefficients
"""
def __init__(self, recstr):
super().__init__(recstr)
self._surface = self.fields['surface'].flatten()
self.modelname = IRAF_MODELS_MAP[self._surface[0]]
self.xorder = self._surface[1]
self.yorder = self._surface[2]
self.xbounds = [self._surface[4], self._surface[5]]
self.ybounds = [self._surface[6], self._surface[7]]
self.coeff = self.get_coeff()
def get_coeff(self):
return self._surface[8:]
class IDB:
"""
Base class for an IRAF identify database
Attributes
----------
records: list
a list of all `IdentifyRecord` in the database
numrecords: int
number of records
"""
def __init__(self, dtbstr):
        self.records = [IdentifyRecord(rstr) for rstr in self.aslist(dtbstr)]
self.numrecords = len(self.records)
def aslist(self, dtb):
# return a list of records
# if the first one is a comment remove it from the list
record_list = dtb.split('begin')
try:
rl0 = record_list[0].split('\n')
        except IndexError:
return record_list
if len(rl0) == 2 and rl0[0].startswith('#') and not rl0[1].strip():
return record_list[1:]
elif len(rl0) == 1 and not rl0[0].strip():
return record_list[1:]
else:
return record_list
class ReidentifyRecord(IDB):
"""
Represents a database record for the onedspec.reidentify task
"""
def __init__(self, databasestr):
super().__init__(databasestr)
self.x = np.array([r.x for r in self.records])
self.y = self.get_ydata()
self.z = np.array([r.z for r in self.records])
def get_ydata(self):
y = np.ones(self.x.shape)
y = y * np.array([r.y for r in self.records])[:, np.newaxis]
return y
# This class pulls together fitcoords and identify databases into
# a single entity that can be written to or read from disk files
# or pyfits binary tables
class SpectralDatabase:
def __init__(self, database_name=None, record_name=None,
binary_table=None):
"""
database_name is the name of the database directory
on disk that contains the database files associated with
        record_name. For example, database_name="database",
        record_name="image_001" (corresponding to the first science
        extension in a data file called image.fits).
"""
self.database_name = database_name
self.record_name = record_name
self.binary_table = binary_table
self.identify_database = None
self.fitcoords_database = None
# Initialize from database on disk
if database_name is not None and record_name is not None:
if not os.path.isdir(database_name):
raise OSError('Database directory %s does not exist' %
database_name)
# Read in identify database
db_filename = "%s/id%s" % (database_name, record_name)
if not os.access(db_filename, os.R_OK):
raise OSError("Database file %s does not exist " \
"or cannot be accessed" % db_filename)
db_str = get_database_string(db_filename)
self.identify_database = IDB(db_str)
# Read in fitcoords database
db_filename = "%s/fc%s" % (database_name, record_name)
if not os.access(db_filename, os.R_OK):
raise OSError("Database file %s does not exist " \
"or cannot be accessed" % db_filename)
db_str = get_database_string(db_filename)
self.fitcoords_database = FitcoordsRecord(db_str)
# Initialize from pyfits binary table in memory
elif binary_table is not None:
# Get record_name from header if not passed
if record_name is not None:
self.record_name = record_name
else:
self.record_name = binary_table.header["RECORDNM"]
# Format identify information from header and table
# data into a database string
db_str = self._identify_db_from_table(binary_table)
self.identify_database = IDB(db_str)
# Format fitcoords information from header
# into a database string
db_str = self._fitcoords_db_from_table(binary_table)
self.fitcoords_database = FitcoordsRecord(db_str)
else:
raise TypeError("Both database and binary table are None.")
def _identify_db_from_table(self, tab):
# Get feature information from table data
features = tab.data
nrows = len(features)
nfeat = features["spectral_coord"].shape[1]
ncoeff = features["fit_coefficients"].shape[1]
db_str = ""
for row in range(nrows):
feature = features[row]
# Make a dictionary to hold information gathered from
# the table. This structure is not quite the same as
# the fields member of the Record class, but it is the
# same principle
fields = {}
fields["id"] = self.record_name
fields["task"] = "identify"
fields["image"] = "%s[*,%d]" % (self.record_name,
feature["spatial_coord"])
fields["units"] = tab.header["IDUNITS"]
zip_feature = np.array([feature["spectral_coord"],
feature["fit_wavelength"],
feature["ref_wavelength"]])
fields["features"] = zip_feature.swapaxes(0, 1)
fields["function"] = tab.header["IDFUNCTN"]
fields["order"] = tab.header["IDORDER"]
fields["sample"] = tab.header["IDSAMPLE"]
fields["naverage"] = tab.header["IDNAVER"]
fields["niterate"] = tab.header["IDNITER"]
reject = tab.header["IDREJECT"].split()
fields["low_reject"] = float(reject[0])
fields["high_reject"] = float(reject[1])
fields["grow"] = tab.header["IDGROW"]
# coefficients is a list of numbers with the following elements:
# 0: model number (function type)
# 1: order
# 2: x min
# 3: x max
# 4 on: function coefficients
coefficients = []
model_num = INVERSE_IRAF_MODELS_MAP[fields["function"]]
coefficients.append(model_num)
coefficients.append(fields["order"])
idrange = tab.header["IDRANGE"].split()
coefficients.append(float(idrange[0]))
coefficients.append(float(idrange[1]))
fit_coeff = feature["fit_coefficients"].tolist()
coefficients.extend(fit_coeff)
fields["coefficients"] = np.array(coefficients).astype(np.float64)
# Compose fields into a single string
rec_str = "%-8s%-8s %s\n" % \
("begin", fields["task"], fields["image"])
for field in ["id", "task", "image", "units"]:
rec_str += "%-8s%-8s%s\n" % ("", field, str(fields[field]))
rec_str += "%-8s%-8s %d\n" % \
("", "features", len(fields["features"]))
for feat in fields["features"]:
rec_str += "%16s%10f %10f %10f\n" % \
("", feat[0], feat[1], feat[2])
for field in ["function", "order", "sample",
"naverage", "niterate", "low_reject",
"high_reject", "grow"]:
rec_str += "%-8s%s %s\n" % ("", field, str(fields[field]))
rec_str += "%-8s%-8s %d\n" % ("", "coefficients",
len(fields["coefficients"]))
for coeff in fields["coefficients"]:
rec_str += "%-8s%-8s%E\n" % ("", "", coeff)
rec_str += "\n"
db_str += rec_str
return db_str
def _fitcoords_db_from_table(self, tab):
# Make a dictionary to hold information gathered from
# the table. This structure is not quite the same as
# the fields member of the Record class, but it is the
# same principle
fields = {}
fields["begin"] = self.record_name
fields["task"] = "fitcoords"
fields["axis"] = tab.header["FCAXIS"]
fields["units"] = tab.header["FCUNITS"]
# The surface is a list of numbers with the following elements:
# 0: model number (function type)
# 1: x order
# 2: y order
# 3: cross-term type (always 1. for fitcoords)
        # 4: xmin
        # 5: xmax
        # 6: ymin
        # 7: ymax
# 8 on: function coefficients
surface = []
model_num = INVERSE_IRAF_MODELS_MAP[tab.header["FCFUNCTN"]]
surface.append(model_num)
xorder = tab.header["FCXORDER"]
yorder = tab.header["FCYORDER"]
surface.append(xorder)
surface.append(yorder)
surface.append(1.)
fcxrange = tab.header["FCXRANGE"].split()
surface.append(float(fcxrange[0]))
surface.append(float(fcxrange[1]))
fcyrange = tab.header["FCYRANGE"].split()
surface.append(float(fcyrange[0]))
surface.append(float(fcyrange[1]))
for i in range(int(xorder)*int(yorder)):
coeff = tab.header["FCCOEF%d" % i]
surface.append(coeff)
fields["surface"] = np.array(surface).astype(np.float64)
# Compose fields into a single string
db_str = "%-8s%s\n" % ("begin", fields["begin"])
for field in ["task", "axis", "units"]:
db_str += "%-8s%-8s%s\n" % ("", field, str(fields[field]))
db_str += "%-8s%-8s%d\n" % ("", "surface", len(fields["surface"]))
for coeff in fields["surface"]:
db_str += "%-8s%-8s%E\n" % ("", "", coeff)
return db_str
def write_to_disk(self, database_name=None, record_name=None):
# Check for provided names; use names from self if not
# provided as input
if database_name is None and self.database_name is None:
raise TypeError("No database_name provided")
elif database_name is None and self.database_name is not None:
database_name = self.database_name
if record_name is None and self.record_name is None:
raise TypeError("No record_name provided")
elif record_name is None and self.record_name is not None:
record_name = self.record_name
# Make the directory if needed
if not os.path.exists(database_name):
os.mkdir(database_name)
# Timestamp
import datetime
timestamp = str(datetime.datetime.now())
# Write identify files
id_db = self.identify_database
if id_db is not None:
db_filename = "%s/id%s" % (database_name, record_name)
db_file = open(db_filename, "w")
db_file.write("# "+timestamp+"\n")
for record in id_db.records:
db_file.write("begin")
db_file.write(record.recstr)
db_file.close()
# Write fitcoords files
fc_db = self.fitcoords_database
if fc_db is not None:
db_filename = "%s/fc%s" % (database_name, record_name)
db_file = open(db_filename, "w")
db_file.write("# "+timestamp+"\n")
db_file.write(fc_db.recstr)
db_file.close()
def as_binary_table(self, record_name=None):
# Should this be lazy loaded?
import astropy.io.fits as pf
if record_name is None:
record_name = self.record_name
# Get the maximum number of features identified in any
# record. Use this as the length of the array in the
# wavelength_coord and fit_wavelength fields
nfeat = max([len(record.x)
for record in self.identify_database.records])
# The number of coefficients should be the same for all
# records, so take the value from the first record
ncoeff = self.identify_database.records[0].nterms
# Get the number of rows from the number of identify records
nrows = self.identify_database.numrecords
# Create pyfits Columns for the table
column_formats = [{"name":"spatial_coord", "format":"I"},
{"name":"spectral_coord", "format":"%dE"%nfeat},
{"name":"fit_wavelength", "format":"%dE"%nfeat},
{"name":"ref_wavelength", "format":"%dE"%nfeat},
{"name":"fit_coefficients", "format":"%dE"%ncoeff},]
columns = [pf.Column(**fmt) for fmt in column_formats]
# Make the empty table. Use the number of records in the
# database as the number of rows
table = pf.new_table(columns, nrows=nrows)
# Populate the table from the records
for i in range(nrows):
record = self.identify_database.records[i]
row = table.data[i]
row["spatial_coord"] = record.y
row["fit_coefficients"] = record.coeff
if len(row["spectral_coord"]) != len(record.x):
row["spectral_coord"][:len(record.x)] = record.x
row["spectral_coord"][len(record.x):] = -999
else:
row["spectral_coord"] = record.x
if len(row["fit_wavelength"]) != len(record.z):
row["fit_wavelength"][:len(record.z)] = record.z
row["fit_wavelength"][len(record.z):] = -999
else:
row["fit_wavelength"] = record.z
if len(row["ref_wavelength"]) != len(record.zref):
row["ref_wavelength"][:len(record.zref)] = record.zref
row["ref_wavelength"][len(record.zref):] = -999
else:
row["ref_wavelength"] = record.zref
# Store the record name in the header
table.header.update("RECORDNM", record_name)
# Store other important values from the identify records in the header
# These should be the same for all records, so take values
# from the first record
first_record = self.identify_database.records[0]
table.header.update("IDUNITS", first_record.fields["units"])
table.header.update("IDFUNCTN", first_record.modelname)
table.header.update("IDORDER", first_record.nterms)
table.header.update("IDSAMPLE", first_record.fields["sample"])
table.header.update("IDNAVER", first_record.fields["naverage"])
table.header.update("IDNITER", first_record.fields["niterate"])
table.header.update("IDREJECT", "%s %s" %
(first_record.fields["low_reject"],
first_record.fields["high_reject"]))
table.header.update("IDGROW", first_record.fields["grow"])
table.header.update("IDRANGE", "%s %s" %
(first_record.mrange[0], first_record.mrange[1]))
# Store fitcoords information in the header
fc_record = self.fitcoords_database
table.header.update("FCUNITS", fc_record.fields["units"])
table.header.update("FCAXIS", fc_record.fields["axis"])
table.header.update("FCFUNCTN", fc_record.modelname)
table.header.update("FCXORDER", fc_record.xorder)
table.header.update("FCYORDER", fc_record.yorder)
table.header.update("FCXRANGE", "%s %s" %
(fc_record.xbounds[0], fc_record.xbounds[1]))
table.header.update("FCYRANGE", "%s %s" %
(fc_record.ybounds[0], fc_record.ybounds[1]))
for i in range(len(fc_record.coeff)):
coeff = fc_record.coeff[i]
table.header.update("FCCOEF%d" % i, coeff)
####here -- comments
return table
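

# A minimal usage sketch, assuming an on-disk database directory named
# "database" holding the identify/fitcoords files for record "image_001"
# (names taken from the __init__ docstring); the copy directory name is
# likewise illustrative.
if __name__ == "__main__":
    # Build the object from the identify (id*) and fitcoords (fc*) files ...
    sdb = SpectralDatabase(database_name="database", record_name="image_001")
    # ... serialize it to a FITS binary table ...
    table = sdb.as_binary_table()
    # ... or write it back out under a different database directory.
    sdb.write_to_disk(database_name="database_copy")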
| 33.915663
| 109
| 0.575281
|
04d8ea946da97e3a4967e6e616db1683738f0548
| 8,345
|
py
|
Python
|
tests/test_evoformer.py
|
yuzhiguo07/openfold
|
5fb0f074066387b9969578b8bf68f7e046c778af
|
[
"Apache-2.0"
] | 1
|
2021-12-16T17:02:10.000Z
|
2021-12-16T17:02:10.000Z
|
tests/test_evoformer.py
|
yuzhiguo07/openfold
|
5fb0f074066387b9969578b8bf68f7e046c778af
|
[
"Apache-2.0"
] | null | null | null |
tests/test_evoformer.py
|
yuzhiguo07/openfold
|
5fb0f074066387b9969578b8bf68f7e046c778af
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 AlQuraishi Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import unittest
from openfold.model.evoformer import (
MSATransition,
EvoformerStack,
ExtraMSAStack,
)
from openfold.utils.tensor_utils import tree_map
import tests.compare_utils as compare_utils
from tests.config import consts
if compare_utils.alphafold_is_installed():
alphafold = compare_utils.import_alphafold()
import jax
import haiku as hk
class TestEvoformerStack(unittest.TestCase):
def test_shape(self):
batch_size = consts.batch_size
n_seq = consts.n_seq
n_res = consts.n_res
c_m = consts.c_m
c_z = consts.c_z
c_hidden_msa_att = 12
c_hidden_opm = 17
c_hidden_mul = 19
c_hidden_pair_att = 14
c_s = consts.c_s
no_heads_msa = 3
no_heads_pair = 7
no_blocks = 2
transition_n = 2
msa_dropout = 0.15
pair_stack_dropout = 0.25
inf = 1e9
eps = 1e-10
es = EvoformerStack(
c_m,
c_z,
c_hidden_msa_att,
c_hidden_opm,
c_hidden_mul,
c_hidden_pair_att,
c_s,
no_heads_msa,
no_heads_pair,
no_blocks,
transition_n,
msa_dropout,
pair_stack_dropout,
blocks_per_ckpt=None,
inf=inf,
eps=eps,
).eval()
m = torch.rand((batch_size, n_seq, n_res, c_m))
z = torch.rand((batch_size, n_res, n_res, c_z))
msa_mask = torch.randint(0, 2, size=(batch_size, n_seq, n_res))
pair_mask = torch.randint(0, 2, size=(batch_size, n_res, n_res))
shape_m_before = m.shape
shape_z_before = z.shape
m, z, s = es(
m, z, chunk_size=4, msa_mask=msa_mask, pair_mask=pair_mask
)
self.assertTrue(m.shape == shape_m_before)
self.assertTrue(z.shape == shape_z_before)
self.assertTrue(s.shape == (batch_size, n_res, c_s))
@compare_utils.skip_unless_alphafold_installed()
def test_compare(self):
def run_ei(activations, masks):
config = compare_utils.get_alphafold_config()
c_e = config.model.embeddings_and_evoformer.evoformer
ei = alphafold.model.modules.EvoformerIteration(
c_e, config.model.global_config, is_extra_msa=False
)
return ei(activations, masks, is_training=False)
f = hk.transform(run_ei)
n_res = consts.n_res
n_seq = consts.n_seq
activations = {
"msa": np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32),
"pair": np.random.rand(n_res, n_res, consts.c_z).astype(np.float32),
}
masks = {
"msa": np.random.randint(0, 2, (n_seq, n_res)).astype(np.float32),
"pair": np.random.randint(0, 2, (n_res, n_res)).astype(np.float32),
}
params = compare_utils.fetch_alphafold_module_weights(
"alphafold/alphafold_iteration/evoformer/evoformer_iteration"
)
params = tree_map(lambda n: n[0], params, jax.numpy.DeviceArray)
key = jax.random.PRNGKey(42)
out_gt = f.apply(params, key, activations, masks)
jax.tree_map(lambda x: x.block_until_ready(), out_gt)
out_gt_msa = torch.as_tensor(np.array(out_gt["msa"]))
out_gt_pair = torch.as_tensor(np.array(out_gt["pair"]))
model = compare_utils.get_global_pretrained_openfold()
out_repro_msa, out_repro_pair = model.evoformer.blocks[0](
torch.as_tensor(activations["msa"]).cuda(),
torch.as_tensor(activations["pair"]).cuda(),
torch.as_tensor(masks["msa"]).cuda(),
torch.as_tensor(masks["pair"]).cuda(),
chunk_size=4,
_mask_trans=False,
)
out_repro_msa = out_repro_msa.cpu()
out_repro_pair = out_repro_pair.cpu()
        assert torch.max(torch.abs(out_repro_msa - out_gt_msa)) < consts.eps
        assert torch.max(torch.abs(out_repro_pair - out_gt_pair)) < consts.eps
class TestExtraMSAStack(unittest.TestCase):
def test_shape(self):
batch_size = 2
s_t = 23
n_res = 5
c_m = 7
c_z = 11
c_hidden_msa_att = 12
c_hidden_opm = 17
c_hidden_mul = 19
c_hidden_tri_att = 16
no_heads_msa = 3
no_heads_pair = 8
no_blocks = 2
transition_n = 5
msa_dropout = 0.15
pair_stack_dropout = 0.25
inf = 1e9
eps = 1e-10
es = ExtraMSAStack(
c_m,
c_z,
c_hidden_msa_att,
c_hidden_opm,
c_hidden_mul,
c_hidden_tri_att,
no_heads_msa,
no_heads_pair,
no_blocks,
transition_n,
msa_dropout,
pair_stack_dropout,
blocks_per_ckpt=None,
inf=inf,
eps=eps,
).eval()
m = torch.rand((batch_size, s_t, n_res, c_m))
z = torch.rand((batch_size, n_res, n_res, c_z))
msa_mask = torch.randint(
0,
2,
size=(
batch_size,
s_t,
n_res,
),
)
pair_mask = torch.randint(
0,
2,
size=(
batch_size,
n_res,
n_res,
),
)
shape_z_before = z.shape
z = es(m, z, chunk_size=4, msa_mask=msa_mask, pair_mask=pair_mask)
self.assertTrue(z.shape == shape_z_before)
class TestMSATransition(unittest.TestCase):
def test_shape(self):
batch_size = 2
s_t = 3
n_r = 5
c_m = 7
n = 11
mt = MSATransition(c_m, n)
m = torch.rand((batch_size, s_t, n_r, c_m))
shape_before = m.shape
m = mt(m, chunk_size=4)
shape_after = m.shape
self.assertTrue(shape_before == shape_after)
@compare_utils.skip_unless_alphafold_installed()
def test_compare(self):
def run_msa_transition(msa_act, msa_mask):
config = compare_utils.get_alphafold_config()
c_e = config.model.embeddings_and_evoformer.evoformer
msa_trans = alphafold.model.modules.Transition(
c_e.msa_transition,
config.model.global_config,
name="msa_transition",
)
act = msa_trans(act=msa_act, mask=msa_mask)
return act
f = hk.transform(run_msa_transition)
n_res = consts.n_res
n_seq = consts.n_seq
msa_act = np.random.rand(n_seq, n_res, consts.c_m).astype(np.float32)
msa_mask = np.ones((n_seq, n_res)).astype(
np.float32
) # no mask here either
        # Fetch pretrained parameters (but only from one block)
params = compare_utils.fetch_alphafold_module_weights(
"alphafold/alphafold_iteration/evoformer/evoformer_iteration/"
+ "msa_transition"
)
params = tree_map(lambda n: n[0], params, jax.numpy.DeviceArray)
out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
out_gt = torch.as_tensor(np.array(out_gt))
model = compare_utils.get_global_pretrained_openfold()
out_repro = (
model.evoformer.blocks[0]
.msa_transition(
torch.as_tensor(msa_act, dtype=torch.float32).cuda(),
mask=torch.as_tensor(msa_mask, dtype=torch.float32).cuda(),
)
.cpu()
)
        self.assertTrue(torch.max(torch.abs(out_gt - out_repro)) < consts.eps)
if __name__ == "__main__":
unittest.main()
| 30.456204
| 80
| 0.587777
|
f8590b69d1f18947ff62bb19e2b8e6df74a2d161
| 897
|
py
|
Python
|
app/app.py
|
ShuaiGao/mini-shop-server
|
8a72b2d457bba8778e97637027ffa82bfa11e8a9
|
[
"MIT"
] | null | null | null |
app/app.py
|
ShuaiGao/mini-shop-server
|
8a72b2d457bba8778e97637027ffa82bfa11e8a9
|
[
"MIT"
] | 1
|
2019-07-08T12:32:29.000Z
|
2019-07-08T12:32:29.000Z
|
app/app.py
|
ShuaiGao/mini-shop-server
|
8a72b2d457bba8778e97637027ffa82bfa11e8a9
|
[
"MIT"
] | null | null | null |
# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2018/6/13.
"""
from datetime import date
from flask import Flask as _Flask, _request_ctx_stack
from flask.json import JSONEncoder as _JSONEncoder
from app.libs.error_code import ServerError
__author__ = 'Allen7D'
class JSONEncoder(_JSONEncoder):
def default(self, o):
if hasattr(o, 'keys') and hasattr(o, '__getitem__'):
return dict(o)
if isinstance(o, date):
return o.strftime('%Y-%m-%d')
raise ServerError()
class Flask(_Flask):
json_encoder = JSONEncoder
def dispatch_request(self):
req = _request_ctx_stack.top.request
if req.routing_exception is not None:
self.raise_routing_exception(req)
rule = req.url_rule
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return self.make_default_options_response()
return self.view_functions[rule.endpoint](**req.view_args)
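

# A minimal sketch of the pattern the JSONEncoder above supports: any object
# exposing keys() and __getitem__ is serialized as a dict, so view models need
# no explicit to_dict(). The class name and fields are illustrative only.
class ExampleViewModel:
    def __init__(self, uid, nickname):
        self.uid = uid
        self.nickname = nickname

    def keys(self):
        # fields exposed to the JSON encoder
        return ['uid', 'nickname']

    def __getitem__(self, item):
        return getattr(self, item)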
| 24.916667
| 60
| 0.74136
|
4ed08162ac82aac7eabfb48be748903d7f1b6ed9
| 49
|
py
|
Python
|
src/utoolbox/util/decorator/__init__.py
|
thhsieh00/utoolbox-core
|
e46704348d60985c205a16f41788d2c185e11fb6
|
[
"Apache-2.0"
] | 3
|
2020-08-21T02:34:32.000Z
|
2021-04-06T06:56:46.000Z
|
src/utoolbox/util/decorator/__init__.py
|
liuyenting/utoolbox-core
|
d1430967458204b99780c547eaca60d066490946
|
[
"Apache-2.0"
] | null | null | null |
src/utoolbox/util/decorator/__init__.py
|
liuyenting/utoolbox-core
|
d1430967458204b99780c547eaca60d066490946
|
[
"Apache-2.0"
] | null | null | null |
from .benchmark import *
from .exceution import *
| 24.5
| 24
| 0.77551
|
1e7782350e37eb41d0db390bd5364aa153aa5afa
| 2,321
|
py
|
Python
|
tardis/montecarlo/montecarlo_numba/estimators.py
|
ArkaprabhaChakraborty/tardis
|
f52b6de55701c6880ad835ad714e911dcbe9ee96
|
[
"BSD-3-Clause"
] | 176
|
2015-02-26T07:26:59.000Z
|
2022-03-16T18:26:22.000Z
|
tardis/montecarlo/montecarlo_numba/estimators.py
|
ArkaprabhaChakraborty/tardis
|
f52b6de55701c6880ad835ad714e911dcbe9ee96
|
[
"BSD-3-Clause"
] | 1,474
|
2015-02-12T13:02:16.000Z
|
2022-03-31T09:05:54.000Z
|
tardis/montecarlo/montecarlo_numba/estimators.py
|
ArkaprabhaChakraborty/tardis
|
f52b6de55701c6880ad835ad714e911dcbe9ee96
|
[
"BSD-3-Clause"
] | 434
|
2015-02-07T17:15:41.000Z
|
2022-03-23T04:49:38.000Z
|
from numba import njit
from tardis.montecarlo.montecarlo_numba import (
njit_dict_no_parallel,
)
from tardis.montecarlo.montecarlo_numba.frame_transformations import (
calc_packet_energy,
calc_packet_energy_full_relativity,
)
from tardis.montecarlo.montecarlo_numba.numba_config import (
ENABLE_FULL_RELATIVITY,
)
@njit(**njit_dict_no_parallel)
def set_estimators(r_packet, distance, numba_estimator, comov_nu, comov_energy):
"""
Updating the estimators
"""
numba_estimator.j_estimator[r_packet.current_shell_id] += (
comov_energy * distance
)
numba_estimator.nu_bar_estimator[r_packet.current_shell_id] += (
comov_energy * distance * comov_nu
)
@njit(**njit_dict_no_parallel)
def set_estimators_full_relativity(
r_packet, distance, numba_estimator, comov_nu, comov_energy, doppler_factor
):
numba_estimator.j_estimator[r_packet.current_shell_id] += (
comov_energy * distance * doppler_factor
)
numba_estimator.nu_bar_estimator[r_packet.current_shell_id] += (
comov_energy * distance * comov_nu * doppler_factor
)
@njit(**njit_dict_no_parallel)
def update_line_estimators(
estimators, r_packet, cur_line_id, distance_trace, time_explosion
):
"""
Function to update the line estimators
Parameters
----------
estimators : tardis.montecarlo.montecarlo_numba.numba_interface.Estimators
r_packet : tardis.montecarlo.montecarlo_numba.r_packet.RPacket
cur_line_id : int
distance_trace : float
time_explosion : float
"""
""" Actual calculation - simplified below
r_interaction = math.sqrt(r_packet.r**2 + distance_trace**2 +
2 * r_packet.r * distance_trace * r_packet.mu)
mu_interaction = (r_packet.mu * r_packet.r + distance_trace) / r_interaction
doppler_factor = 1.0 - mu_interaction * r_interaction /
( time_explosion * C)
"""
if not ENABLE_FULL_RELATIVITY:
energy = calc_packet_energy(r_packet, distance_trace, time_explosion)
else:
energy = calc_packet_energy_full_relativity(r_packet)
estimators.j_blue_estimator[cur_line_id, r_packet.current_shell_id] += (
energy / r_packet.nu
)
estimators.Edotlu_estimator[
cur_line_id, r_packet.current_shell_id
] += energy
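

# A minimal sketch of the commented-out "actual calculation" above: the Doppler
# factor at the interaction point reached after travelling distance_trace from
# radius r with direction cosine mu. The speed of light, c_light, is assumed to
# be expressed in the same units as r and time_explosion; this helper is purely
# illustrative and is not called by the estimator functions.
def _doppler_factor_at_interaction(r, mu, distance_trace, time_explosion, c_light):
    r_interaction = (r ** 2 + distance_trace ** 2
                     + 2.0 * r * distance_trace * mu) ** 0.5
    mu_interaction = (mu * r + distance_trace) / r_interaction
    return 1.0 - mu_interaction * r_interaction / (time_explosion * c_light)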
| 30.142857
| 80
| 0.724257
|
2d8795b45f27630f941cdabd8467490c38e5ed85
| 1,514
|
py
|
Python
|
crabageprediction/venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_join.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 28,899
|
2016-10-13T03:32:12.000Z
|
2022-03-31T21:39:05.000Z
|
crabageprediction/venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_join.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 31,004
|
2016-10-12T23:22:27.000Z
|
2022-03-31T23:17:38.000Z
|
crabageprediction/venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_join.py
|
13rianlucero/CrabAgePrediction
|
92bc7fbe1040f49e820473e33cc3902a5a7177c7
|
[
"MIT"
] | 15,149
|
2016-10-13T03:21:31.000Z
|
2022-03-31T18:46:47.000Z
|
import numpy as np
from pandas import (
Index,
Timedelta,
timedelta_range,
)
import pandas._testing as tm
class TestJoin:
def test_append_join_nondatetimeindex(self):
rng = timedelta_range("1 days", periods=10)
idx = Index(["a", "b", "c", "d"])
result = rng.append(idx)
assert isinstance(result[0], Timedelta)
# it works
rng.join(idx, how="outer")
def test_join_self(self, join_type):
index = timedelta_range("1 day", periods=10)
joined = index.join(index, how=join_type)
tm.assert_index_equal(index, joined)
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(
10,
10,
data_gen_f=lambda *args, **kwargs: np.random.randn(),
r_idx_type="i",
c_idx_type="td",
)
str(df)
cols = df.columns.join(df.index, how="outer")
joined = cols.join(df.columns)
assert cols.dtype == np.dtype("O")
assert cols.dtype == joined.dtype
tm.assert_index_equal(cols, joined)
def test_join_preserves_freq(self):
# GH#32157
tdi = timedelta_range("1 day", periods=10)
result = tdi[:5].join(tdi[5:], how="outer")
assert result.freq == tdi.freq
tm.assert_index_equal(result, tdi)
result = tdi[:5].join(tdi[6:], how="outer")
assert result.freq is None
expected = tdi.delete(5)
tm.assert_index_equal(result, expected)
| 28.037037
| 65
| 0.593791
|
fc357ca481620bd6b4f20815f5691b4a0d917c56
| 65
|
py
|
Python
|
Personal tuitorials at Home/Many_Values_to_Multiple_Variables.py
|
thepros847/python_programiing
|
d177f79d0d1f21df434bf3f8663ae6469fcf8357
|
[
"MIT"
] | null | null | null |
Personal tuitorials at Home/Many_Values_to_Multiple_Variables.py
|
thepros847/python_programiing
|
d177f79d0d1f21df434bf3f8663ae6469fcf8357
|
[
"MIT"
] | null | null | null |
Personal tuitorials at Home/Many_Values_to_Multiple_Variables.py
|
thepros847/python_programiing
|
d177f79d0d1f21df434bf3f8663ae6469fcf8357
|
[
"MIT"
] | null | null | null |
x, y, z = "Orange", "Banana", "Cherry"
print(x)
print(y)
print(z)
| 16.25
| 38
| 0.6
|
b834929b7938f26cf7bd2136ef90ddbaf64d4e03
| 6,089
|
py
|
Python
|
radiative_equilibriumModel/runModel.py
|
Mihir-DG/Modelling-Planetary-Climate
|
4699d55d6ccecc4938f9844dd658e9c40c6d07c2
|
[
"MIT"
] | 1
|
2021-06-21T04:52:46.000Z
|
2021-06-21T04:52:46.000Z
|
radiative_equilibriumModel/runModel.py
|
Mihir-DG/Modelling-Planetary-Climate
|
4699d55d6ccecc4938f9844dd658e9c40c6d07c2
|
[
"MIT"
] | null | null | null |
radiative_equilibriumModel/runModel.py
|
Mihir-DG/Modelling-Planetary-Climate
|
4699d55d6ccecc4938f9844dd658e9c40c6d07c2
|
[
"MIT"
] | null | null | null |
import climt
import sympl
from sympl import AdamsBashforth
import matplotlib.pyplot as plt
import numpy as np
import math
import metpy.calc as calc
from metpy.units import units
import datetime
import csv
#Importing setup functions for profile development.
from setupMain import surf_airBdry_tempDiff, radiating_pressure, net_energy_level_in_column, netFlux, heatingRate
# Runs model to equilibrium ~ 260 days for maxTau = 6 [default].
# Uses net energy in atm. column as stopping criteria.
def runningModel(maxTau):
    #Setting up system components in disequilibrium.
diagnostic = climt.Frierson06LongwaveOpticalDepth(longwave_optical_depth_at_equator=maxTau, linear_optical_depth_parameter=0.1)
radiation = climt.GrayLongwaveRadiation(tendencies_in_diagnostics=True)
surface = climt.SlabSurface()
albedo = 0.3
time_stepper = AdamsBashforth([radiation,surface])
timestep = datetime.timedelta(hours = 4)
state = climt.get_default_state([radiation, diagnostic, surface])
airPressure_vertCoord = np.array(state['air_pressure_on_interface_levels']).flatten()
sw_flux = 200
state.update(diagnostic(state))
state['downwelling_shortwave_flux_in_air'][:] = sw_flux
state['ocean_mixed_layer_thickness'][:] = 1.
state['air_temperature'][:] = 200. #Arbitrary init value
diff_acceptable = 5.
time = datetime.datetime(2020,1,1,0,0,0) # In months (Add 1/168 for each timedelta jump)
stop = False
radPres = radiating_pressure(state,diff_acceptable)
radHt = radPres[1]
#Creates list for assorted 0d output vars.
netEn = [(net_energy_level_in_column(state,diff_acceptable))[0]]
bdry_tempDiff = [surf_airBdry_tempDiff(state)]
olrs = [(np.array(state['upwelling_longwave_flux_in_air']).flatten())[-1]]
surfT = [(np.array(state['surface_temperature']).flatten())[0]]
counter = 0
errorMargin = 0.5
#Loop to increment time
while stop == False:
#Updating state
state.update(diagnostic(state))
diagnostics, state = time_stepper(state,timestep)
state.update(diagnostics)
#Updating appropriate quantities every month
if counter % 42 == 0:
netEn.append((net_energy_level_in_column(state,diff_acceptable))[0])
bdry_tempDiff.append(surf_airBdry_tempDiff(state))
olrs.append((np.array(state['upwelling_longwave_flux_in_air']).flatten())[-1])
surfT.append((np.array(state['surface_temperature']).flatten())[0])
# Checks breakout condition and increments time + counter.
counter += 1
time = time + timestep
if abs(net_energy_level_in_column(state,diff_acceptable)[0]) < errorMargin:
stop = True
#Calculating output quantities.
timeTaken = time - datetime.datetime(2020,1,1,0,0,0)
lwFluxNet, lwFluxUp, lwFluxDown = netFlux(state)
heatRate = heatingRate(state)
airTemperatureProf = (np.array(state['air_temperature'])).flatten()
return state, timeTaken, olrs, bdry_tempDiff, netEn, surfT, lwFluxNet, lwFluxUp, lwFluxDown, heatRate, airTemperatureProf, airPressure_vertCoord
def output_to_csv(timeTaken, olrs, bdry_tempDiff, netEn, surfT, lwFluxNet, lwFluxUp, lwFluxDown, heatRate, airTemperatureProf,airPressure_vertCoord):
print(airPressure_vertCoord)
with open('output_runModel/weekly_results.csv', mode='w') as weeklyCSV:
weeklyWriter = csv.writer(weeklyCSV)
weeklyWriter.writerow(olrs)
weeklyWriter.writerow((np.array(bdry_tempDiff)).flatten())
weeklyWriter.writerow(surfT)
weeklyWriter.writerow(netEn)
weeklyWriter.writerow(airPressure_vertCoord)
with open('output_runModel/equilibrium.csv', mode='w') as equilibriumCSV:
equilibriumWriter = csv.writer(equilibriumCSV)
equilibriumWriter.writerow(lwFluxNet)
equilibriumWriter.writerow(lwFluxUp)
equilibriumWriter.writerow(lwFluxDown)
equilibriumWriter.writerow(heatRate)
equilibriumWriter.writerow(airTemperatureProf)
equilibriumWriter.writerow(str(timeTaken))
equilibriumWriter.writerow(airPressure_vertCoord)
return 0.
def maxTau_eqTime():
maxTaus = [0.5,1,3,6,8,10,12.5,15]
out_Time = []
for maxT in maxTaus:
        state, timeTaken, olrs, bdry_tempDiff, netEn, surfT, lwFluxNet, lwFluxUp, lwFluxDown, heatRate, airTemperatureProf, airPressure_vertCoord = runningModel(maxT)
out_Time.append(timeTaken)
out_Time = [float(i.days) for i in out_Time]
return out_Time, maxTaus
# runningModel() calls first 3 fns; does not need to be called in main() for runningModel()
def main():
maxTau = 0.94
state, timeTaken, olrs, bdry_tempDiff, netEn, surfT, lwFluxNet, lwFluxUp, lwFluxDown, heatRate, airTemperatureProf, airPressure_vertCoord = runningModel(maxTau)
output_to_csv(timeTaken, olrs, bdry_tempDiff, netEn, surfT, lwFluxNet, lwFluxUp, lwFluxDown, heatRate, airTemperatureProf, airPressure_vertCoord)
print(state['upwelling_longwave_flux_in_air'][-1])
print(state['surface_temperature'])
#outTimes, maxTaus = maxTau_eqTime()
"""plt.xlabel("Aggregate Atmospheric Optimal Thickness")
plt.ylabel("Equilibrium Duration (Days)")
#plt.scatter(maxTaus,outTimes)
plt.plot(maxTaus,outTimes,color='black')
plt.savefig("../../../graphs_modelling/1dradiative-eq/maxTau_eqTime.png")
#plt.scatter()
idealizedSurfT = 288.2
idealizedOLR = 231.76
RMS = []
maxTau_ranges = np.linspace(0,6,20)
for maxTau in maxTau_ranges:
planetary_albedo = 0.29
#cleaningUp()
state, timeTaken, olrs, bdry_tempDiff, netEn, surfT, lwFluxNet, lwFluxUp, lwFluxDown, heatRate, airTemperatureProf = runningModel(maxTau)
olr = np.array(state['upwelling_longwave_flux_in_air'][-1]).flatten()[0]
surfT = np.array(state['surface_temperature']).flatten()[0]
errorFunc = math.sqrt((idealizedOLR-olr)**2 + (idealizedSurfT-surfT)**2)
print(errorFunc,maxTau)
RMS.append(errorFunc)
plt.plot(RMS)
plt.savefig("RMSE_RadEq.png")
plt.show()
print(np.amin(RMS))"""
if __name__ == '__main__':
main()
""" NOTE: DIFF ACCEPTABLE SET TO 5.
ONLY USE radHt after reaching equilibrium.
Required outs from runningModel():
1) olr - historical
2) bdry_tempDiff - historical
3) net Energy level in atm. - historical
4) surface temp. - historical
5) upFlux - eq.
6) downFlux - eq.
7) netFlux - eq.
8) heating rate - eq.
9) air temp (p) - eq.
10) opticalDepth (p) - eq. --> MAYBE!!
11) equilibrium time
"""
| 40.324503
| 161
| 0.767614
|
4d0a8860b92137fe74a1fa841ebb9c256e434103
| 3,277
|
py
|
Python
|
src/main/lambda/WeatherDataIngestion.py
|
JFKenso/fitbit_insights
|
a65fb75063a6deb057e8827a0e054adc7f35645e
|
[
"Apache-2.0"
] | null | null | null |
src/main/lambda/WeatherDataIngestion.py
|
JFKenso/fitbit_insights
|
a65fb75063a6deb057e8827a0e054adc7f35645e
|
[
"Apache-2.0"
] | null | null | null |
src/main/lambda/WeatherDataIngestion.py
|
JFKenso/fitbit_insights
|
a65fb75063a6deb057e8827a0e054adc7f35645e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import base64
import urllib2
import urllib
import sys
import json
import os
import boto3
from datetime import date, timedelta
from boto3.dynamodb.conditions import Key, Attr
from botocore.exceptions import ClientError
#Declare these global variables that we'll use for the access and refresh tokens
clientId = "XXX"
clientSecret = "XXX"
ddb = boto3.client('dynamodb')
ml = boto3.client('machinelearning')
#Some contants defining API error handling responses
ErrorInAPI = "Error when making API call that I couldn't handle"
#This makes an API call. It also catches errors and tries to deal with them
def MakeAPICall(InURL):
#Start the request
InURL = InURL + "&client_id="+clientId+"&client_secret="+clientSecret
req = urllib2.Request(InURL)
#Fire off the request
try:
#Do the request
response = urllib2.urlopen(req)
#Read the response
FullResponse = response.read()
#Return values
return True, FullResponse
#Catch errors, e.g. A 401 error that signifies the need for a new access token
except urllib2.URLError as e:
print ("Got this HTTP error: " + str(e.code))
HTTPErrorMessage = e.read()
print ("This was in the HTTP error message: " + HTTPErrorMessage)
#Return that this didn't work, allowing the calling function to handle it
return False, ErrorInAPI
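
# MakeAPICall returns a (success, payload) pair; callers should check the
# success flag before parsing the payload, as lambda_handler does below.
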
#Main part of the code
def lambda_handler(event, context):
FitbitUserID = "ABC"
#This is the Aeris API
AerisAPI = "https://api.aerisapi.com/forecasts/closest?p=-37.8136,144.9631&filter=1hr&limit=24"
#Make the Profile API call
APICallOK, WeatherData = MakeAPICall(AerisAPI)
table = "WeatherData"
if APICallOK:
parsed_data = json.loads(WeatherData)
weatherObjs = parsed_data['response'][0]['periods']
for period in weatherObjs:
recordDateTime = str(period['validTime'])
maxTempC = str(period['maxTempC'])
minTempC = str(period['minTempC'])
precipMM = str(period['precipMM'])
humidity = str(period['humidity'])
uvi = str(period['uvi'])
pressureMB = str(period['pressureMB'])
sky = str(period['sky'])
feelslikeC = str(period['feelslikeC'])
windDirDEG = str(period['windDirDEG'])
windGustKPH = str(period['windGustKPH'])
windSpeedKPH = str(period['windSpeedKPH'])
weather = str(period['weather'])
weatherPrimaryCoded = str(period['weatherPrimaryCoded'])
isDay = str(period['isDay'])
item = {
"FitbitUserID": {"S": FitbitUserID},
"RecordDateTime": {"S": recordDateTime},
"maxTempC": {"S": maxTempC},
"minTempC": {"S": minTempC},
"precipMM": {"S": precipMM},
"humidity": {"S": humidity},
"uvi": {"S": uvi},
"pressureMB": {"S": pressureMB},
"sky": {"S": sky},
"feelslikeC": {"S": feelslikeC},
"windDirDEG": {"S": windDirDEG},
"windGustKPH": {"S": windGustKPH},
"windSpeedKPH": {"S": windSpeedKPH},
"weather": {"S": weather},
"weatherPrimaryCoded": {"S": weatherPrimaryCoded},
"isDay": {"S": isDay}
}
response = ddb.put_item(TableName = table, Item = item);
#print("put response: " + str(response))
pass
else:
print( ErrorInAPI )
| 30.626168
| 97
| 0.657309
|
a99348a6e51590128f65e33baf7efc9cbf5f1e54
| 1,759
|
py
|
Python
|
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/pilot/agent/lm/func_exec.py
|
radical-experiments/iceberg_escience
|
e5c230a23395a71a4adf554730ea3d77f923166c
|
[
"MIT"
] | 1
|
2019-05-24T02:19:29.000Z
|
2019-05-24T02:19:29.000Z
|
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/pilot/agent/lm/func_exec.py
|
radical-experiments/iceberg_escience
|
e5c230a23395a71a4adf554730ea3d77f923166c
|
[
"MIT"
] | null | null | null |
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/pilot/agent/lm/func_exec.py
|
radical-experiments/iceberg_escience
|
e5c230a23395a71a4adf554730ea3d77f923166c
|
[
"MIT"
] | null | null | null |
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
from .base import LaunchMethod
# ==============================================================================
#
class FuncExec(LaunchMethod):
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
LaunchMethod.__init__(self, cfg, session)
# --------------------------------------------------------------------------
#
def _configure(self):
self.launch_command = ''
self._req = rpu_Pubsub(self._session, 'func_request', rpu_PUBSUB_PUB)
self._res = rpu_Pubsub(self._session, 'func_result', rpu_PUBSUB_SUB)
self._res.subscribe('func_result')
# --------------------------------------------------------------------------
#
@classmethod
def lrms_config_hook(cls, name, cfg, lrms, logger, profiler):
# FIXME: - start func_exec pubsub
# - start result pubsub
pass
# --------------------------------------------------------------------------
#
def construct_command(self, cu, launch_script_hop):
# NOTE: ignore thread and process counts, and expect application to do
# the needful
slots = cu['slots']
cud = cu['description']
task_exec = cud['executable']
task_args = cud.get('arguments') or []
task_argstr = self._create_arg_string(task_args)
if task_argstr:
command = "%s %s" % (task_exec, task_argstr)
else:
command = task_exec
return command, None
# ------------------------------------------------------------------------------
| 26.651515
| 80
| 0.429221
|
0d019fb014d429779bd2c59234d657150bb86fa0
| 530
|
py
|
Python
|
atcoder/abc/c199.py
|
tomato-300yen/coding
|
db6f440a96d8c83f486005c650461a69f27e3926
|
[
"MIT"
] | null | null | null |
atcoder/abc/c199.py
|
tomato-300yen/coding
|
db6f440a96d8c83f486005c650461a69f27e3926
|
[
"MIT"
] | null | null | null |
atcoder/abc/c199.py
|
tomato-300yen/coding
|
db6f440a96d8c83f486005c650461a69f27e3926
|
[
"MIT"
] | null | null | null |
N = int(input())
NN = 2 * N
S = input()
Q = int(input())
TAB = [map(int, input().split()) for _ in range(Q)]
def trans(a):
return (a - N) % NN
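
# Type-2 queries swap the two halves of S; rather than rebuilding the string,
# the code only counts flips and maps indices through trans() when a swap is
# pending, applying the net half-swap once at the end.
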
# init
list_ = [i for i in range(NN)]
num_flip = 0
for t, a, b in TAB:
a, b = a - 1, b - 1
if t == 1:
if num_flip % 2 == 1:
a = trans(a)
b = trans(b)
list_[a], list_[b] = list_[b], list_[a]
else:
num_flip += 1
ans = "".join(map(lambda idx: S[idx], list_))
if num_flip % 2 == 1:
ans = ans[N:] + ans[:N]
print(ans)
| 17.666667
| 51
| 0.477358
|
4a4f3cf9a97abeaa410c9b1179f11e3184474b7b
| 240
|
py
|
Python
|
server.py
|
kleiba/pallyville
|
34f1ade802b6628d4fd9cbd39e729099517a4102
|
[
"MIT"
] | null | null | null |
server.py
|
kleiba/pallyville
|
34f1ade802b6628d4fd9cbd39e729099517a4102
|
[
"MIT"
] | null | null | null |
server.py
|
kleiba/pallyville
|
34f1ade802b6628d4fd9cbd39e729099517a4102
|
[
"MIT"
] | null | null | null |
from http.server import *
if __name__ == '__main__':
host = ''
port = 8080
server = HTTPServer((host, port), SimpleHTTPRequestHandler)
print("Starting HTTP server on port {}.".format(port))
server.serve_forever()
| 21.818182
| 63
| 0.65
|
db63051c8be116ef07248201edecc8eca0455e61
| 157
|
py
|
Python
|
pydarknet2/darknet/exceptions.py
|
dapperfu/pydarknet2
|
0457771152d4fbb8cfb512aada62a1c7e50862af
|
[
"MIT"
] | 2
|
2018-11-13T04:44:29.000Z
|
2019-08-06T08:25:34.000Z
|
pydarknet2/darknet/exceptions.py
|
dapperfu/pydarknet2
|
0457771152d4fbb8cfb512aada62a1c7e50862af
|
[
"MIT"
] | null | null | null |
pydarknet2/darknet/exceptions.py
|
dapperfu/pydarknet2
|
0457771152d4fbb8cfb512aada62a1c7e50862af
|
[
"MIT"
] | null | null | null |
"""Ipsum."""
class CloneException(Exception):
"""Clone exception."""
pass
class BuildException(Exception):
"""Build exception."""
pass
| 11.214286
| 32
| 0.617834
|
4e3b9767ea5476ed703c9f785963bb469a3ca910
| 244
|
py
|
Python
|
utils.py
|
ondrejba/monte_carlo
|
6bea9afcfaa813c0eb494ae76f615fc483846396
|
[
"MIT"
] | 14
|
2018-06-01T12:50:19.000Z
|
2021-01-05T22:18:26.000Z
|
utils.py
|
ondrejba/monte_carlo
|
6bea9afcfaa813c0eb494ae76f615fc483846396
|
[
"MIT"
] | null | null | null |
utils.py
|
ondrejba/monte_carlo
|
6bea9afcfaa813c0eb494ae76f615fc483846396
|
[
"MIT"
] | 5
|
2019-01-02T09:56:42.000Z
|
2022-03-11T06:10:22.000Z
|
def update_mean(value, mean, count):
"""
    Compute the update increment for a streaming mean.
    :param value: New value.
    :param mean: Current mean value.
    :param count: Number of values averaged so far.
    :return: The increment (value - mean) / (count + 1) to add to the mean.
"""
return (value - mean) / (count + 1)
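

# A minimal usage sketch: update_mean returns the *increment* to apply to the
# running mean rather than the new mean itself, so the caller accumulates it.
if __name__ == "__main__":
    values = [2.0, 4.0, 9.0]
    mean, count = 0.0, 0
    for v in values:
        mean += update_mean(v, mean, count)
        count += 1
    print(mean)  # 5.0 == sum(values) / len(values)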
| 24.4
| 46
| 0.614754
|
e25c7326f751048f4e6942166f0e42514f3ef597
| 20,642
|
py
|
Python
|
zstackwoodpecker/zstackwoodpecker/operations/resource_operations.py
|
bgerxx/woodpecker
|
fdc51245945cc9be4d1f028988079213eb99b2ad
|
[
"Apache-2.0"
] | null | null | null |
zstackwoodpecker/zstackwoodpecker/operations/resource_operations.py
|
bgerxx/woodpecker
|
fdc51245945cc9be4d1f028988079213eb99b2ad
|
[
"Apache-2.0"
] | null | null | null |
zstackwoodpecker/zstackwoodpecker/operations/resource_operations.py
|
bgerxx/woodpecker
|
fdc51245945cc9be4d1f028988079213eb99b2ad
|
[
"Apache-2.0"
] | null | null | null |
'''
List resource API
@author: Youyk
'''
import apibinding.api_actions as api_actions
import account_operations
import apibinding.inventory as inventory
import time
import os
import sys
import traceback
#Define default get resource method. default is using searchAPI, it can also be ListAPI.
SEARCH_RESOURCE_METHOD = 'search'
LIST_RESOURCE_METHOD = 'list'
GET_RESOURCE_METHOD_BY_GET = 'get'
#GET_RESOURCE_METHOD = SEARCH_RESOURCE_METHOD
GET_RESOURCE_METHOD = LIST_RESOURCE_METHOD
BACKUP_STORAGE = 'BackupStorage'
SFTP_BACKUP_STORAGE = 'SftpBackupStorage'
CEPH_BACKUP_STORAGE = 'CephBackupStorage'
ZONE = 'Zone'
CLUSTER = 'Cluster'
PRIMARY_STORAGE = 'PrimaryStorage'
CEPH_PRIMARY_STORAGE = 'CephPrimaryStorage'
L2_NETWORK = 'L2Network'
L2_VLAN_NETWORK = 'L2VlanNetwork'
L2_VXLAN_NETWORK = 'L2VxlanNetwork'
L2_VXLAN_NETWORK_POOL = 'L2VxlanNetworkPool'
VNI_RANGE = 'VniRange'
L3_NETWORK = 'L3Network'
INSTANCE_OFFERING = 'InstanceOffering'
IMAGE = 'Image'
VOLUME = 'Volume'
SHARE_VOLUME = 'ShareVolume'
VM_INSTANCE = 'VmInstance'
IP_RANGE = 'IpRange'
HOST = 'Host'
NETWORK_SERVICE_PROVIDER = 'NetworkServiceProvider'
NETWORK_SERVICE_PROVIDER_L3_REF = 'NetworkServiceProviderL3Ref'
APPLIANCE_VM = 'ApplianceVm'
VIRTUALROUTER_VM = 'VirtualRouterVm'
DISK_OFFERING = 'DiskOffering'
ACCOUNT = 'Account'
USER = 'User'
PRIMARY_STORAGE = 'PrimaryStorage'
SECURITY_GROUP = 'SecurityGroup'
SECURITY_GROUP_RULE = 'SecurityGroupRule'
VM_SECURITY_GROUP = 'VmSecurityGroup'
VM_NIC = 'VmNic'
PORT_FORWARDING = 'PortForwarding'
MANAGEMENT_NODE = 'ManagementNode'
EIP = 'Eip'
VIP = 'Vip'
IP_CAPACITY = 'IpCapacity'
VR_OFFERING = 'VirtualRouterOffering'
SYSTEM_TAG = 'SystemTag'
USER_TAG = 'UserTag'
VOLUME_SNAPSHOT_TREE = 'VolumeSnapshotTree'
VOLUME_SNAPSHOT = 'VolumeSnapshot'
LOAD_BALANCER = 'LoadBalancer'
LOAD_BALANCER_LISTENER = 'LoadBalancerListener'
LOCAL_STORAGE_RESOURCE_REF = 'LocalStorageResourceRef'
IMAGE_STORE_BACKUP_STORAGE = 'ImageStoreBackupStorage'
SCHEDULER = 'Scheduler'
SCHEDULERJOB = 'SchedulerJob'
SCHEDULERTRIGGER = 'SchedulerTrigger'
VCENTER = 'VCenter'
VCENTER_CLUSTER = 'VCenterCluster'
VCENTER_BACKUP_STORAGE = 'VCenterBackupStorage'
VCENTER_PRIMARY_STORAGE = 'VCenterPrimaryStorage'
MONITOR_TRIGGER = 'MonitorTrigger'
MONITOR_TRIGGER_ACTION = 'MonitorTriggerAction'
PXE_SERVER = 'PxeServer'
CHASSIS = 'Chassis'
HWINFO = 'HardwareInfo'
def find_item_by_uuid(inventories, uuid):
for item in inventories:
if item.uuid == uuid:
#test_util.test_logger("Item found by UUID: %s" % uuid)
return [item]
#test_util.test_logger("Not found item with UUID: %s" % uuid)
return None
def find_item_by_name(inventories, name):
for item in inventories:
if item.name == name:
#test_util.test_logger("Item found by name: %s" % name)
return [item]
#test_util.test_logger("Not found item with name: %s" % name)
return None
#Using List API
def list_resource(resource, session_uuid=None, uuid=None, name=None):
'''
Return: list by list API.
'''
if resource == BACKUP_STORAGE:
action = api_actions.ListBackupStorageAction()
elif resource == ZONE:
action = api_actions.ListZonesAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.ListPrimaryStorageAction()
elif resource == L2_NETWORK:
action = api_actions.ListL2NetworkAction()
elif resource == L2_VLAN_NETWORK:
action = api_actions.ListL2VlanNetworkAction()
elif resource == CLUSTER:
action = api_actions.ListClusterAction()
elif resource == L3_NETWORK:
action = api_actions.ListL3NetworkAction()
elif resource == INSTANCE_OFFERING:
action = api_actions.ListInstanceOfferingAction()
elif resource == IMAGE:
action = api_actions.ListImageAction()
elif resource == VOLUME:
action = api_actions.ListVolumeAction()
elif resource == VM_INSTANCE:
action = api_actions.ListVmInstanceAction()
elif resource == IP_RANGE:
action = api_actions.ListIpRangeAction()
elif resource == HOST:
action = api_actions.ListHostAction()
elif resource == NETWORK_SERVICE_PROVIDER:
action = api_actions.ListNetworkServiceProviderAction()
elif resource == APPLIANCE_VM:
action = api_actions.ListApplianceVmAction()
elif resource == DISK_OFFERING:
action = api_actions.ListDiskOfferingAction()
elif resource == ACCOUNT:
action = api_actions.ListAccountAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.ListPrimaryStorageAction()
elif resource == SECURITY_GROUP:
action = api_actions.ListSecurityGroupAction()
elif resource == VM_SECURITY_GROUP:
action = api_actions.ListVmNicInSecurityGroupAction()
elif resource == VM_NIC:
action = api_actions.ListVmNicAction()
elif resource == PORT_FORWARDING:
action = api_actions.ListPortForwardingRuleAction()
elif resource == MANAGEMENT_NODE:
action = api_actions.ListManagementNodeAction()
ret = account_operations.execute_action_with_session(action, session_uuid)
if uuid:
return find_item_by_uuid(ret, uuid)
if name:
return find_item_by_name(ret, name)
return ret
#Using Search API
def search_resource(resource, session_uuid, uuid=None, name=None):
'''
Return: list by search
    This API is deprecated.
'''
if resource == BACKUP_STORAGE:
action = api_actions.SearchBackupStorageAction()
elif resource == ZONE:
action = api_actions.SearchZoneAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.SearchPrimaryStorageAction()
elif resource == L2_NETWORK:
action = api_actions.SearchL2NetworkAction()
elif resource == L2_VLAN_NETWORK:
action = api_actions.SearchL2VlanNetworkAction()
elif resource == CLUSTER:
action = api_actions.SearchClusterAction()
elif resource == L3_NETWORK:
action = api_actions.SearchL3NetworkAction()
elif resource == INSTANCE_OFFERING:
action = api_actions.SearchInstanceOfferingAction()
elif resource == IMAGE:
action = api_actions.SearchImageAction()
elif resource == VOLUME:
action = api_actions.SearchVolumeAction()
elif resource == VM_INSTANCE:
action = api_actions.SearchVmInstanceAction()
elif resource == IP_RANGE:
action = api_actions.SearchIpRangeAction()
elif resource == HOST:
action = api_actions.SearchHostAction()
elif resource == NETWORK_SERVICE_PROVIDER:
action = api_actions.SearchNetworkServiceProviderAction()
elif resource == APPLIANCE_VM:
action = api_actions.SearchApplianceVmAction()
elif resource == DISK_OFFERING:
action = api_actions.SearchDiskOfferingAction()
elif resource == ACCOUNT:
action = api_actions.SearchAccountAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.SearchPrimaryStorageAction()
#elif resource == SECURITY_GROUP:
# action = api_actions.SearchSecurityGroupAction()
#elif resource == VM_SECURITY_GROUP:
# action = api_actions.SearchVmNicInSecurityGroupAction()
action.sessionUuid = session_uuid
action.nameOpValueTriples = []
if uuid:
t = inventory.NOVTriple()
t.name = 'uuid'
t.op = inventory.AND_EQ
t.val = uuid
action.nameOpValueTriples.append(t)
if name:
t = inventory.NOVTriple()
t.name = 'name'
t.op = inventory.AND_EQ
t.val = name
action.nameOpValueTriples.append(t)
    # the time delay is needed because the elastic search inventory is updated about 0.5s after the original data is created in the database.
time.sleep(0.3)
ret = action.run()
return ret
def get_resource_by_get(resource, session_uuid, uuid):
'''
Return a list by get API.
'''
if resource == BACKUP_STORAGE:
action = api_actions.GetBackupStorageAction()
elif resource == ZONE:
action = api_actions.GetZoneAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.GetPrimaryStorageAction()
elif resource == L2_NETWORK:
action = api_actions.GetL2NetworkAction()
elif resource == L2_VLAN_NETWORK:
action = api_actions.GetL2VlanNetworkAction()
elif resource == CLUSTER:
action = api_actions.GetClusterAction()
elif resource == L3_NETWORK:
action = api_actions.GetL3NetworkAction()
elif resource == INSTANCE_OFFERING:
action = api_actions.GetInstanceOfferingAction()
elif resource == IMAGE:
action = api_actions.GetImageAction()
elif resource == VOLUME:
action = api_actions.GetVolumeAction()
elif resource == VM_INSTANCE:
action = api_actions.GetVmInstanceAction()
elif resource == IP_RANGE:
action = api_actions.GetIpRangeAction()
elif resource == HOST:
action = api_actions.GetHostAction()
elif resource == NETWORK_SERVICE_PROVIDER:
action = api_actions.GetNetworkServiceProviderAction()
elif resource == APPLIANCE_VM:
action = api_actions.GetApplianceVmAction()
elif resource == DISK_OFFERING:
action = api_actions.GetDiskOfferingAction()
elif resource == ACCOUNT:
action = api_actions.GetAccountAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.GetPrimaryStorageAction()
elif resource == VR_OFFERING:
action = api_actions.GetVirtualRouterOfferingAction()
#elif resource == SECURITY_GROUP:
# action = api_actions.GetSecurityGroupAction()
#elif resource == VM_SECURITY_GROUP:
# action = api_actions.GetVmNicInSecurityGroupAction()
action.uuid = uuid
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret
def gen_query_conditions(name, op, value, conditions=[]):
new_conditions = [{'name': name, 'op': op, 'value': value}]
new_conditions.extend(conditions)
return new_conditions
def _gen_query_action(resource):
if resource == BACKUP_STORAGE:
action = api_actions.QueryBackupStorageAction()
elif resource == SFTP_BACKUP_STORAGE:
action = api_actions.QuerySftpBackupStorageAction()
elif resource == CEPH_BACKUP_STORAGE:
action = api_actions.QueryCephBackupStorageAction()
elif resource == ZONE:
action = api_actions.QueryZoneAction()
elif resource == PRIMARY_STORAGE:
action = api_actions.QueryPrimaryStorageAction()
elif resource == L2_NETWORK:
action = api_actions.QueryL2NetworkAction()
elif resource == L2_VLAN_NETWORK:
action = api_actions.QueryL2VlanNetworkAction()
elif resource == L2_VXLAN_NETWORK:
action = api_actions.QueryL2VxlanNetworkAction()
elif resource == L2_VXLAN_NETWORK_POOL:
action = api_actions.QueryL2VxlanNetworkPoolAction()
elif resource == VNI_RANGE:
action = api_actions.QueryVniRangeAction()
elif resource == CLUSTER:
action = api_actions.QueryClusterAction()
elif resource == L3_NETWORK:
action = api_actions.QueryL3NetworkAction()
elif resource == INSTANCE_OFFERING:
action = api_actions.QueryInstanceOfferingAction()
elif resource == IMAGE:
action = api_actions.QueryImageAction()
elif resource == VOLUME:
action = api_actions.QueryVolumeAction()
elif resource == SHARE_VOLUME:
action = api_actions.QueryShareableVolumeVmInstanceRefAction()
elif resource == VM_INSTANCE:
action = api_actions.QueryVmInstanceAction()
elif resource == IP_RANGE:
action = api_actions.QueryIpRangeAction()
elif resource == HOST:
action = api_actions.QueryHostAction()
elif resource == NETWORK_SERVICE_PROVIDER:
action = api_actions.QueryNetworkServiceProviderAction()
elif resource == NETWORK_SERVICE_PROVIDER_L3_REF:
action = api_actions.QueryNetworkServiceL3NetworkRefAction()
elif resource == APPLIANCE_VM:
action = api_actions.QueryApplianceVmAction()
elif resource == VIRTUALROUTER_VM:
action = api_actions.QueryVirtualRouterVmAction()
elif resource == DISK_OFFERING:
action = api_actions.QueryDiskOfferingAction()
elif resource == ACCOUNT:
action = api_actions.QueryAccountAction()
elif resource == CEPH_PRIMARY_STORAGE:
action = api_actions.QueryCephPrimaryStorageAction()
elif resource == SECURITY_GROUP:
action = api_actions.QuerySecurityGroupAction()
elif resource == SECURITY_GROUP_RULE:
action = api_actions.QuerySecurityGroupRuleAction()
elif resource == VM_SECURITY_GROUP:
action = api_actions.QueryVmNicInSecurityGroupAction()
elif resource == VM_NIC:
action = api_actions.QueryVmNicAction()
elif resource == PORT_FORWARDING:
action = api_actions.QueryPortForwardingRuleAction()
elif resource == MANAGEMENT_NODE:
action = api_actions.QueryManagementNodeAction()
elif resource == EIP:
action = api_actions.QueryEipAction()
elif resource == VIP:
action = api_actions.QueryVipAction()
elif resource == VR_OFFERING:
action = api_actions.QueryVirtualRouterOfferingAction()
elif resource == SYSTEM_TAG:
action = api_actions.QuerySystemTagAction()
elif resource == USER_TAG:
action = api_actions.QueryUserTagAction()
elif resource == VOLUME_SNAPSHOT_TREE:
action = api_actions.QueryVolumeSnapshotTreeAction()
elif resource == VOLUME_SNAPSHOT:
action = api_actions.QueryVolumeSnapshotAction()
elif resource == USER:
action = api_actions.QueryUserAction()
elif resource == LOAD_BALANCER:
action = api_actions.QueryLoadBalancerAction()
elif resource == LOAD_BALANCER_LISTENER:
action = api_actions.QueryLoadBalancerListenerAction()
elif resource == LOCAL_STORAGE_RESOURCE_REF:
action = api_actions.QueryLocalStorageResourceRefAction()
elif resource == IMAGE_STORE_BACKUP_STORAGE:
action = api_actions.QueryImageStoreBackupStorageAction()
elif resource == SCHEDULER:
action = api_actions.QuerySchedulerAction()
elif resource == SCHEDULERJOB:
action = api_actions.QuerySchedulerJobAction()
elif resource == SCHEDULERTRIGGER:
action = api_actions.QuerySchedulerTriggerAction()
elif resource == VCENTER:
action = api_actions.QueryVCenterAction()
elif resource == VCENTER_CLUSTER:
action = api_actions.QueryVCenterClusterAction()
elif resource == VCENTER_BACKUP_STORAGE:
action = api_actions.QueryVCenterBackupStorageAction()
elif resource == VCENTER_PRIMARY_STORAGE:
action = api_actions.QueryVCenterPrimaryStorageAction()
elif resource == MONITOR_TRIGGER:
action = api_actions.QueryMonitorTriggerAction()
elif resource == MONITOR_TRIGGER_ACTION:
action = api_actions.QueryMonitorTriggerActionAction()
elif resource == PXE_SERVER:
action = api_actions.QueryBaremetalPxeServerAction()
elif resource == CHASSIS:
action = api_actions.QueryBaremetalChassisAction()
elif resource == HWINFO:
action = api_actions.QueryBaremetalHardwareInfoAction()
return action
def query_resource(resource, conditions = [], session_uuid=None, count='false'):
'''
Call Query API and return all matched resource.
conditions could be generated by gen_query_conditions()
    If session_uuid is missing, one will be created automatically and will
    only live for the duration of this API call.
'''
action = _gen_query_action(resource)
action.conditions = conditions
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret
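

# A minimal usage sketch of the query helpers, assuming a live ZStack session
# and that VmInstance exposes a 'state' field; the VM name 'test-vm' is
# illustrative only.
def _example_query_running_vms(session_uuid=None):
    conds = gen_query_conditions('state', '=', 'Running')
    conds = gen_query_conditions('name', '=', 'test-vm', conds)
    return query_resource(VM_INSTANCE, conds, session_uuid)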
def query_resource_count(resource, conditions = [], session_uuid=None):
'''
Call Query API to return the matched resource count
    When count=true, it will only return the number of matched resources
'''
action = _gen_query_action(resource)
action.conditions = conditions
action.count='true'
account_operations.execute_action_with_session(action, session_uuid)
return action.reply.total
def query_resource_with_num(resource, conditions = [], session_uuid=None, \
start=0, limit=1000):
'''
Query matched resource and return required numbers.
'''
action = _gen_query_action(resource)
action.conditions = conditions
action.start = start
action.limit = limit
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret
def query_resource_fields(resource, conditions = [], session_uuid=None, \
fields=[], start=0, limit=1000):
'''
Query matched resource by returning required fields and required numbers.
'''
action = _gen_query_action(resource)
action.conditions = conditions
action.start = start
action.limit = limit
action.fields = fields
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret
def get_resource(resource, session_uuid=None, uuid=None, name=None):
if uuid:
cond = gen_query_conditions('uuid', '=', uuid)
elif name:
cond = gen_query_conditions('name', '=', name)
else:
cond = gen_query_conditions('uuid', '!=', 'NULL')
return query_resource(resource, cond, session_uuid)
#if GET_RESOURCE_METHOD == LIST_RESOURCE_METHOD:
# return list_resource(resource, session_uuid, uuid=uuid, name=name)
#elif GET_RESOURCE_METHOD == GET_RESOURCE_METHOD_BY_GET:
# if not uuid:
# raise Exception('Get_Resource function error, uuid can not be None')
# return get_resource_by_get(resource, session_uuid, uuid=uuid)
#else:
# return search_resource(resource, session_uuid, uuid=uuid, name=name)
def safely_get_resource(res_name, cond = [], session_uuid = None, \
fields = None, limit = 100):
'''
    If there are a lot of resources (e.g. >1k), querying all of them in one
    command is very dangerous. It might crash ZStack when the data set is huge.
'''
res_count = query_resource_count(res_name, cond, session_uuid)
res_list = []
if res_count <= limit:
res_list = query_resource_fields(res_name, cond, session_uuid, fields)
else:
curr_count = 0
while curr_count <= res_count:
            curr_list = query_resource_fields(res_name, cond, \
                    session_uuid, fields, start=curr_count, limit=limit)
res_list.extend(curr_list)
curr_count += limit
return res_list
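# Illustrative usage sketch (not part of the original module): one way the
# query helpers above are typically combined. It assumes gen_query_conditions()
# is the condition builder referenced in the docstrings and that VCENTER is one
# of the resource constants handled by _gen_query_action().
def _example_query_vcenter_by_name(name, session_uuid=None):
    cond = gen_query_conditions('name', '=', name)
    total = query_resource_count(VCENTER, cond, session_uuid)
    if total > 100:
        # Page through large result sets instead of fetching them in one call.
        return safely_get_resource(VCENTER, cond, session_uuid, limit=100)
    return query_resource(VCENTER, cond, session_uuid)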
def change_recource_owner(accountUuid, resourceUuid, session_uuid = None):
action = api_actions.ChangeResourceOwnerAction()
action.accountUuid = accountUuid
action.resourceUuid = resourceUuid
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def get_resource_owner(resourceUuid, session_uuid = None):
action = api_actions.GetResourceAccountAction()
action.resourceUuids = resourceUuid
ret = account_operations.execute_action_with_session(action, session_uuid)
return ret.inventories[resourceUuid[0]].uuid
def get_task_progress(apiId, session_uuid = None):
action = api_actions.GetTaskProgressAction()
action.apiId = apiId
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
def get_progress(apiId, session_uuid = None):
action = api_actions.GetTaskProgressAction()
action.apiId = apiId
evt = account_operations.execute_action_with_session(action, session_uuid)
inventories = []
for ei in evt.inventories:
if ei.type == 'Progress':
inventories.append(ei)
return inventories
def enable_change_vm_password(is_enable, resourceUuid, resourceType, session_uuid = None):
action = api_actions.EnableChangeVmPasswordAction()
action.enable = is_enable
action.resourceUuid = resourceUuid
action.resourceType = resourceType
evt = account_operations.execute_action_with_session(action, session_uuid)
return evt
| 39.020794
| 120
| 0.698866
|
d6d5bfad115c03c01450a6641aa8a5759b13c18e
| 2,964
|
py
|
Python
|
pwass/dimsensionality_reduction/simplicial_pca.py
|
mberaha/ProjectedWasserstein
|
20d19fc49f20124762eb497031cba0918b5eaadb
|
[
"BSD-3-Clause"
] | 1
|
2021-11-30T09:11:56.000Z
|
2021-11-30T09:11:56.000Z
|
pwass/dimsensionality_reduction/simplicial_pca.py
|
mberaha/ProjectedWasserstein
|
20d19fc49f20124762eb497031cba0918b5eaadb
|
[
"BSD-3-Clause"
] | 1
|
2021-05-25T08:49:25.000Z
|
2021-05-25T08:49:25.000Z
|
pwass/dimsensionality_reduction/simplicial_pca.py
|
mberaha/ProjectedWasserstein
|
20d19fc49f20124762eb497031cba0918b5eaadb
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from scipy.integrate import simps
from pwass.dimsensionality_reduction.base_pca import PCA
from pwass.spline import SplineBasis
class SimplicialPCA(object):
def __init__(self, nbasis, k=3, compute_spline=True, spline_basis=None):
self.nbasis = nbasis
self.k = k
self.remove_last_col = False
self.compute_spline = compute_spline
self.spline_basis = spline_basis
@staticmethod
def clr(f_eval, grid):
log_f = np.log(f_eval + 1e-30)
out = log_f - simps(log_f / (grid[-1] - grid[0]), grid)
return out
@staticmethod
def inv_clr(f_eval, grid):
out = np.exp(f_eval)
den = simps(out, grid)
return out / den
def fit(self, distribs, k):
"""
All the distributions must be defined on the same grid
"""
# TODO (@MARIO): check that the grid is the same for all
# distributions
if self.spline_basis is None:
self.spline_basis = SplineBasis(
self.k, xgrid=distribs[0].pdf_grid, nbasis=self.nbasis)
self.metric = self.spline_basis.metric
self._process_data(distribs)
self.k = k
coeff_centered = self.coeff_mat - self.bary
M = np.matmul(np.dot(coeff_centered.T, coeff_centered) +
np.eye(self.nbasis) * 1e-4, self.metric)
eig_vals, eig_vecs = np.linalg.eig(M)
eig_vals = np.real(eig_vals) + 1e-6
eig_vecs = np.real(eig_vecs) + 1e-6
eig_vecs = eig_vecs / np.sqrt(
np.diag(np.matmul(np.matmul(eig_vecs.T, self.metric), eig_vecs)))
aux = np.argsort(eig_vals)
self.eig_vals = np.flip(np.sort(eig_vals))
self.eig_vecs = np.flip(eig_vecs[:, aux], axis=1)
self.base_change = np.linalg.inv(self.eig_vecs)
def transform(self, distribs):
X = self.get_spline_mat(distribs)
X_trans = np.matmul(X - self.bary, self.base_change.T)[:, :self.k]
return X_trans
def pt_from_proj(self, proj_coord):
pt = np.dot(proj_coord, self.eig_vecs[:, :self.k].T)
return pt
def get_pdf(self, proj_coeffs):
func = self.spline_basis.eval_spline(proj_coeffs, self.pdf_grid)
return self.inv_clr(func, self.pdf_grid)
def _process_data(self, distribs):
self.pdf_grid = distribs[0].pdf_grid
self.ndata = len(distribs)
self.distribs = distribs
self.coeff_mat = self.get_spline_mat(distribs)
self.bary = np.mean(self.coeff_mat, axis=0)
def get_spline_mat(self, distribs):
out = np.zeros((len(distribs), self.nbasis))
for i, d in enumerate(distribs):
if self.compute_spline:
out[i, :] = self.spline_basis.get_spline_expansion(
self.clr(d.pdf_eval, d.pdf_grid))
else:
out[i, :] = d.clr_coeffs
return out
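# Illustrative check (not part of the original module): the clr transform and
# its inverse defined above should round-trip a normalised density on a grid,
# up to numerical integration error.
if __name__ == "__main__":
    grid = np.linspace(0.0, 1.0, 200)
    pdf = np.exp(-0.5 * ((grid - 0.5) / 0.1) ** 2)
    pdf = pdf / simps(pdf, grid)  # normalise so the density integrates to 1
    recovered = SimplicialPCA.inv_clr(SimplicialPCA.clr(pdf, grid), grid)
    assert np.allclose(pdf, recovered, atol=1e-6)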
| 32.933333
| 77
| 0.602901
|
380e48809a93b6c2f6dcd9ef7eb066f010f7a407
| 958
|
py
|
Python
|
Coding Problem 3.3.12.py
|
opalqnka/CS1301xII
|
bba9cf34a7f6f0d1e0d90a73b933f3a90bdb825e
|
[
"MIT"
] | null | null | null |
Coding Problem 3.3.12.py
|
opalqnka/CS1301xII
|
bba9cf34a7f6f0d1e0d90a73b933f3a90bdb825e
|
[
"MIT"
] | null | null | null |
Coding Problem 3.3.12.py
|
opalqnka/CS1301xII
|
bba9cf34a7f6f0d1e0d90a73b933f3a90bdb825e
|
[
"MIT"
] | 1
|
2020-07-16T06:56:22.000Z
|
2020-07-16T06:56:22.000Z
|
mystery_string = "my cat your cat"
#You may modify the lines of code above, but don't move them!
#When you Submit your code, we'll change these lines to
#assign different values to the variables.
#Add some code below that will count and print how many
#times the character sequence "cat" appears in mystery_string.
#For example, for the string above, it would print 2.
#
#This one is tricky! Think carefully about for-each loops,
#conditionals, and booleans. How can you track what character
#you're currently looking for? We expect you'll use a loop
#and a single big conditional, but there are other approaches
#as well. Try to stick with the topics we've covered so far.
#Add your code here!
counter = 0
result = 0
for char in mystery_string:
if counter <= (len(mystery_string) - 3):
if char == "c" and mystery_string[counter+1] == "a" and mystery_string[counter+2]=="t":
result += 1
counter += 1
print(result)
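#Alternative sketch (not part of the original solution): the same count can be
#computed by slicing three characters at a time, which avoids the manual index
#bookkeeping above. It prints the same count a second time.
alt_result = 0
for i in range(len(mystery_string) - 2):
    if mystery_string[i:i + 3] == "cat":
        alt_result += 1
print(alt_result)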
| 35.481481
| 95
| 0.716075
|
3199dbe4aad30f7ee646a64f0ef474c76c95aef1
| 5,995
|
py
|
Python
|
mayatools/geocache/exporter.py
|
westernx/mayatools
|
47c91050cb54167268d456e130ffce2d55373381
|
[
"BSD-3-Clause"
] | 47
|
2015-01-07T17:38:39.000Z
|
2022-03-22T02:42:39.000Z
|
mayatools/geocache/exporter.py
|
westernx/mayatools
|
47c91050cb54167268d456e130ffce2d55373381
|
[
"BSD-3-Clause"
] | 1
|
2016-04-25T09:02:57.000Z
|
2016-04-25T13:55:13.000Z
|
mayatools/geocache/exporter.py
|
westernx/mayatools
|
47c91050cb54167268d456e130ffce2d55373381
|
[
"BSD-3-Clause"
] | 12
|
2015-07-13T12:32:35.000Z
|
2020-04-29T02:58:49.000Z
|
from __future__ import absolute_import
import os
import re
import traceback
from maya import cmds, mel
from mayatools.playblast import screenshot
from sgfs import SGFS
from sgfs.commands.utils import parse_spec
import metatools.deprecate
import qbfutures.maya
import sgpublish.commands.utils as publish_cli_utils
import sgpublish.exporter.maya
from .utils import export_cache
def run():
import warnings
warnings.warn('exporter.run moved to exporterui')
from .exporterui import run
run()
def cache_name_from_cache_set(path):
name_parts = path.split(':')
name_parts[-1] = name_parts[-1].replace('cache', '_')
name_parts = [re.sub(r'[\W_]+', '_', x).strip('_') for x in name_parts]
name_parts[-1] = '_' + name_parts[-1]
return '_'.join(name_parts).strip('_')
class Exporter(sgpublish.exporter.maya.Exporter):
def __init__(self):
super(Exporter, self).__init__(
workspace=cmds.workspace(q=True, fullName=True) or None,
filename_hint=cmds.file(q=True, sceneName=True) or 'geocache.mb',
publish_type='maya_geocache',
)
def add_path_to_work(self, directory, to_cache):
for members, name, frame_from, frame_to, world in to_cache:
yield members, os.path.join(directory, name), name, frame_from, frame_to, world
def export_publish(self, publish, **kwargs):
# Set the path to the directory.
publish.path = publish.directory
kwargs['name'] = '%s - v%04d' % (publish.name, publish.version)
kwargs['alembic_metadata'] = metadata = (kwargs.get('alembic_metadata') or {}).copy()
metadata['sgpublish'] = {
'entity': publish.entity.minimal,
'name': publish.name,
'path': publish.path,
'type': publish.type,
'version': publish.version,
}
self.export(publish.directory, publish.path, **kwargs)
def export(self, directory, path, to_cache, on_farm=False, as_abc=True, alembic_metadata=None, name=None):
if not os.path.exists(directory):
os.makedirs(directory)
# Save the scene itself into the directory.
src_path = cmds.file(q=True, sceneName=True)
src_ext = os.path.splitext(src_path)[1]
dst_path = os.path.join(directory, os.path.basename(src_path))
maya_type = 'mayaBinary' if src_ext == '.mb' else 'mayaAscii'
try:
cmds.file(rename=dst_path)
cmds.file(save=True, type=maya_type)
finally:
cmds.file(rename=src_path)
# Add the path.
to_cache = self.add_path_to_work(path, to_cache)
if on_farm:
executor = qbfutures.maya.Executor(
cpus=4,
clone_environ=True,
create_tempfile=True,
)
with executor.batch('Geocache Export - %s' % (name or os.path.basename(path))) as batch:
for args in to_cache:
members, path, name, frame_from, frame_to, world = args
batch.submit_ext(export_cache, args=args, kwargs={'as_abc': as_abc, 'alembic_metadata': alembic_metadata}, name=str(name))
try:
from PyQt4 import QtGui
except ImportError:
print 'job', batch.futures[0].job_id
else:
QtGui.QMessageBox.information(None, "Submitted to Qube", "The geocache export was submitted as job %d" % batch.futures[0].job_id)
if not on_farm:
for args in to_cache:
export_cache(*args, as_abc=as_abc, alembic_metadata=alembic_metadata)
def main(argv=None):
import argparse
import logging
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument('--no-world', action='store_true')
parser.add_argument('--no-abc', action='store_true')
parser.add_argument('-s', '--start', type=int)
parser.add_argument('-e', '--end', type=int)
parser.add_argument('-d', '--out-dir')
publish_cli_utils.add_publisher_arguments(parser, short_flags=False, prefix=True)
parser.add_argument('-l', '--list-sets', action='store_true')
parser.add_argument('scene')
parser.add_argument('cache_sets', nargs='*')
args = parser.parse_args(argv)
publisher_kwargs = publish_cli_utils.extract_publisher_kwargs(args)
log.info('initializing Maya')
import maya.standalone
maya.standalone.initialize()
log.info('loading file')
cmds.file(args.scene, open=True)
log.info('done loading file')
cache_sets = set(cmds.ls(*(args.cache_sets or ['__cache__*']), sets=True, recursive=True, long=True) or ())
if args.list_sets:
print '\n'.join(sorted(cache_sets))
return
frame_from = args.start or cmds.playbackOptions(q=True, animationStartTime=True)
frame_to = args.end or cmds.playbackOptions(q=True, animationEndTime=True)
world = not args.no_world
as_abc = not args.no_abc
to_cache = []
for cache_set in cache_sets:
members = cmds.sets(cache_set, q=True)
name = cache_name_from_cache_set(cache_set) or 'cache'
to_cache.append((members, name, frame_from, frame_to, world))
exporter = Exporter()
name = os.path.splitext(os.path.basename(args.scene))[0]
if 'link' in publisher_kwargs or 'template' in publisher_kwargs:
if 'template' not in publisher_kwargs:
publisher_kwargs['name'] = name
exporter.publish(export_kwargs=dict(to_cache=to_cache, as_abc=as_abc), **publisher_kwargs)
else:
directory = args.out_dir or os.path.join(args.scene, '..', 'data', 'geo_cache', name)
exporter.export(directory=directory, path=directory, to_cache=to_cache, as_abc=as_abc)
log.info('DONE')
if __name__ == '__main__':
try:
main()
except Exception as e:
traceback.print_exc()
os._exit(1)
else:
os._exit(0)
| 33.870056
| 145
| 0.641868
|
e84345f014083de835a70bba7fc77662a2f0bb46
| 13,960
|
py
|
Python
|
extractors/i3d/i3d.py
|
KTaskn/MILForVideos
|
cf8921b3a9b133224bcefe3bc11e6b7f1c61fa82
|
[
"Apache-2.0"
] | null | null | null |
extractors/i3d/i3d.py
|
KTaskn/MILForVideos
|
cf8921b3a9b133224bcefe3bc11e6b7f1c61fa82
|
[
"Apache-2.0"
] | null | null | null |
extractors/i3d/i3d.py
|
KTaskn/MILForVideos
|
cf8921b3a9b133224bcefe3bc11e6b7f1c61fa82
|
[
"Apache-2.0"
] | null | null | null |
#I3D code adapted from https://github.com/piergiaj/pytorch-i3d
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class MaxPool3dSamePadding(nn.MaxPool3d):
def compute_pad(self, dim, s):
if s % self.stride[dim] == 0:
return max(self.kernel_size[dim] - self.stride[dim], 0)
else:
return max(self.kernel_size[dim] - (s % self.stride[dim]), 0)
def forward(self, x):
# compute 'same' padding
(batch, channel, t, h, w) = x.size()
# print t,h,w
out_t = np.ceil(float(t) / float(self.stride[0]))
out_h = np.ceil(float(h) / float(self.stride[1]))
out_w = np.ceil(float(w) / float(self.stride[2]))
# print out_t, out_h, out_w
pad_t = self.compute_pad(0, t)
pad_h = self.compute_pad(1, h)
pad_w = self.compute_pad(2, w)
# print pad_t, pad_h, pad_w
pad_t_f = pad_t // 2
pad_t_b = pad_t - pad_t_f
pad_h_f = pad_h // 2
pad_h_b = pad_h - pad_h_f
pad_w_f = pad_w // 2
pad_w_b = pad_w - pad_w_f
pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
# print x.size()
# print pad
x = F.pad(x, pad)
return super(MaxPool3dSamePadding, self).forward(x)
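# Illustrative check (not part of the original module): the dynamic 'same'
# padding above reproduces TensorFlow-style output sizes, e.g. pooling an input
# of length 7 with kernel 3 and stride 2 yields ceil(7 / 2) = 4 elements per
# spatial/temporal dimension.
def _example_same_padding_check():
    pool = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(2, 2, 2))
    out = pool(torch.randn(1, 1, 7, 7, 7))
    assert out.shape[2:] == (4, 4, 4)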
class Unit3D(nn.Module):
def __init__(self, in_channels,
output_channels,
kernel_shape=(1, 1, 1),
stride=(1, 1, 1),
padding=0,
activation_fn=F.relu,
use_batch_norm=True,
use_bias=False,
name='unit_3d'):
"""Initializes Unit3D module."""
super(Unit3D, self).__init__()
self._output_channels = output_channels
self._kernel_shape = kernel_shape
self._stride = stride
self._use_batch_norm = use_batch_norm
self._activation_fn = activation_fn
self._use_bias = use_bias
self.name = name
self.padding = padding
self.conv3d = nn.Conv3d(in_channels=in_channels,
out_channels=self._output_channels,
kernel_size=self._kernel_shape,
stride=self._stride,
padding=0,
# we always want padding to be 0 here. We will dynamically pad based on input size in forward function
bias=self._use_bias)
if self._use_batch_norm:
self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01)
def compute_pad(self, dim, s):
if s % self._stride[dim] == 0:
return max(self._kernel_shape[dim] - self._stride[dim], 0)
else:
return max(self._kernel_shape[dim] - (s % self._stride[dim]), 0)
def forward(self, x):
# compute 'same' padding
(batch, channel, t, h, w) = x.size()
# print t,h,w
out_t = np.ceil(float(t) / float(self._stride[0]))
out_h = np.ceil(float(h) / float(self._stride[1]))
out_w = np.ceil(float(w) / float(self._stride[2]))
# print out_t, out_h, out_w
pad_t = self.compute_pad(0, t)
pad_h = self.compute_pad(1, h)
pad_w = self.compute_pad(2, w)
# print pad_t, pad_h, pad_w
pad_t_f = pad_t // 2
pad_t_b = pad_t - pad_t_f
pad_h_f = pad_h // 2
pad_h_b = pad_h - pad_h_f
pad_w_f = pad_w // 2
pad_w_b = pad_w - pad_w_f
pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b)
# print x.size()
# print pad
x = F.pad(x, pad)
# print x.size()
x = self.conv3d(x)
if self._use_batch_norm:
x = self.bn(x)
if self._activation_fn is not None:
x = self._activation_fn(x)
return x
class InceptionModule(nn.Module):
def __init__(self, in_channels, out_channels, name):
super(InceptionModule, self).__init__()
self.b0 = Unit3D(in_channels=in_channels, output_channels=out_channels[0], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_0/Conv3d_0a_1x1')
self.b1a = Unit3D(in_channels=in_channels, output_channels=out_channels[1], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_1/Conv3d_0a_1x1')
self.b1b = Unit3D(in_channels=out_channels[1], output_channels=out_channels[2], kernel_shape=[3, 3, 3],
name=name + '/Branch_1/Conv3d_0b_3x3')
self.b2a = Unit3D(in_channels=in_channels, output_channels=out_channels[3], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_2/Conv3d_0a_1x1')
self.b2b = Unit3D(in_channels=out_channels[3], output_channels=out_channels[4], kernel_shape=[3, 3, 3],
name=name + '/Branch_2/Conv3d_0b_3x3')
self.b3a = MaxPool3dSamePadding(kernel_size=[3, 3, 3],
stride=(1, 1, 1), padding=0)
self.b3b = Unit3D(in_channels=in_channels, output_channels=out_channels[5], kernel_shape=[1, 1, 1], padding=0,
name=name + '/Branch_3/Conv3d_0b_1x1')
self.name = name
def forward(self, x):
b0 = self.b0(x)
b1 = self.b1b(self.b1a(x))
b2 = self.b2b(self.b2a(x))
b3 = self.b3b(self.b3a(x))
return torch.cat([b0, b1, b2, b3], dim=1)
class InceptionI3d(nn.Module):
"""Inception-v1 I3D architecture.
The model is introduced in:
Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset
Joao Carreira, Andrew Zisserman
https://arxiv.org/pdf/1705.07750v1.pdf.
See also the Inception architecture, introduced in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
"""
# Endpoints of the model in order. During construction, all the endpoints up
# to a designated `final_endpoint` are returned in a dictionary as the
# second return value.
VALID_ENDPOINTS = (
'Conv3d_1a_7x7',
'MaxPool3d_2a_3x3',
'Conv3d_2b_1x1',
'Conv3d_2c_3x3',
'MaxPool3d_3a_3x3',
'Mixed_3b',
'Mixed_3c',
'MaxPool3d_4a_3x3',
'Mixed_4b',
'Mixed_4c',
'Mixed_4d',
'Mixed_4e',
'Mixed_4f',
'MaxPool3d_5a_2x2',
'Mixed_5b',
'Mixed_5c',
'Logits',
'Predictions',
)
def __init__(self, num_classes=157, spatial_squeeze=True,
final_endpoint='Logits', name='inception_i3d', in_channels=3, dropout_keep_prob=0.5):
"""Initializes I3D model instance.
Args:
        num_classes: The number of outputs in the logit layer (default 157 in
            this implementation; 400 would match the Kinetics dataset).
spatial_squeeze: Whether to squeeze the spatial dimensions for the logits
before returning (default True).
final_endpoint: The model contains many possible endpoints.
`final_endpoint` specifies the last endpoint for the model to be built
up to. In addition to the output at `final_endpoint`, all the outputs
at endpoints up to `final_endpoint` will also be returned, in a
dictionary. `final_endpoint` must be one of
InceptionI3d.VALID_ENDPOINTS (default 'Logits').
name: A string (optional). The name of this module.
Raises:
ValueError: if `final_endpoint` is not recognized.
"""
if final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError('Unknown final endpoint %s' % final_endpoint)
super(InceptionI3d, self).__init__()
self._num_classes = num_classes
self._spatial_squeeze = spatial_squeeze
self._final_endpoint = final_endpoint
self.logits = None
if self._final_endpoint not in self.VALID_ENDPOINTS:
raise ValueError('Unknown final endpoint %s' % self._final_endpoint)
self.end_points = {}
end_point = 'Conv3d_1a_7x7'
self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[7, 7, 7],
stride=(2, 2, 2), padding=(3, 3, 3), name=name + end_point)
if self._final_endpoint == end_point: return
end_point = 'MaxPool3d_2a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
if self._final_endpoint == end_point: return
end_point = 'Conv3d_2b_1x1'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0,
name=name + end_point)
if self._final_endpoint == end_point: return
end_point = 'Conv3d_2c_3x3'
self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[3, 3, 3], padding=1,
name=name + end_point)
if self._final_endpoint == end_point: return
end_point = 'MaxPool3d_3a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2),
padding=0)
if self._final_endpoint == end_point: return
end_point = 'Mixed_3b'
self.end_points[end_point] = InceptionModule(192, [64, 96, 128, 16, 32, 32], name + end_point)
if self._final_endpoint == end_point: return
end_point = 'Mixed_3c'
self.end_points[end_point] = InceptionModule(256, [128, 128, 192, 32, 96, 64], name + end_point)
if self._final_endpoint == end_point: return
end_point = 'MaxPool3d_4a_3x3'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(2, 2, 2),
padding=0)
if self._final_endpoint == end_point: return
end_point = 'Mixed_4b'
self.end_points[end_point] = InceptionModule(128 + 192 + 96 + 64, [192, 96, 208, 16, 48, 64], name + end_point)
if self._final_endpoint == end_point: return
end_point = 'Mixed_4c'
self.end_points[end_point] = InceptionModule(192 + 208 + 48 + 64, [160, 112, 224, 24, 64, 64], name + end_point)
if self._final_endpoint == end_point: return
end_point = 'Mixed_4d'
self.end_points[end_point] = InceptionModule(160 + 224 + 64 + 64, [128, 128, 256, 24, 64, 64], name + end_point)
if self._final_endpoint == end_point: return
end_point = 'Mixed_4e'
self.end_points[end_point] = InceptionModule(128 + 256 + 64 + 64, [112, 144, 288, 32, 64, 64], name + end_point)
if self._final_endpoint == end_point: return
end_point = 'Mixed_4f'
self.end_points[end_point] = InceptionModule(112 + 288 + 64 + 64, [256, 160, 320, 32, 128, 128],
name + end_point)
if self._final_endpoint == end_point: return
end_point = 'MaxPool3d_5a_2x2'
self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[2, 2, 2], stride=(2, 2, 2),
padding=0)
if self._final_endpoint == end_point: return
end_point = 'Mixed_5b'
self.end_points[end_point] = InceptionModule(256 + 320 + 128 + 128, [256, 160, 320, 32, 128, 128],
name + end_point)
if self._final_endpoint == end_point: return
end_point = 'Mixed_5c'
self.end_points[end_point] = InceptionModule(256 + 320 + 128 + 128, [384, 192, 384, 48, 128, 128],
name + end_point)
if self._final_endpoint == end_point: return
end_point = 'Logits'
self.avg_pool = nn.AvgPool3d(kernel_size=[2, 7, 7],
stride=(1, 1, 1))
self.dropout = nn.Dropout(dropout_keep_prob)
self.logits = Unit3D(in_channels=384 + 384 + 128 + 128, output_channels=self._num_classes,
kernel_shape=[1, 1, 1],
padding=0,
activation_fn=None,
use_batch_norm=False,
use_bias=True,
name='logits')
self.build()
def replace_logits(self, num_classes):
self._num_classes = num_classes
self.logits = Unit3D(in_channels=384 + 384 + 128 + 128, output_channels=self._num_classes,
kernel_shape=[1, 1, 1],
padding=0,
activation_fn=None,
use_batch_norm=False,
use_bias=True,
name='logits')
def build(self):
for k in self.end_points.keys():
self.add_module(k, self.end_points[k])
def forward(self, x):
for end_point in self.VALID_ENDPOINTS:
if end_point in self.end_points:
x = self._modules[end_point](x) # use _modules to work with dataparallel
x = self.avg_pool(x)
if self._spatial_squeeze:
logits = x.squeeze(3).squeeze(3)
# logits is batch X time X classes, which is what we want to work with
return logits
def extract_features(self, x):
for end_point in self.VALID_ENDPOINTS:
if end_point in self.end_points:
x = self._modules[end_point](x)
return self.avg_pool(x)
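# Illustrative usage sketch (not part of the original module): building the
# network above and extracting features from a random clip. The input layout is
# (batch, channels, frames, height, width); 64 frames at 224x224 is assumed
# here purely for illustration.
if __name__ == "__main__":
    model = InceptionI3d(num_classes=400, in_channels=3)
    clip = torch.randn(1, 3, 64, 224, 224)
    with torch.no_grad():
        features = model.extract_features(clip)
    print(features.shape)  # roughly torch.Size([1, 1024, 7, 1, 1])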
| 42.048193
| 134
| 0.569914
|
8dd18ba55baad86b49dfa87f477fe4fbc4dd5190
| 7,621
|
py
|
Python
|
files/intraday_cron_lambda/index.py
|
dwp/dataworks-aws-ingest-replica
|
ed5fb0ebd57334be24244c711ed72b6f9ea2b734
|
[
"0BSD"
] | null | null | null |
files/intraday_cron_lambda/index.py
|
dwp/dataworks-aws-ingest-replica
|
ed5fb0ebd57334be24244c711ed72b6f9ea2b734
|
[
"0BSD"
] | 95
|
2021-03-29T15:02:03.000Z
|
2022-03-28T12:20:11.000Z
|
files/intraday_cron_lambda/index.py
|
dwp/dataworks-aws-ingest-replica
|
ed5fb0ebd57334be24244c711ed72b6f9ea2b734
|
[
"0BSD"
] | 1
|
2021-04-10T22:12:58.000Z
|
2021-04-10T22:12:58.000Z
|
import json
import logging
import os
import time
from uuid import uuid4
import base64
import ast
import boto3
from boto3.dynamodb.conditions import Attr
_logger = logging.getLogger()
_logger.setLevel(logging.INFO)
# Lambda environment vars
JOB_STATUS_TABLE = os.environ["job_status_table_name"]
SLACK_ALERT_ARN = os.environ["alert_topic_arn"]
LAUNCH_SNS_TOPIC_ARN = os.environ["launch_topic_arn"]
EMR_CONFIG_BUCKET = os.environ["emr_config_bucket"]
EMR_CONFIG_PREFIX = os.environ["emr_config_folder"]
COLLECTIONS_SECRET_NAME = os.environ["collections_secret_name"]
# Job Statuses & values stored in DynamoDB
TRIGGERED = "LAMBDA_TRIGGERED" # this lambda was triggered
WAITING = "WAITING" # this lambda is waiting up to 10 minutes for another cluster
DEFERRED = "DEFERRED" # this lambda timed out waiting, did not launch emr
LAUNCHED = "EMR_LAUNCHED" # this lambda posted to SNS topic to launch EMR cluster
PROCESSING = "EMR_PROCESSING" # emr cluster has started processing data
COMPLETED = "EMR_COMPLETED" # emr cluster processed the data successfully
EMR_FAILED = "EMR_FAILED" # emr cluster couldn't process data successfully
LAMBDA_FAILED = "LAMBDA_FAILED" # lambda encountered an error
# this lambda will not launch emr if another job is in one of these states
ACTIVE_STATES = [TRIGGERED, WAITING, LAUNCHED, PROCESSING, EMR_FAILED, LAMBDA_FAILED]
class PollingTimeoutError(TimeoutError):
pass
def get_collections_list_from_aws(secrets_client, collections_secret_name):
"""Parse collections returned by AWS Secrets Manager"""
return [
f"{j['db']}:{j['table']}"
for j in retrieve_secrets(secrets_client, collections_secret_name)[
"collections_all"
].values()
]
def retrieve_secrets(secrets_client, secret_name):
"""Get b64 encoded secrets from AWS Secrets Manager"""
response = secrets_client.get_secret_value(SecretId=secret_name)
response_binary = response["SecretString"]
response_decoded = base64.b64decode(response_binary).decode("utf-8")
response_dict = ast.literal_eval(response_decoded)
return response_dict
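# Illustrative sketch (not part of the original module): the decoded secret the
# two helpers above expect. The field names follow get_collections_list_from_aws();
# the concrete db/table values are made up for illustration.
_EXAMPLE_COLLECTIONS_SECRET = {
    "collections_all": {
        "0": {"db": "core", "table": "contract"},
        "1": {"db": "core", "table": "statement"},
    }
}
# With a secret that decodes to this payload, get_collections_list_from_aws()
# returns ["core:contract", "core:statement"].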
def update_db_items(table, collections, correlation_id: str, values: dict):
_logger.info(f"Updating db item: {values}")
updates = {key: {"Value": value} for key, value in values.items()}
for collection in collections:
table.update_item(
Key={
"CorrelationId": correlation_id,
"Collection": collection,
},
AttributeUpdates=updates,
)
def check_for_running_jobs(table, collections, correlation_id):
_logger.debug("Checking for running jobs")
results = table.scan(
FilterExpression=Attr("JobStatus").is_in(ACTIVE_STATES)
& Attr("CorrelationId").ne(correlation_id)
& Attr("Collection").is_in(collections),
)
return results["Items"]
def poll_previous_jobs(correlation_id, collections, table, timeout=300):
_logger.info("Polling for previous running jobs")
start_time = time.time()
running_jobs = check_for_running_jobs(table, collections, correlation_id)
while running_jobs:
_logger.info(
f"Waited {round(time.time() - start_time)}/{timeout}"
f" seconds for previous collections to complete:"
)
_logger.info(
"\n".join(
[
str({"topic": row["Collection"], "JobStatus": row["JobStatus"]})
for row in running_jobs
]
)
)
if time.time() >= (start_time + timeout):
update_db_items(table, collections, correlation_id, {"JobStatus": DEFERRED})
raise PollingTimeoutError(
f"Polling timeout ({timeout}s), job(s) still in progress"
)
time.sleep(5)
running_jobs = check_for_running_jobs(table, collections, correlation_id)
return True
def launch_cluster(
correlation_id: str,
triggered_time: int,
collections,
sns_client,
job_table,
topic_arn: str,
):
    # The cluster takes 10-15 minutes to provision; this provides adequate time
    # for the pipeline to ingest data up to the current timestamp into HBase.
new_end_time = int(time.time() * 1000)
cluster_overrides = json.dumps(
{
"s3_overrides": {
"emr_launcher_config_s3_bucket": EMR_CONFIG_BUCKET,
"emr_launcher_config_s3_folder": EMR_CONFIG_PREFIX,
},
"additional_step_args": {
"spark-submit": [
"scheduled",
"--correlation_id",
str(correlation_id),
"--triggered_time",
str(triggered_time),
"--end_time",
str(new_end_time),
"--collections",
]
+ collections
},
}
)
_logger.info("Launching emr cluster")
_logger.info("Collections: " + " ".join(collections))
_logger.debug({"Cluster Overrides": cluster_overrides})
_ = sns_client.publish(
TopicArn=topic_arn,
Message=cluster_overrides,
Subject="Launch ingest-replica emr cluster",
)
update_db_items(job_table, collections, correlation_id, {"JobStatus": LAUNCHED})
def handler(event, context):
correlation_id = str(uuid4())
triggered_time = round(time.time() * 1000)
_logger.info(
{
"correlation_id": correlation_id,
"triggered_time": triggered_time,
}
)
sns_client = boto3.client("sns")
dynamodb = boto3.resource("dynamodb")
job_table = dynamodb.Table(JOB_STATUS_TABLE)
secrets_client = boto3.session.Session().client(service_name="secretsmanager")
collections = get_collections_list_from_aws(secrets_client, COLLECTIONS_SECRET_NAME)
_logger.info({"collections": collections})
update_db_items(
job_table,
collections,
correlation_id,
{"JobStatus": TRIGGERED, "TriggeredTime": triggered_time},
)
try:
update_db_items(job_table, collections, correlation_id, {"JobStatus": WAITING})
poll_previous_jobs(
correlation_id=correlation_id, collections=collections, table=job_table
)
launch_cluster(
correlation_id=correlation_id,
triggered_time=triggered_time,
collections=collections,
sns_client=sns_client,
job_table=job_table,
topic_arn=LAUNCH_SNS_TOPIC_ARN,
)
except PollingTimeoutError:
# Dynamodb already updated with status
alert_message = json.dumps(
{
"severity": "High",
"notification_type": "Warning",
"title_text": "Intraday Cluster Launch Deferred - Previous cluster still running",
}
)
sns_client.publish(
TargetArn=SLACK_ALERT_ARN,
Message=alert_message,
)
raise
except Exception:
# Update Dynamodb with failure status
update_db_items(
job_table, collections, correlation_id, {"JobStatus": LAMBDA_FAILED}
)
alert_message = json.dumps(
{
"severity": "Critical",
"notification_type": "Error",
"title_text": "intraday_cron_launcher Lambda Failed",
"log_with_here": "true",
}
)
sns_client.publish(
TargetArn=SLACK_ALERT_ARN,
Message=alert_message,
)
raise
| 34.022321
| 98
| 0.638368
|
5789c4592b7d9e925061612cb4b2ca07d7266e12
| 759
|
py
|
Python
|
Python files/dijkstra test.py
|
mattl1598/testing
|
cd8124773b83a07301c507ffbb9ccaafbfe7a274
|
[
"Unlicense"
] | null | null | null |
Python files/dijkstra test.py
|
mattl1598/testing
|
cd8124773b83a07301c507ffbb9ccaafbfe7a274
|
[
"Unlicense"
] | 1
|
2018-04-15T22:59:15.000Z
|
2018-04-15T22:59:15.000Z
|
Python files/dijkstra test.py
|
mattl1598/Project-Mochachino
|
cd8124773b83a07301c507ffbb9ccaafbfe7a274
|
[
"Unlicense"
] | null | null | null |
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: matthewl9
#
# Created: 06/03/2018
# Copyright: (c) matthewl9 2018
# Licence: <your licence>
#-------------------------------------------------------------------------------
import random
import dijkstra  # works for dictionaries, not matrices
def createMatrix(adjMatrix):
for x in range(9):
for y in range(9):
rand = random.randint(1,15)
#print
adjMatrix[x][y] = rand
return adjMatrix
def main():
w, h = 9, 9;
adjMatrix = [[None for x in range(w)] for y in range(h)]
adjMatrix = createMatrix(adjMatrix)
print(adjMatrix)
print(dijkstra.shortestPath(adjMatrix,2,7))
if __name__ == '__main__':
main()
| 25.3
| 80
| 0.523057
|
76781f193212a3f2577c47e98416a4675e87446a
| 20,137
|
py
|
Python
|
openprocurement/auctions/lease/tests/base.py
|
antonkorobko/openprocurement.auctions.lease
|
56e04f2a6f3593bf2b1dd867fad7d7deec1f425a
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/auctions/lease/tests/base.py
|
antonkorobko/openprocurement.auctions.lease
|
56e04f2a6f3593bf2b1dd867fad7d7deec1f425a
|
[
"Apache-2.0"
] | 23
|
2018-05-18T13:55:58.000Z
|
2020-01-06T18:52:48.000Z
|
openprocurement/auctions/lease/tests/base.py
|
antonkorobko/openprocurement.auctions.lease
|
56e04f2a6f3593bf2b1dd867fad7d7deec1f425a
|
[
"Apache-2.0"
] | 9
|
2018-05-15T09:32:12.000Z
|
2019-04-30T09:54:47.000Z
|
# -*- coding: utf-8 -*-
import os
from base64 import b64encode
from copy import deepcopy
from datetime import datetime, timedelta
from urllib import urlencode
from uuid import uuid4
from openprocurement.auctions.core.tests.base import (
BaseWebTest as CoreBaseWebTest,
BaseAuctionWebTest as CoreBaseAuctionWebTest,
MOCK_CONFIG as BASE_MOCK_CONFIG
)
from openprocurement.auctions.core.utils import (
apply_data_patch,
get_now,
SANDBOX_MODE,
connection_mock_config
)
from openprocurement.auctions.lease.tests.fixtures import PARTIAL_MOCK_CONFIG
from openprocurement.auctions.lease.constants import (
DEFAULT_PROCUREMENT_METHOD_TYPE_LEASE
)
DEFAULT_ACCELERATION = 1440
now = datetime.now()
test_organization = {
"name": u"Державне управління справами",
"identifier": {
"scheme": u"UA-EDR",
"id": u"00037256",
"uri": u"http://www.dus.gov.ua/"
},
"address": {
"countryName": u"Україна",
"postalCode": u"01220",
"region": u"м. Київ",
"locality": u"м. Київ",
"streetAddress": u"вул. Банкова, 11, корпус 1"
},
"contactPoint": {
"name": u"Державне управління справами",
"telephone": u"0440000000"
}
}
test_procuringEntity = test_organization.copy()
test_auction_data = {
"title": u"футляри до державних нагород",
"lotIdentifier": u"219560",
"tenderAttempts": 1,
"procuringEntity": test_procuringEntity,
"value": {
"amount": 100,
"currency": u"UAH"
},
"minimalStep": {
"amount": 35,
"currency": u"UAH"
},
"contractTerms": {
"type": "lease",
"leaseTerms": {
"leaseDuration": "P10Y",
"taxHolidays": [
{
"taxHolidaysDuration": "P5M",
"conditions": "conditions description",
"value": {
"amount": 100.0,
"currency": "UAH",
"valueAddedTaxIncluded": True
}
}
],
"escalationClauses": [
{
"escalationPeriodicity": "P5M",
"escalationStepPercentage": 0.1055,
"conditions": "conditions description"
}
]
}
},
"items": [
{
"description": u"Земля для військовослужбовців",
"classification": {
"scheme": "CAV-PS",
"id": "04121000-2",
"description": "Земельні ділянки"
},
"additionalClassifications": [{
"scheme": "CPVS",
"id": "PA01-7",
"description": "This field is required."
}],
"unit": {
"name": u"item",
"code": u"44617100-9"
},
"quantity": 5.001,
"contractPeriod": {
"startDate": (now + timedelta(days=2)).isoformat(),
"endDate": (now + timedelta(days=5)).isoformat()
},
"address": {
"countryName": u"Україна",
"postalCode": "79000",
"region": u"м. Київ",
"locality": u"м. Київ",
"streetAddress": u"вул. Банкова 1"
}
}
],
"auctionPeriod": {
"startDate": (now.date() + timedelta(days=14)).isoformat()
},
"procurementMethodType": DEFAULT_PROCUREMENT_METHOD_TYPE_LEASE,
}
if SANDBOX_MODE:
test_auction_data['procurementMethodDetails'] = 'quick, accelerator={}'.format(DEFAULT_ACCELERATION)
test_auction_maximum_data = deepcopy(test_auction_data)
test_auction_maximum_data.update({
"title_en" : u"Cases with state awards",
"title_ru" : u"футляры к государственным наградам",
"description" : u"футляри до державних нагород",
"description_en" : u"Cases with state awards",
"description_ru" : u"футляры к государственным наградам"
})
test_auction_maximum_data["items"][0].update({
"description_en" : u"Cases with state awards",
"description_ru" : u"футляры к государственным наградам"
})
test_features_auction_data = test_auction_data.copy()
test_features_item = test_features_auction_data['items'][0].copy()
test_features_item['id'] = "1"
test_features_auction_data['items'] = [test_features_item]
test_features_auction_data["features"] = [
{
"code": "OCDS-123454-AIR-INTAKE",
"featureOf": "item",
"relatedItem": "1",
"title": u"Потужність всмоктування",
"title_en": "Air Intake",
"description": u"Ефективна потужність всмоктування пилососа, в ватах (аероватах)",
"enum": [
{
"value": 0.1,
"title": u"До 1000 Вт"
},
{
"value": 0.15,
"title": u"Більше 1000 Вт"
}
]
},
{
"code": "OCDS-123454-YEARS",
"featureOf": "tenderer",
"title": u"Років на ринку",
"title_en": "Years trading",
"description": u"Кількість років, які організація учасник працює на ринку",
"enum": [
{
"value": 0.05,
"title": u"До 3 років"
},
{
"value": 0.1,
"title": u"Більше 3 років, менше 5 років"
},
{
"value": 0.15,
"title": u"Більше 5 років"
}
]
}
]
base_test_bids = [
{
"tenderers": [
test_organization
],
"value": {
"amount": 469,
"currency": "UAH",
"valueAddedTaxIncluded": True
}
},
{
"tenderers": [
test_organization
],
"value": {
"amount": 479,
"currency": "UAH",
"valueAddedTaxIncluded": True
}
}
]
test_bids = []
for i in base_test_bids:
i = deepcopy(i)
i.update({'qualified': True})
test_bids.append(i)
test_lots = [
{
'title': 'lot title',
'description': 'lot description',
'value': test_auction_data['value'],
'minimalStep': test_auction_data['minimalStep'],
}
]
test_features = [
{
"code": "code_item",
"featureOf": "item",
"relatedItem": "1",
"title": u"item feature",
"enum": [
{
"value": 0.01,
"title": u"good"
},
{
"value": 0.02,
"title": u"best"
}
]
},
{
"code": "code_tenderer",
"featureOf": "tenderer",
"title": u"tenderer feature",
"enum": [
{
"value": 0.01,
"title": u"good"
},
{
"value": 0.02,
"title": u"best"
}
]
}
]
test_financial_auction_data = deepcopy(test_auction_data)
test_financial_auction_data["procurementMethodType"] = DEFAULT_PROCUREMENT_METHOD_TYPE_LEASE
test_financial_organization = deepcopy(test_organization)
test_financial_organization['additionalIdentifiers'] = [{
"scheme": u"UA-FIN",
"id": u"А01 457213"
}]
test_financial_bids = []
for i in test_bids:
bid = deepcopy(i)
bid.update({'eligible': True})
bid['tenderers'] = [test_financial_organization]
test_financial_bids.append(bid)
MOCK_CONFIG = connection_mock_config(PARTIAL_MOCK_CONFIG,
base=BASE_MOCK_CONFIG,
connector=('plugins', 'api', 'plugins',
'auctions.core', 'plugins'))
class BaseWebTest(CoreBaseWebTest):
"""Base Web Test to test openprocurement.auctions.lease.
    It sets up the database before each test and deletes it afterwards.
"""
relative_to = os.path.dirname(__file__)
mock_config = MOCK_CONFIG
class BaseAuctionWebTest(CoreBaseAuctionWebTest):
relative_to = os.path.dirname(__file__)
initial_data = test_auction_data
initial_organization = test_organization
mock_config = MOCK_CONFIG
def go_to_rectificationPeriod_end(self):
now = get_now()
self.set_status('active.tendering', {
"rectificationPeriod": {
"startDate": (now - timedelta(days=14)).isoformat(),
"endDate": (now - (timedelta(minutes=6) if SANDBOX_MODE else timedelta(days=6))).isoformat()
},
"tenderPeriod": {
"startDate": (now - timedelta(days=14)).isoformat(),
"endDate": (now + (timedelta(minutes=1) if SANDBOX_MODE else timedelta(days=1))).isoformat()
},
"enquiryPeriod": {
"startDate": (now - timedelta(days=14)).isoformat(),
"endDate": (now + (timedelta(minutes=1) if SANDBOX_MODE else timedelta(days=1))).isoformat()
},
"auctionPeriod": {
"startDate": (now + timedelta(days=1)).isoformat()
}
})
def set_status(self, status, extra=None):
data = {'status': status}
if status == 'active.tendering':
data.update({
"enquiryPeriod": {
"startDate": (now).isoformat(),
"endDate": (now + timedelta(days=7)).isoformat()
},
"rectificationPeriod": {
"startDate": (now).isoformat(),
"endDate": (now + timedelta(days=1)).isoformat()
},
"tenderPeriod": {
"startDate": (now).isoformat(),
"endDate": (now + timedelta(days=7)).isoformat()
}
})
elif status == 'active.auction':
data.update({
"enquiryPeriod": {
"startDate": (now - timedelta(days=7)).isoformat(),
"endDate": (now).isoformat()
},
"rectificationPeriod": {
"startDate": (now - timedelta(days=7)).isoformat(),
"endDate": (now - timedelta(days=6)).isoformat()
},
"tenderPeriod": {
"startDate": (now - timedelta(days=7)).isoformat(),
"endDate": (now).isoformat()
},
"auctionPeriod": {
"startDate": (now).isoformat()
}
})
if self.initial_lots:
data.update({
'lots': [
{
"auctionPeriod": {
"startDate": (now).isoformat()
}
}
for i in self.initial_lots
]
})
elif status == 'active.qualification':
data.update({
"enquiryPeriod": {
"startDate": (now - timedelta(days=8)).isoformat(),
"endDate": (now - timedelta(days=1)).isoformat()
},
"rectificationPeriod": {
"startDate": (now - timedelta(days=8)).isoformat(),
"endDate": (now - timedelta(days=6)).isoformat()
},
"tenderPeriod": {
"startDate": (now - timedelta(days=8)).isoformat(),
"endDate": (now - timedelta(days=1)).isoformat()
},
"auctionPeriod": {
"startDate": (now - timedelta(days=1)).isoformat(),
"endDate": (now).isoformat()
},
"awardPeriod": {
"startDate": (now).isoformat()
}
})
if self.initial_lots:
data.update({
'lots': [
{
"auctionPeriod": {
"startDate": (now - timedelta(days=1)).isoformat(),
"endDate": (now).isoformat()
}
}
for i in self.initial_lots
]
})
elif status == 'active.awarded':
data.update({
"enquiryPeriod": {
"startDate": (now - timedelta(days=8)).isoformat(),
"endDate": (now - timedelta(days=1)).isoformat()
},
"rectificationPeriod": {
"startDate": (now - timedelta(days=8)).isoformat(),
"endDate": (now - timedelta(days=6)).isoformat()
},
"tenderPeriod": {
"startDate": (now - timedelta(days=8)).isoformat(),
"endDate": (now - timedelta(days=1)).isoformat()
},
"auctionPeriod": {
"startDate": (now - timedelta(days=1)).isoformat(),
"endDate": (now).isoformat()
},
"awardPeriod": {
"startDate": (now).isoformat(),
"endDate": (now).isoformat()
}
})
if self.initial_lots:
data.update({
'lots': [
{
"auctionPeriod": {
"startDate": (now - timedelta(days=1)).isoformat(),
"endDate": (now).isoformat()
}
}
for i in self.initial_lots
]
})
elif status == 'complete':
data.update({
"enquiryPeriod": {
"startDate": (now - timedelta(days=18)).isoformat(),
"endDate": (now - timedelta(days=11)).isoformat()
},
"rectificationPeriod": {
"startDate": (now - timedelta(days=18)).isoformat(),
"endDate": (now - timedelta(days=17)).isoformat()
},
"tenderPeriod": {
"startDate": (now - timedelta(days=18)).isoformat(),
"endDate": (now - timedelta(days=11)).isoformat()
},
"auctionPeriod": {
"startDate": (now - timedelta(days=11)).isoformat(),
"endDate": (now - timedelta(days=10)).isoformat()
},
"awardPeriod": {
"startDate": (now - timedelta(days=10)).isoformat(),
"endDate": (now - timedelta(days=10)).isoformat()
}
})
if self.initial_lots:
data.update({
'lots': [
{
"auctionPeriod": {
"startDate": (now - timedelta(days=11)).isoformat(),
"endDate": (now - timedelta(days=10)).isoformat()
}
}
for i in self.initial_lots
]
})
if extra:
data.update(extra)
auction = self.db.get(self.auction_id)
auction.update(apply_data_patch(auction, data))
self.db.save(auction)
authorization = self.app.authorization
self.app.authorization = ('Basic', ('chronograph', ''))
#response = self.app.patch_json('/auctions/{}'.format(self.auction_id), {'data': {'id': self.auction_id}})
response = self.app.get('/auctions/{}'.format(self.auction_id))
self.app.authorization = authorization
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
return response
def upload_auction_protocol(self, award):
award_id = award['id']
response = self.app.post_json('/auctions/{}/awards/{}/documents?acc_token={}'.format(self.auction_id, award_id, self.auction_token),
{'data': {
'title': 'auction_protocol.pdf',
'url': self.generate_docservice_url(),
'hash': 'md5:' + '0' * 32,
'format': 'application/msword',
"description": "auction protocol",
"documentType": 'auctionProtocol',
}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
doc_id = response.json["data"]['id']
self.assertIn(doc_id, response.headers['Location'])
self.assertEqual('auction_protocol.pdf', response.json["data"]["title"])
self.assertEqual('auctionProtocol', response.json["data"]["documentType"])
self.assertEqual('auction_owner', response.json["data"]["author"])
def post_auction_results(self):
authorization = self.app.authorization
self.app.authorization = ('Basic', ('auction', ''))
now = get_now()
auction_result = {
'bids': [
{
"id": b['id'],
"date": (now - timedelta(seconds=i)).isoformat(),
"value": b['value']
}
for i, b in enumerate(self.initial_bids)
]
}
response = self.app.post_json('/auctions/{}/auction'.format(self.auction_id), {'data': auction_result})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
auction = response.json['data']
self.assertEqual('active.qualification', auction["status"])
self.first_award = auction['awards'][0]
self.second_award = auction['awards'][1]
self.first_award_id = self.first_award['id']
self.second_award_id = self.second_award['id']
self.app.authorization = authorization
def generate_docservice_url(self):
uuid = uuid4().hex
key = self.app.app.registry.docservice_key
keyid = key.hex_vk()[:8]
signature = b64encode(key.signature("{}\0{}".format(uuid, '0' * 32)))
query = {'Signature': signature, 'KeyID': keyid}
return "http://localhost/get/{}?{}".format(uuid, urlencode(query))
def check_award_status(self, auction_id, award_id, target_status):
response = self.app.get(
'/auctions/{0}/awards/{1}'.format(
auction_id,
award_id))
current_status = response.json['data']['status']
self.assertEqual(
current_status,
target_status,
"Award status {0} isn't expected. Current status: {1}".format(
current_status,
target_status))
def patch_award(self, award_id, status, bid_token=None):
token = self.auction_token
if bid_token:
token = bid_token
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(
self.auction_id, award_id, token
), {"data": {"status": status}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
return response
def forbidden_patch_award(self, award_id, before_status, status):
response = self.app.patch_json('/auctions/{}/awards/{}?acc_token={}'.format(
self.auction_id, award_id, self.auction_token
), {"data": {"status": status}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"], "Can't switch award ({}) status to ({}) status".format(before_status, status))
class BaseFinancialAuctionWebTest(BaseAuctionWebTest):
relative_to = os.path.dirname(__file__)
initial_data = test_financial_auction_data
initial_organization = test_financial_organization
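# Illustrative sketch (not part of the original module): how the web-test bases
# above are typically subclassed in the suite. The test body is made up for
# illustration only.
class ExampleAuctionResourceTest(BaseAuctionWebTest):
    def test_get_auction(self):
        response = self.app.get('/auctions/{}'.format(self.auction_id))
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(response.content_type, 'application/json')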
| 35.390158
| 146
| 0.498734
|
1badc4a5e7f0f5b1236d514d0d9e94d563194731
| 397
|
py
|
Python
|
djangocrud/djangocrud/asgi.py
|
roygoswamisuvankar/Djang-Crud
|
83b488971b1f6e437a3c3e3b8b53c65a5e4e9db8
|
[
"MIT"
] | 2
|
2021-09-24T05:38:42.000Z
|
2021-09-24T09:45:32.000Z
|
djangocrud/asgi.py
|
a-m4hdi/djangocrud
|
dce70c3e2a4bcfb5b67c9331628d3fda0fe51687
|
[
"MIT"
] | null | null | null |
djangocrud/asgi.py
|
a-m4hdi/djangocrud
|
dce70c3e2a4bcfb5b67c9331628d3fda0fe51687
|
[
"MIT"
] | null | null | null |
"""
ASGI config for djangocrud project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangocrud.settings')
application = get_asgi_application()
| 23.352941
| 78
| 0.788413
|
302a49b81f8a6d8ea143b5ea2fffdb21e6180ead
| 486
|
py
|
Python
|
profiles_api/migrations/0003_profilefeeditem_updated_on.py
|
prajilmv/profiles-rest-api
|
96c19ab62f59f8eeb2ca5917ac1f10133c634f7b
|
[
"MIT"
] | null | null | null |
profiles_api/migrations/0003_profilefeeditem_updated_on.py
|
prajilmv/profiles-rest-api
|
96c19ab62f59f8eeb2ca5917ac1f10133c634f7b
|
[
"MIT"
] | 6
|
2020-06-06T01:39:25.000Z
|
2022-02-10T14:23:41.000Z
|
profiles_api/migrations/0003_profilefeeditem_updated_on.py
|
prajilmv/profiles-rest-api
|
96c19ab62f59f8eeb2ca5917ac1f10133c634f7b
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2 on 2020-04-26 12:29
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0002_profilefeeditem'),
]
operations = [
migrations.AddField(
model_name='profilefeeditem',
name='updated_on',
field=models.DateTimeField(default=django.utils.timezone.now),
preserve_default=False,
),
]
| 23.142857
| 74
| 0.63786
|
5605866908e5d7819e21f4724552b3692c098712
| 7,059
|
py
|
Python
|
homeassistant/components/saj/sensor.py
|
bluestripe/home-assistant
|
8791a48328076dd61c1f3e80c10dc54a7c8c9c18
|
[
"Apache-2.0"
] | 2
|
2019-11-06T17:13:45.000Z
|
2019-11-06T17:13:51.000Z
|
homeassistant/components/saj/sensor.py
|
bluestripe/home-assistant
|
8791a48328076dd61c1f3e80c10dc54a7c8c9c18
|
[
"Apache-2.0"
] | 2
|
2021-02-08T20:37:03.000Z
|
2021-09-08T01:23:59.000Z
|
homeassistant/components/saj/sensor.py
|
bluestripe/home-assistant
|
8791a48328076dd61c1f3e80c10dc54a7c8c9c18
|
[
"Apache-2.0"
] | null | null | null |
"""SAJ solar inverter interface."""
import asyncio
from datetime import date
import logging
import pysaj
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_TYPE,
CONF_USERNAME,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
MASS_KILOGRAMS,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import CALLBACK_TYPE, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
MIN_INTERVAL = 5
MAX_INTERVAL = 300
UNIT_OF_MEASUREMENT_HOURS = "h"
INVERTER_TYPES = ["ethernet", "wifi"]
SAJ_UNIT_MAPPINGS = {
"W": POWER_WATT,
"kWh": ENERGY_KILO_WATT_HOUR,
"h": UNIT_OF_MEASUREMENT_HOURS,
"kg": MASS_KILOGRAMS,
"°C": TEMP_CELSIUS,
"": None,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_TYPE, default=INVERTER_TYPES[0]): vol.In(INVERTER_TYPES),
vol.Inclusive(CONF_USERNAME, "credentials"): cv.string,
vol.Inclusive(CONF_PASSWORD, "credentials"): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up SAJ sensors."""
remove_interval_update = None
wifi = config[CONF_TYPE] == INVERTER_TYPES[1]
# Init all sensors
sensor_def = pysaj.Sensors(wifi)
# Use all sensors by default
hass_sensors = []
for sensor in sensor_def:
hass_sensors.append(SAJsensor(sensor))
kwargs = {}
if wifi:
kwargs["wifi"] = True
if config.get(CONF_USERNAME) and config.get(CONF_PASSWORD):
kwargs["username"] = config[CONF_USERNAME]
kwargs["password"] = config[CONF_PASSWORD]
try:
saj = pysaj.SAJ(config[CONF_HOST], **kwargs)
await saj.read(sensor_def)
except pysaj.UnauthorizedException:
_LOGGER.error("Username and/or password is wrong.")
return
except pysaj.UnexpectedResponseException as err:
_LOGGER.error(
"Error in SAJ, please check host/ip address. Original error: %s", err
)
return
async_add_entities(hass_sensors)
async def async_saj():
"""Update all the SAJ sensors."""
tasks = []
values = await saj.read(sensor_def)
for sensor in hass_sensors:
state_unknown = False
if not values:
# SAJ inverters are powered by DC via solar panels and thus are
# offline after the sun has set. If a sensor resets on a daily
# basis like "today_yield", this reset won't happen automatically.
# Code below checks if today > day when sensor was last updated
# and if so: set state to None.
# Sensors with live values like "temperature" or "current_power"
# will also be reset to None.
if (sensor.per_day_basis and date.today() > sensor.date_updated) or (
not sensor.per_day_basis and not sensor.per_total_basis
):
state_unknown = True
task = sensor.async_update_values(unknown_state=state_unknown)
if task:
tasks.append(task)
if tasks:
await asyncio.wait(tasks)
return values
def start_update_interval(event):
"""Start the update interval scheduling."""
nonlocal remove_interval_update
remove_interval_update = async_track_time_interval_backoff(hass, async_saj)
def stop_update_interval(event):
"""Properly cancel the scheduled update."""
remove_interval_update() # pylint: disable=not-callable
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_update_interval)
hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, stop_update_interval)
@callback
def async_track_time_interval_backoff(hass, action) -> CALLBACK_TYPE:
"""Add a listener that fires repetitively and increases the interval when failed."""
remove = None
interval = MIN_INTERVAL
async def interval_listener(now=None):
"""Handle elapsed interval with backoff."""
nonlocal interval, remove
try:
if await action():
interval = MIN_INTERVAL
else:
interval = min(interval * 2, MAX_INTERVAL)
finally:
remove = async_call_later(hass, interval, interval_listener)
hass.async_create_task(interval_listener())
def remove_listener():
"""Remove interval listener."""
if remove:
remove() # pylint: disable=not-callable
return remove_listener
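# Illustrative sketch (not part of the original module): the failure backoff
# schedule produced by interval_listener() above with the constants defined in
# this file (MIN_INTERVAL = 5, MAX_INTERVAL = 300).
def _example_backoff_schedule(steps=8):
    interval, schedule = MIN_INTERVAL, []
    for _ in range(steps):
        interval = min(interval * 2, MAX_INTERVAL)
        schedule.append(interval)
    return schedule  # [10, 20, 40, 80, 160, 300, 300, 300]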
class SAJsensor(Entity):
"""Representation of a SAJ sensor."""
def __init__(self, pysaj_sensor):
"""Initialize the sensor."""
self._sensor = pysaj_sensor
self._state = self._sensor.value
@property
def name(self):
"""Return the name of the sensor."""
return f"saj_{self._sensor.name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return SAJ_UNIT_MAPPINGS[self._sensor.unit]
@property
def device_class(self):
"""Return the device class the sensor belongs to."""
if self.unit_of_measurement == POWER_WATT:
return DEVICE_CLASS_POWER
if (
self.unit_of_measurement == TEMP_CELSIUS
or self._sensor.unit == TEMP_FAHRENHEIT
):
return DEVICE_CLASS_TEMPERATURE
@property
def should_poll(self) -> bool:
"""SAJ sensors are updated & don't poll."""
return False
@property
def per_day_basis(self) -> bool:
"""Return if the sensors value is on daily basis or not."""
return self._sensor.per_day_basis
@property
def per_total_basis(self) -> bool:
"""Return if the sensors value is cummulative or not."""
return self._sensor.per_total_basis
@property
def date_updated(self) -> date:
"""Return the date when the sensor was last updated."""
return self._sensor.date
def async_update_values(self, unknown_state=False):
"""Update this sensor."""
update = False
if self._sensor.value != self._state:
update = True
self._state = self._sensor.value
if unknown_state and self._state is not None:
update = True
self._state = None
return self.async_update_ha_state() if update else None
@property
def unique_id(self):
"""Return a unique identifier for this sensor."""
return f"{self._sensor.name}"
| 30.166667
| 88
| 0.647825
|
bcfe191ae4c6eb729a4ffc010b6e0bae62bb544d
| 330
|
py
|
Python
|
spine_json_lib/data/constants.py
|
ivan-ah/spine-json-lib
|
1ea8460127f005d57af56090d2a48e6039437306
|
[
"MIT"
] | 6
|
2019-12-02T15:25:57.000Z
|
2021-11-02T04:14:19.000Z
|
spine_json_lib/data/constants.py
|
ivan-ah/spine-json-lib
|
1ea8460127f005d57af56090d2a48e6039437306
|
[
"MIT"
] | 3
|
2020-03-20T11:09:22.000Z
|
2022-02-18T10:07:26.000Z
|
spine_json_lib/data/constants.py
|
ivan-ah/spine-json-lib
|
1ea8460127f005d57af56090d2a48e6039437306
|
[
"MIT"
] | 2
|
2019-12-02T14:56:50.000Z
|
2020-02-24T07:53:20.000Z
|
from spine_json_lib.data.spine_version_type import SpineVersion
SPINE_3_7_VERSION = SpineVersion("3.7")
SPINE_3_8_VERSION = SpineVersion("3.8")
SPINE_LATEST_3_7_VERSION = SpineVersion("3.7.93")
JSON_SPINE_TEMPLATE = "export_json_config.json"
SKEL_SPINE_TEMPLATE = "export_bin_config.json"
UI_SPINE_TEMPLATE = "export_png.json"
| 30
| 63
| 0.824242
|
925a183eb7dd4504a7fc1b83aefede15fbee1b41
| 1,672
|
py
|
Python
|
slam/remeshing.py
|
aymanesouani/slam
|
85ecfa392e60babf87e5a34ef66d439ba62ae243
|
[
"MIT"
] | 6
|
2019-06-07T16:01:06.000Z
|
2020-12-04T12:43:54.000Z
|
slam/remeshing.py
|
aymanesouani/slam
|
85ecfa392e60babf87e5a34ef66d439ba62ae243
|
[
"MIT"
] | 25
|
2020-06-22T20:42:06.000Z
|
2021-01-01T09:52:22.000Z
|
slam/remeshing.py
|
aymanesouani/slam
|
85ecfa392e60babf87e5a34ef66d439ba62ae243
|
[
"MIT"
] | 17
|
2019-09-10T13:19:03.000Z
|
2021-12-14T15:53:49.000Z
|
def spherical_interpolation_nearest_neigbhor(source_spherical_mesh,
target_spherical_mesh,
info_to_interpolate):
"""
nearest neighbor interpolation between two spheres
For each vertex of target_spherical_mesh, find the nearest one
in source_spherical_mesh and use this vertex-level correspondence
to pick values in info_to_interpolate
:param source_spherical_mesh: spherical Trimesh object
:param target_spherical_mesh: spherical Trimesh object
:param info_to_interpolate: vector with shape[0] equal to the
number of vertices in source_spherical_mesh
:return: interpolated info_to_interpolate
"""
# This line would interpolate the reverse way (each vertex of
# source_spherical_mesh, find the nearest one in target_spherical_mesh
# distance, index =
# target_spherical_mesh.kdtree.query(source_spherical_mesh.vertices)
# import time
# t0 = time.time()
# the use of kdtree from trimesh is ~100x faster than the loop hereafter
distance, index = \
source_spherical_mesh.kdtree.query(target_spherical_mesh.vertices)
# t1 = time.time()
# source_vertex_number = source_spherical_mesh.vertices.shape[0]
# nn_corresp = []
# for v in target_spherical_mesh.vertices:
# nn_tmp = np.argmin(np.sum(np.square(
# np.tile(v, (source_vertex_number, 1))
# - source_spherical_mesh.vertices), 1))
# nn_corresp.append(nn_tmp)
# t2 = time.time()
# print('with kdtree :'+str(t1-t0))
# print('with loop :'+str(t2-t1))
return info_to_interpolate[index]
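# Illustrative usage sketch (hedged; `source_sphere`, `target_sphere` and
# `texture` are hypothetical names, assuming two spherical trimesh.Trimesh
# meshes and a per-vertex array defined on the source sphere):
#
#   import numpy as np
#   texture = np.random.rand(len(source_sphere.vertices))
#   resampled = spherical_interpolation_nearest_neigbhor(
#       source_sphere, target_sphere, texture)
#   # one interpolated value per vertex of the target sphere
#   assert resampled.shape[0] == len(target_sphere.vertices)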
| 39.809524
| 76
| 0.687201
|
bb4a3b93e30e0b72719ed6c6d02436b309b40564
| 52,531
|
py
|
Python
|
python/ccxt/async_support/kraken.py
|
xeddmc/ccxt
|
9ddd88e6bbc4b2162cf45d331995bb86235d2a59
|
[
"MIT"
] | 1
|
2021-07-07T10:47:28.000Z
|
2021-07-07T10:47:28.000Z
|
python/ccxt/async_support/kraken.py
|
xeddmc/ccxt
|
9ddd88e6bbc4b2162cf45d331995bb86235d2a59
|
[
"MIT"
] | 1
|
2020-12-13T04:57:39.000Z
|
2020-12-13T04:57:39.000Z
|
python/ccxt/async_support/kraken.py
|
xeddmc/ccxt
|
9ddd88e6bbc4b2162cf45d331995bb86235d2a59
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
    basestring  # Python 2: basestring exists as a builtin
except NameError:
    basestring = str  # Python 3: basestring is gone, fall back to str
import base64
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
class kraken (Exchange):
def describe(self):
return self.deep_extend(super(kraken, self).describe(), {
'id': 'kraken',
'name': 'Kraken',
'countries': ['US'],
'version': '0',
'rateLimit': 3000,
'certified': True,
'has': {
'createDepositAddress': True,
'fetchDepositAddress': True,
'fetchTradingFee': True,
'fetchTradingFees': True,
'CORS': False,
'fetchCurrencies': True,
'fetchTickers': True,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchWithdrawals': True,
'fetchDeposits': True,
'withdraw': True,
'fetchLedgerEntry': True,
'fetchLedger': True,
},
'marketsByAltname': {},
'timeframes': {
'1m': '1',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'4h': '240',
'1d': '1440',
'1w': '10080',
'2w': '21600',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27766599-22709304-5ede-11e7-9de1-9f33732e1509.jpg',
'api': {
'public': 'https://api.kraken.com',
'private': 'https://api.kraken.com',
'zendesk': 'https://support.kraken.com/hc/en-us/articles/',
},
'www': 'https://www.kraken.com',
'doc': 'https://www.kraken.com/features/api',
'fees': 'https://www.kraken.com/en-us/features/fee-schedule',
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': 0.26 / 100,
'maker': 0.16 / 100,
'tiers': {
'taker': [
[0, 0.0026],
[50000, 0.0024],
[100000, 0.0022],
[250000, 0.0020],
[500000, 0.0018],
[1000000, 0.0016],
[2500000, 0.0014],
[5000000, 0.0012],
[10000000, 0.0001],
],
'maker': [
[0, 0.0016],
[50000, 0.0014],
[100000, 0.0012],
[250000, 0.0010],
[500000, 0.0008],
[1000000, 0.0006],
[2500000, 0.0004],
[5000000, 0.0002],
[10000000, 0.0],
],
},
},
# self is a bad way of hardcoding fees that change on daily basis
# hardcoding is now considered obsolete, we will remove all of it eventually
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {
'BTC': 0.001,
'ETH': 0.005,
'XRP': 0.02,
'XLM': 0.00002,
'LTC': 0.02,
'DOGE': 2,
'ZEC': 0.00010,
'ICN': 0.02,
'REP': 0.01,
'ETC': 0.005,
'MLN': 0.003,
'XMR': 0.05,
'DASH': 0.005,
'GNO': 0.01,
'EOS': 0.5,
'BCH': 0.001,
'XTZ': 0.05,
'USD': 5, # if domestic wire
'EUR': 5, # if domestic wire
'CAD': 10, # CAD EFT Withdrawal
'JPY': 300, # if domestic wire
},
'deposit': {
'BTC': 0,
'ETH': 0,
'XRP': 0,
'XLM': 0,
'LTC': 0,
'DOGE': 0,
'ZEC': 0,
'ICN': 0,
'REP': 0,
'ETC': 0,
'MLN': 0,
'XMR': 0,
'DASH': 0,
'GNO': 0,
'EOS': 0,
'BCH': 0,
'XTZ': 0.05,
'USD': 5, # if domestic wire
'EUR': 0, # free deposit if EUR SEPA Deposit
'CAD': 5, # if domestic wire
'JPY': 0, # Domestic Deposit(Free, ¥5,000 deposit minimum)
},
},
},
'api': {
'zendesk': {
'get': [
# we should really refrain from putting fixed fee numbers and stop hardcoding
# we will be using their web APIs to scrape all numbers from these articles
'205893708-What-is-the-minimum-order-size-',
'201396777-What-are-the-deposit-fees-',
'201893608-What-are-the-withdrawal-fees-',
],
},
'public': {
'get': [
'Assets',
'AssetPairs',
'Depth',
'OHLC',
'Spread',
'Ticker',
'Time',
'Trades',
],
},
'private': {
'post': [
'AddOrder',
'AddExport',
'Balance',
'CancelOrder',
'ClosedOrders',
'DepositAddresses',
'DepositMethods',
'DepositStatus',
'ExportStatus',
'Ledgers',
'OpenOrders',
'OpenPositions',
'QueryLedgers',
'QueryOrders',
'QueryTrades',
'RetrieveExport',
'RemoveExport',
'TradeBalance',
'TradesHistory',
'TradeVolume',
'Withdraw',
'WithdrawCancel',
'WithdrawInfo',
'WithdrawStatus',
],
},
},
'commonCurrencies': {
'XBT': 'BTC',
'XDG': 'DOGE',
},
'options': {
'cacheDepositMethodsOnFetchDepositAddress': True, # will issue up to two calls in fetchDepositAddress
'depositMethods': {},
'delistedMarketsById': {},
# cannot withdraw/deposit these
'inactiveCurrencies': ['CAD', 'USD', 'JPY', 'GBP'],
},
'exceptions': {
'EAPI:Invalid key': AuthenticationError,
'EFunding:Unknown withdraw key': ExchangeError,
'EFunding:Invalid amount': InsufficientFunds,
'EService:Unavailable': ExchangeNotAvailable,
'EDatabase:Internal error': ExchangeNotAvailable,
'EService:Busy': ExchangeNotAvailable,
'EQuery:Unknown asset': ExchangeError,
'EAPI:Rate limit exceeded': DDoSProtection,
'EOrder:Rate limit exceeded': DDoSProtection,
'EGeneral:Internal error': ExchangeNotAvailable,
'EGeneral:Temporary lockout': DDoSProtection,
'EGeneral:Permission denied': PermissionDenied,
},
})
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['price'], DECIMAL_PLACES)
def fee_to_precision(self, symbol, fee):
return self.decimal_to_precision(fee, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
async def fetch_min_order_amounts(self):
html = await self.zendeskGet205893708WhatIsTheMinimumOrderSize()
parts = html.split('<td class="wysiwyg-text-align-right">')
numParts = len(parts)
if numParts < 3:
raise NotSupported(self.id + ' fetchMinOrderAmounts HTML page markup has changed: https://support.kraken.com/hc/en-us/articles/205893708-What-is-the-minimum-order-size-')
result = {}
# skip the part before the header and the header itself
for i in range(2, len(parts)):
part = parts[i]
chunks = part.split('</td>')
amountAndCode = chunks[0]
if amountAndCode != 'To Be Announced':
pieces = amountAndCode.split(' ')
numPieces = len(pieces)
if numPieces == 2:
amount = float(pieces[0])
code = self.safe_currency_code(pieces[1])
result[code] = amount
return result
async def fetch_markets(self, params={}):
response = await self.publicGetAssetPairs(params)
limits = await self.fetch_min_order_amounts()
keys = list(response['result'].keys())
result = []
for i in range(0, len(keys)):
id = keys[i]
market = response['result'][id]
baseId = market['base']
quoteId = market['quote']
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
darkpool = id.find('.d') >= 0
symbol = market['altname'] if darkpool else (base + '/' + quote)
maker = None
if 'fees_maker' in market:
maker = float(market['fees_maker'][0][1]) / 100
precision = {
'amount': market['lot_decimals'],
'price': market['pair_decimals'],
}
minAmount = math.pow(10, -precision['amount'])
if base in limits:
minAmount = limits[base]
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'darkpool': darkpool,
'info': market,
'altname': market['altname'],
'maker': maker,
'taker': float(market['fees'][0][1]) / 100,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': minAmount,
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
},
})
result = self.append_inactive_markets(result)
self.marketsByAltname = self.index_by(result, 'altname')
return result
def safe_currency_code(self, currencyId, currency=None):
if len(currencyId) > 3:
if (currencyId.find('X') == 0) or (currencyId.find('Z') == 0):
currencyId = currencyId[1:]
return super(kraken, self).safe_currency_code(currencyId, currency)
def append_inactive_markets(self, result):
# result should be an array to append to
precision = {'amount': 8, 'price': 8}
costLimits = {'min': 0, 'max': None}
priceLimits = {'min': math.pow(10, -precision['price']), 'max': None}
amountLimits = {'min': math.pow(10, -precision['amount']), 'max': math.pow(10, precision['amount'])}
limits = {'amount': amountLimits, 'price': priceLimits, 'cost': costLimits}
defaults = {
'darkpool': False,
'info': None,
'maker': None,
'taker': None,
'active': False,
'precision': precision,
'limits': limits,
}
markets = [
# {'id': 'XXLMZEUR', 'symbol': 'XLM/EUR', 'base': 'XLM', 'quote': 'EUR', 'altname': 'XLMEUR'},
]
for i in range(0, len(markets)):
result.append(self.extend(defaults, markets[i]))
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetAssets(params)
#
# {
# "error": [],
# "result": {
# "ADA": {"aclass": "currency", "altname": "ADA", "decimals": 8, "display_decimals": 6},
# "BCH": {"aclass": "currency", "altname": "BCH", "decimals": 10, "display_decimals": 5},
# ...
# },
# }
#
currencies = self.safe_value(response, 'result')
ids = list(currencies.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
currency = currencies[id]
# todo: will need to rethink the fees
# see: https://support.kraken.com/hc/en-us/articles/201893608-What-are-the-withdrawal-fees-
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
code = self.safe_currency_code(self.safe_string(currency, 'altname'))
precision = self.safe_integer(currency, 'decimals')
# assumes all currencies are active except those listed above
active = not self.in_array(code, self.options['inactiveCurrencies'])
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': code,
'active': active,
'fee': None,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': math.pow(10, precision),
},
},
}
return result
async def fetch_trading_fees(self, params={}):
await self.load_markets()
self.check_required_credentials()
response = await self.privatePostTradeVolume(params)
tradedVolume = self.safe_float(response['result'], 'volume')
tiers = self.fees['trading']['tiers']
taker = tiers['taker'][1]
maker = tiers['maker'][1]
for i in range(0, len(tiers['taker'])):
if tradedVolume >= tiers['taker'][i][0]:
taker = tiers['taker'][i][1]
for i in range(0, len(tiers['maker'])):
if tradedVolume >= tiers['maker'][i][0]:
maker = tiers['maker'][i][1]
return {
'info': response,
'maker': maker,
'taker': taker,
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
if market['darkpool']:
raise ExchangeError(self.id + ' does not provide an order book for darkpool symbol ' + symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['count'] = limit # 100
response = await self.publicGetDepth(self.extend(request, params))
orderbook = response['result'][market['id']]
return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market:
symbol = market['symbol']
baseVolume = float(ticker['v'][1])
vwap = float(ticker['p'][1])
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = float(ticker['c'][0])
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': float(ticker['h'][1]),
'low': float(ticker['l'][1]),
'bid': float(ticker['b'][0]),
'bidVolume': None,
'ask': float(ticker['a'][0]),
'askVolume': None,
'vwap': vwap,
'open': self.safe_float(ticker, 'o'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
symbols = self.symbols if (symbols is None) else symbols
marketIds = []
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
market = self.markets[symbol]
if market['active'] and not market['darkpool']:
marketIds.append(market['id'])
request = {
'pair': ','.join(marketIds),
}
response = await self.publicGetTicker(self.extend(request, params))
tickers = response['result']
ids = list(tickers.keys())
result = {}
for i in range(0, len(ids)):
id = ids[i]
market = self.markets_by_id[id]
symbol = market['symbol']
ticker = tickers[id]
if self.in_array(symbol, symbols):
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
darkpool = symbol.find('.d') >= 0
if darkpool:
raise ExchangeError(self.id + ' does not provide a ticker for darkpool symbol ' + symbol)
market = self.market(symbol)
request = {
'pair': market['id'],
}
response = await self.publicGetTicker(self.extend(request, params))
ticker = response['result'][market['id']]
return self.parse_ticker(ticker, market)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0] * 1000,
float(ohlcv[1]),
float(ohlcv[2]),
float(ohlcv[3]),
float(ohlcv[4]),
float(ohlcv[6]),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'interval': self.timeframes[timeframe],
}
if since is not None:
request['since'] = int((since - 1) / 1000)
response = await self.publicGetOHLC(self.extend(request, params))
ohlcvs = response['result'][market['id']]
return self.parse_ohlcvs(ohlcvs, market, timeframe, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'trade': 'trade',
'withdrawal': 'transaction',
'deposit': 'transaction',
'transfer': 'transfer',
'margin': 'margin',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# 'LTFK7F-N2CUX-PNY4SX': {
# refid: "TSJTGT-DT7WN-GPPQMJ",
# time: 1520102320.555,
# type: "trade",
# aclass: "currency",
# asset: "XETH",
# amount: "0.1087194600",
# fee: "0.0000000000",
# balance: "0.2855851000"
# },
# ...
# }
#
id = self.safe_string(item, 'id')
direction = None
account = None
referenceId = self.safe_string(item, 'refid')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'asset'), currency)
amount = self.safe_float(item, 'amount')
if amount < 0:
direction = 'out'
amount = abs(amount)
else:
direction = 'in'
time = self.safe_float(item, 'time')
timestamp = None
if time is not None:
timestamp = int(time * 1000)
fee = {
'cost': self.safe_float(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_float(item, 'balance')
status = 'ok'
return {
'info': item,
'id': id,
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
# https://www.kraken.com/features/api#get-ledgers-info
await self.load_markets()
request = {}
currency = None
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['start'] = int(since / 1000)
response = await self.privatePostLedgers(self.extend(request, params))
# { error: [],
# result: {ledger: {'LPUAIB-TS774-UKHP7X': { refid: "A2B4HBV-L4MDIE-JU4N3N",
# time: 1520103488.314,
# type: "withdrawal",
# aclass: "currency",
# asset: "XETH",
# amount: "-0.2805800000",
# fee: "0.0050000000",
# balance: "0.0000051000" },
result = self.safe_value(response, 'result', {})
ledger = self.safe_value(result, 'ledger', {})
keys = list(ledger.keys())
items = []
for i in range(0, len(keys)):
key = keys[i]
value = ledger[key]
value['id'] = key
items.append(value)
return self.parse_ledger(items, currency, since, limit)
async def fetch_ledger_entries_by_ids(self, ids, code=None, params={}):
# https://www.kraken.com/features/api#query-ledgers
await self.load_markets()
ids = ','.join(ids)
request = self.extend({
'id': ids,
}, params)
response = await self.privatePostQueryLedgers(request)
# { error: [],
# result: {'LPUAIB-TS774-UKHP7X': { refid: "A2B4HBV-L4MDIE-JU4N3N",
# time: 1520103488.314,
# type: "withdrawal",
# aclass: "currency",
# asset: "XETH",
# amount: "-0.2805800000",
# fee: "0.0050000000",
# balance: "0.0000051000" }} }
result = response['result']
keys = list(result.keys())
items = []
for i in range(0, len(keys)):
key = keys[i]
value = result[key]
value['id'] = key
items.append(value)
return self.parse_ledger(items)
async def fetch_ledger_entry(self, id, code=None, params={}):
items = await self.fetch_ledger_entries_by_ids([id], code, params)
return items[0]
def parse_trade(self, trade, market=None):
timestamp = None
side = None
type = None
price = None
amount = None
id = None
order = None
fee = None
marketId = self.safe_string(trade, 'pair')
foundMarket = self.find_market_by_altname_or_id(marketId)
symbol = None
if foundMarket is not None:
market = foundMarket
elif marketId is not None:
# delisted market ids go here
market = self.get_delisted_market_by_id(marketId)
if market is not None:
symbol = market['symbol']
if 'ordertxid' in trade:
order = trade['ordertxid']
id = self.safe_string_2(trade, 'id', 'postxid')
timestamp = self.safe_timestamp(trade, 'time')
side = trade['type']
type = trade['ordertype']
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'vol')
if 'fee' in trade:
currency = None
if market:
currency = market['quote']
fee = {
'cost': self.safe_float(trade, 'fee'),
'currency': currency,
}
else:
timestamp = int(trade[2] * 1000)
side = 'sell' if (trade[3] == 's') else 'buy'
type = 'limit' if (trade[4] == 'l') else 'market'
price = float(trade[0])
amount = float(trade[1])
tradeLength = len(trade)
if tradeLength > 6:
id = trade[6] # artificially added as per #1794
return {
'id': id,
'order': order,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': price * amount,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
id = market['id']
request = {
'pair': id,
}
response = await self.publicGetTrades(self.extend(request, params))
#
# {
# "error": [],
# "result": {
# "XETHXXBT": [
# ["0.032310","4.28169434",1541390792.763,"s","l",""]
# ],
# "last": "1541439421200678657"
# }
# }
#
result = response['result']
trades = result[id]
# trades is a sorted array: last(most recent trade) goes last
length = len(trades)
if length <= 0:
return []
lastTrade = trades[length - 1]
lastTradeId = self.safe_string(result, 'last')
lastTrade.append(lastTradeId)
return self.parse_trades(trades, market, since, limit)
async def fetch_balance(self, params={}):
response = await self.privatePostBalance(params)
balances = self.safe_value(response, 'result', {})
result = {'info': balances}
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_float(balances, currencyId)
result[code] = account
return self.parse_balance(result)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'type': side,
'ordertype': type,
'volume': self.amount_to_precision(symbol, amount),
}
priceIsDefined = (price is not None)
marketOrder = (type == 'market')
limitOrder = (type == 'limit')
shouldIncludePrice = limitOrder or (not marketOrder and priceIsDefined)
if shouldIncludePrice:
request['price'] = self.price_to_precision(symbol, price)
response = await self.privatePostAddOrder(self.extend(request, params))
id = self.safe_value(response['result'], 'txid')
if id is not None:
if isinstance(id, list):
length = len(id)
id = id if (length > 1) else id[0]
return {
'id': id,
'info': response,
'timestamp': None,
'datetime': None,
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': None,
'average': None,
'filled': None,
'remaining': None,
'status': None,
'fee': None,
'trades': None,
}
def find_market_by_altname_or_id(self, id):
if id in self.marketsByAltname:
return self.marketsByAltname[id]
elif id in self.markets_by_id:
return self.markets_by_id[id]
return None
def get_delisted_market_by_id(self, id):
if id is None:
return id
market = self.safe_value(self.options['delistedMarketsById'], id)
if market is not None:
return market
baseIdStart = 0
baseIdEnd = 3
quoteIdStart = 3
quoteIdEnd = 6
if len(id) == 8:
baseIdEnd = 4
quoteIdStart = 4
quoteIdEnd = 8
elif len(id) == 7:
baseIdEnd = 4
quoteIdStart = 4
quoteIdEnd = 7
baseId = id[baseIdStart:baseIdEnd]
quoteId = id[quoteIdStart:quoteIdEnd]
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
market = {
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
}
self.options['delistedMarketsById'][id] = market
return market
def parse_order_status(self, status):
statuses = {
'pending': 'open', # order pending book entry
'open': 'open',
'closed': 'closed',
'canceled': 'canceled',
'expired': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
description = self.safe_value(order, 'descr', {})
side = self.safe_string(description, 'type')
type = self.safe_string(description, 'ordertype')
marketId = self.safe_string(description, 'pair')
foundMarket = self.find_market_by_altname_or_id(marketId)
symbol = None
if foundMarket is not None:
market = foundMarket
elif marketId is not None:
# delisted market ids go here
market = self.get_delisted_market_by_id(marketId)
timestamp = self.safe_timestamp(order, 'opentm')
amount = self.safe_float(order, 'vol')
filled = self.safe_float(order, 'vol_exec')
remaining = amount - filled
fee = None
cost = self.safe_float(order, 'cost')
price = self.safe_float(description, 'price')
if (price is None) or (price == 0):
price = self.safe_float(description, 'price2')
if (price is None) or (price == 0):
price = self.safe_float(order, 'price', price)
average = self.safe_float(order, 'price')
if market is not None:
symbol = market['symbol']
if 'fee' in order:
flags = order['oflags']
feeCost = self.safe_float(order, 'fee')
fee = {
'cost': feeCost,
'rate': None,
}
if flags.find('fciq') >= 0:
fee['currency'] = market['quote']
elif flags.find('fcib') >= 0:
fee['currency'] = market['base']
status = self.parse_order_status(self.safe_string(order, 'status'))
id = self.safe_string(order, 'id')
return {
'id': id,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'filled': filled,
'average': average,
'remaining': remaining,
'fee': fee,
# 'trades': self.parse_trades(order['trades'], market),
}
def parse_orders(self, orders, market=None, since=None, limit=None, params={}):
result = []
ids = list(orders.keys())
symbol = None
if market is not None:
symbol = market['symbol']
for i in range(0, len(ids)):
id = ids[i]
order = self.extend({'id': id}, orders[id])
result.append(self.extend(self.parse_order(order, market), params))
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostQueryOrders(self.extend({
'trades': True, # whether or not to include trades in output(optional, default False)
'txid': id, # do not comma separate a list of ids - use fetchOrdersByIds instead
# 'userref': 'optional', # restrict results to given user reference id(optional)
}, params))
orders = response['result']
order = self.parse_order(self.extend({'id': id}, orders[id]))
return self.extend({'info': response}, order)
async def fetch_orders_by_ids(self, ids, symbol=None, params={}):
await self.load_markets()
response = await self.privatePostQueryOrders(self.extend({
'trades': True, # whether or not to include trades in output(optional, default False)
'txid': ','.join(ids), # comma delimited list of transaction ids to query info about(20 maximum)
}, params))
result = self.safe_value(response, 'result', {})
orders = []
orderIds = list(result.keys())
for i in range(0, len(orderIds)):
id = orderIds[i]
item = result[id]
order = self.parse_order(self.extend({'id': id}, item))
orders.append(order)
return orders
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'type': 'all', # any position, closed position, closing position, no position
# 'trades': False, # whether or not to include trades related to position in output
# 'start': 1234567890, # starting unix timestamp or trade tx id of results(exclusive)
# 'end': 1234567890, # ending unix timestamp or trade tx id of results(inclusive)
# 'ofs' = result offset
}
if since is not None:
request['start'] = int(since / 1000)
response = await self.privatePostTradesHistory(self.extend(request, params))
#
# {
# "error": [],
# "result": {
# "trades": {
# "GJ3NYQ-XJRTF-THZABF": {
# "ordertxid": "TKH2SE-ZIF5E-CFI7LT",
# "postxid": "OEN3VX-M7IF5-JNBJAM",
# "pair": "XICNXETH",
# "time": 1527213229.4491,
# "type": "sell",
# "ordertype": "limit",
# "price": "0.001612",
# "cost": "0.025792",
# "fee": "0.000026",
# "vol": "16.00000000",
# "margin": "0.000000",
# "misc": ""
# },
# ...
# },
# "count": 9760,
# },
# }
#
trades = response['result']['trades']
ids = list(trades.keys())
for i in range(0, len(ids)):
trades[ids[i]]['id'] = ids[i]
result = self.parse_trades(trades, None, since, limit)
if symbol is None:
return result
return self.filter_by_symbol(result, symbol)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
response = None
try:
response = await self.privatePostCancelOrder(self.extend({
'txid': id,
}, params))
except Exception as e:
if self.last_http_response:
if self.last_http_response.find('EOrder:Unknown order') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() error ' + self.last_http_response)
raise e
return response
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if since is not None:
request['start'] = int(since / 1000)
response = await self.privatePostOpenOrders(self.extend(request, params))
orders = self.parse_orders(response['result']['open'], None, since, limit)
if symbol is None:
return orders
return self.filter_by_symbol(orders, symbol)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
if since is not None:
request['start'] = int(since / 1000)
response = await self.privatePostClosedOrders(self.extend(request, params))
orders = self.parse_orders(response['result']['closed'], None, since, limit)
if symbol is None:
return orders
return self.filter_by_symbol(orders, symbol)
async def fetch_deposit_methods(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = await self.privatePostDepositMethods(self.extend(request, params))
return response['result']
def parse_transaction_status(self, status):
# IFEX transaction states
statuses = {
'Initial': 'pending',
'Pending': 'pending',
'Success': 'ok',
'Settled': 'pending',
'Failure': 'failed',
'Partial': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {method: "Ether(Hex)",
# aclass: "currency",
# asset: "XETH",
# refid: "Q2CANKL-LBFVEE-U4Y2WQ",
# txid: "0x57fd704dab1a73c20e24c8696099b695d596924b401b261513cfdab23…",
# info: "0x615f9ba7a9575b0ab4d571b2b36b1b324bd83290",
# amount: "7.9999257900",
# fee: "0.0000000000",
# time: 1529223212,
# status: "Success" }
#
# fetchWithdrawals
#
# {method: "Ether",
# aclass: "currency",
# asset: "XETH",
# refid: "A2BF34S-O7LBNQ-UE4Y4O",
# txid: "0x288b83c6b0904d8400ef44e1c9e2187b5c8f7ea3d838222d53f701a15b5c274d",
# info: "0x7cb275a5e07ba943fee972e165d80daa67cb2dd0",
# amount: "9.9950000000",
# fee: "0.0050000000",
# time: 1530481750,
# status: "Success" }
#
id = self.safe_string(transaction, 'refid')
txid = self.safe_string(transaction, 'txid')
timestamp = self.safe_timestamp(transaction, 'time')
currencyId = self.safe_string(transaction, 'asset')
code = self.safe_currency_code(currencyId, currency)
address = self.safe_string(transaction, 'info')
amount = self.safe_float(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
type = self.safe_string(transaction, 'type') # injected from the outside
feeCost = self.safe_float(transaction, 'fee')
if feeCost is None:
if type == 'deposit':
feeCost = 0
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'address': address,
'tag': None,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_transactions_by_type(self, type, transactions, code=None, since=None, limit=None):
result = []
for i in range(0, len(transactions)):
transaction = self.parse_transaction(self.extend({
'type': type,
}, transactions[i]))
result.append(transaction)
return self.filterByCurrencySinceLimit(result, code, since, limit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
# https://www.kraken.com/en-us/help/api#deposit-status
if code is None:
raise ArgumentsRequired(self.id + ' fetchDeposits requires a currency code argument')
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = await self.privatePostDepositStatus(self.extend(request, params))
#
# { error: [],
# result: [{method: "Ether(Hex)",
# aclass: "currency",
# asset: "XETH",
# refid: "Q2CANKL-LBFVEE-U4Y2WQ",
# txid: "0x57fd704dab1a73c20e24c8696099b695d596924b401b261513cfdab23…",
# info: "0x615f9ba7a9575b0ab4d571b2b36b1b324bd83290",
# amount: "7.9999257900",
# fee: "0.0000000000",
# time: 1529223212,
# status: "Success" }]}
#
return self.parse_transactions_by_type('deposit', response['result'], code, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
# https://www.kraken.com/en-us/help/api#withdraw-status
if code is None:
raise ArgumentsRequired(self.id + ' fetchWithdrawals requires a currency code argument')
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = await self.privatePostWithdrawStatus(self.extend(request, params))
#
# { error: [],
# result: [{method: "Ether",
# aclass: "currency",
# asset: "XETH",
# refid: "A2BF34S-O7LBNQ-UE4Y4O",
# txid: "0x298c83c7b0904d8400ef43e1c9e2287b518f7ea3d838822d53f704a1565c274d",
# info: "0x7cb275a5e07ba943fee972e165d80daa67cb2dd0",
# amount: "9.9950000000",
# fee: "0.0050000000",
# time: 1530481750,
# status: "Success" }]}
#
return self.parse_transactions_by_type('withdrawal', response['result'], code, since, limit)
async def create_deposit_address(self, code, params={}):
request = {
'new': 'true',
}
response = await self.fetch_deposit_address(code, self.extend(request, params))
address = self.safe_string(response, 'address')
self.check_address(address)
return {
'currency': code,
'address': address,
'info': response,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
# eslint-disable-next-line quotes
method = self.safe_string(params, 'method')
if method is None:
if self.options['cacheDepositMethodsOnFetchDepositAddress']:
# cache depositMethods
if not(code in list(self.options['depositMethods'].keys())):
self.options['depositMethods'][code] = await self.fetch_deposit_methods(code)
method = self.options['depositMethods'][code][0]['method']
else:
raise ExchangeError(self.id + ' fetchDepositAddress() requires an extra `method` parameter. Use fetchDepositMethods("' + code + '") to get a list of available deposit methods or enable the exchange property .options["cacheDepositMethodsOnFetchDepositAddress"] = True')
request = {
'asset': currency['id'],
'method': method,
}
response = await self.privatePostDepositAddresses(self.extend(request, params)) # overwrite methods
result = response['result']
numResults = len(result)
if numResults < 1:
raise InvalidAddress(self.id + ' privatePostDepositAddresses() returned no addresses')
address = self.safe_string(result[0], 'address')
tag = self.safe_string_2(result[0], 'tag', 'memo')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
if 'key' in params:
await self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
'amount': amount,
# 'address': address, # they don't allow withdrawals to direct addresses
}
response = await self.privatePostWithdraw(self.extend(request, params))
return {
'info': response,
'id': response['result'],
}
raise ExchangeError(self.id + " withdraw requires a 'key' parameter(withdrawal key name, as set up on your account)")
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/' + self.version + '/' + api + '/' + path
if api == 'public':
if params:
url += '?' + self.urlencode(params)
elif api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
body = self.urlencode(self.extend({'nonce': nonce}, params))
auth = self.encode(nonce + body)
hash = self.hash(auth, 'sha256', 'binary')
binary = self.encode(url)
binhash = self.binary_concat(binary, hash)
secret = base64.b64decode(self.secret)
signature = self.hmac(binhash, secret, hashlib.sha512, 'base64')
headers = {
'API-Key': self.apiKey,
'API-Sign': self.decode(signature),
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
url = '/' + path
url = self.urls['api'][api] + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def nonce(self):
return self.milliseconds()
def handle_errors(self, code, reason, url, method, headers, body, response):
if code == 520:
raise ExchangeNotAvailable(self.id + ' ' + str(code) + ' ' + reason)
# todo: rewrite self for "broad" exceptions matching
if body.find('Invalid order') >= 0:
raise InvalidOrder(self.id + ' ' + body)
if body.find('Invalid nonce') >= 0:
raise InvalidNonce(self.id + ' ' + body)
if body.find('Insufficient funds') >= 0:
raise InsufficientFunds(self.id + ' ' + body)
if body.find('Cancel pending') >= 0:
raise CancelPending(self.id + ' ' + body)
if body.find('Invalid arguments:volume') >= 0:
raise InvalidOrder(self.id + ' ' + body)
if body[0] == '{':
if not isinstance(response, basestring):
if 'error' in response:
numErrors = len(response['error'])
if numErrors:
message = self.id + ' ' + self.json(response)
for i in range(0, len(response['error'])):
if response['error'][i] in self.exceptions:
raise self.exceptions[response['error'][i]](message)
raise ExchangeError(message)
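# Illustrative usage sketch (hedged; `show_ticker` and the symbol choice are
# examples only). The async class above is normally driven through ccxt's
# public interface, along these lines:
#
#   import asyncio
#   import ccxt.async_support as ccxt
#
#   async def show_ticker():
#       exchange = ccxt.kraken()  # public endpoints need no credentials
#       try:
#           ticker = await exchange.fetch_ticker('BTC/USD')
#           print(ticker['symbol'], ticker['last'])
#       finally:
#           await exchange.close()  # release the underlying HTTP session
#
#   asyncio.run(show_ticker())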
| 40.315426
| 284
| 0.48282
|
36a1ffbe9d3dab8e4e9e4dcb2377274907374ceb
| 1,667
|
py
|
Python
|
ooobuild/lo/logging/logger_pool.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/logging/logger_pool.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/lo/logging/logger_pool.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Singleton Class
# this is an auto-generated file produced by Cheetah
# Namespace: com.sun.star.logging
# Libre Office Version: 7.3
from .x_logger_pool import XLoggerPool as XLoggerPool_c8c40c41
class LoggerPool(XLoggerPool_c8c40c41):
"""
Singleton Class
the global pool of named XLogger instances
The one and only LoggerPool instance is available at a component context as value with the key /singletons/com.sun.star.logging.LoggerPool.
**since**
OOo 2.3
See Also:
`API LoggerPool <https://api.libreoffice.org/docs/idl/ref/singletoncom_1_1sun_1_1star_1_1logging_1_1LoggerPool.html>`_
"""
__ooo_ns__: str = 'com.sun.star.logging'
__ooo_full_ns__: str = 'com.sun.star.logging.LoggerPool'
__ooo_type_name__: str = 'singleton'
_instance = None
def __new__(cls, *args, **kwargs):
# single instance only allowed
if cls._instance is None:
cls._instance = super().__new__(cls, *args, **kwargs)
return cls._instance
__all__ = ['LoggerPool']
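# Illustrative note (hedged): the __new__ override above is a plain Python
# singleton, so repeated construction yields the same object, assuming the
# base class accepts no-argument construction in the running UNO environment:
#
#   a = LoggerPool()
#   b = LoggerPool()
#   assert a is b  # both names point to the one cached instance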
| 30.87037
| 143
| 0.715057
|
e3e4a992d99a2b83c9725db27066929a025cf7fd
| 1,071
|
py
|
Python
|
data/make_traindata.py
|
existentmember7/cycle_gan
|
bab3b9d4f1e9faf7eaeb05002a1178dc1d8ccd4e
|
[
"MIT"
] | null | null | null |
data/make_traindata.py
|
existentmember7/cycle_gan
|
bab3b9d4f1e9faf7eaeb05002a1178dc1d8ccd4e
|
[
"MIT"
] | null | null | null |
data/make_traindata.py
|
existentmember7/cycle_gan
|
bab3b9d4f1e9faf7eaeb05002a1178dc1d8ccd4e
|
[
"MIT"
] | null | null | null |
import glob
import os
import argparse
parser = argparse.ArgumentParser(description='Process some parameters.')
parser.add_argument('--root', type=str, default='/media/han/新增磁碟區/cy/dataset/',help='root path')
args = parser.parse_args()
root_dir = args.root # /media/han/新增磁碟區/cy/dataset/
_is = [1,2,3,10,12,13,22,23]
_js = [6]
training_label_file = open(root_dir+'train_label_new.txt', 'w')
training_file_A = open(root_dir+'train_data_A_new.txt', 'w')
training_file_B = open(root_dir+'train_data_B_new.txt', 'w')
for i in _is:
for j in _js:
file_pathes = glob.glob(root_dir+'images/A/'+str(i)+'-'+str(j)+'/*.png')
for path in file_pathes:
lab = _is.index(int(i))
training_label_file.write(str(lab)+'\n')
training_file_A.write(path+'\n')
training_file_B.write(root_dir+'images/B/'+str(i)+'-'+str(j)+'/'+path.split('/')[-1]+'\n')
# print(root_dir+'images/B/'+str(i)+'-'+str(j)+'/'+path.split('/')[-1])
training_label_file.close()
training_file_A.close()
training_file_B.close()
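# Illustrative invocation (the path below is a placeholder, not a real dataset):
#
#   python make_traindata.py --root /path/to/dataset/
#
# This writes train_label_new.txt, train_data_A_new.txt and
# train_data_B_new.txt under the given root directory.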
| 32.454545
| 102
| 0.647993
|
6d2e7fb93d1faccbcbf3d3bfe44be73a64048cd6
| 1,396
|
py
|
Python
|
data_registry/migrations/0007_auto_20210504_0825.py
|
open-contracting/data-registry
|
5a73e7f2334c6af5be23070493842b494b3e5357
|
[
"BSD-3-Clause"
] | null | null | null |
data_registry/migrations/0007_auto_20210504_0825.py
|
open-contracting/data-registry
|
5a73e7f2334c6af5be23070493842b494b3e5357
|
[
"BSD-3-Clause"
] | 170
|
2021-02-12T12:52:37.000Z
|
2022-03-28T14:37:05.000Z
|
data_registry/migrations/0007_auto_20210504_0825.py
|
open-contracting/data-registry
|
5a73e7f2334c6af5be23070493842b494b3e5357
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.1.6 on 2021-05-04 08:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_registry', '0006_auto_20210430_1310'),
]
operations = [
migrations.AddField(
model_name='task',
name='order',
field=models.IntegerField(blank=True, null=True),
),
migrations.AddField(
model_name='task',
name='type',
field=models.CharField(blank=True, max_length=2048, null=True),
),
migrations.AlterField(
model_name='collection',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='issue',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='job',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='task',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
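# Illustrative note (hedged; assumes the surrounding Django project's manage.py):
# auto-generated migrations like this one are applied with
#
#   python manage.py migrate data_registry 0007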
| 31.727273
| 108
| 0.582378
|
002906209afc39943c982cf7af0d27b45fa12efe
| 269
|
py
|
Python
|
main.py
|
kuroneko1996/cyberlab
|
36e72b13efbe891c295143a89514296a3480bfea
|
[
"MIT"
] | 4
|
2017-08-14T04:23:25.000Z
|
2019-09-12T06:12:37.000Z
|
main.py
|
kuroneko1996/cyberlab
|
36e72b13efbe891c295143a89514296a3480bfea
|
[
"MIT"
] | 40
|
2017-08-14T07:23:56.000Z
|
2019-09-10T03:27:46.000Z
|
main.py
|
kuroneko1996/cyberlab
|
36e72b13efbe891c295143a89514296a3480bfea
|
[
"MIT"
] | 3
|
2017-08-14T04:51:54.000Z
|
2022-02-13T08:48:23.000Z
|
import pygame
from settings import *
from screens.menu import main_menu
def main():
pygame.init()
pygame.joystick.init()
display = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
main_menu(display).run()
if __name__ == '__main__':
main()
| 17.933333
| 68
| 0.702602
|
b77d365f83da5123b469d24aaceed6f0e80297c8
| 2,998
|
py
|
Python
|
game/data/scripts/quests/510_AClansReputation/__init__.py
|
TheDemonLife/Lineage2Server-Interlude
|
d23d145db533fd899d4064026e4bc7ee45c6624a
|
[
"Apache-2.0"
] | 10
|
2019-07-27T13:12:11.000Z
|
2022-01-15T19:13:26.000Z
|
game/data/scripts/quests/510_AClansReputation/__init__.py
|
TheDemonLife/Lineage2Server-Interlude
|
d23d145db533fd899d4064026e4bc7ee45c6624a
|
[
"Apache-2.0"
] | 1
|
2021-08-06T12:15:01.000Z
|
2021-08-09T10:18:47.000Z
|
game/data/scripts/quests/510_AClansReputation/__init__.py
|
TheDemonLife/Lineage2Server-Interlude
|
d23d145db533fd899d4064026e4bc7ee45c6624a
|
[
"Apache-2.0"
] | 2
|
2020-02-20T23:02:26.000Z
|
2020-11-22T09:27:51.000Z
|
import sys
from ru.catssoftware.gameserver.model.quest import State
from ru.catssoftware.gameserver.model.quest import QuestState
from ru.catssoftware.gameserver.model.quest.jython import QuestJython as JQuest
from ru.catssoftware.gameserver.network.serverpackets import PledgeShowInfoUpdate
from ru.catssoftware.gameserver.network.serverpackets import SystemMessage
qn="510_AClansReputation"
# Quest NPC
Valdis = 31331
# Quest Items
Claw = 8767
# Reward
CLAN_POINTS_REWARD = 50 # Rep Points Per Tyrannosaurus Item - need to be confirmed
class Quest (JQuest) :
def __init__(self,id,name,descr) :
JQuest.__init__(self,id,name,descr)
self.questItemIds = [Claw]
def onAdvEvent (self,event,npc,player) :
st = player.getQuestState(qn)
if not st: return
cond = st.getInt("cond")
htmltext=event
if event == "31331-3.htm" :
if cond == 0 :
st.set("cond","1")
st.setState(State.STARTED)
st.playSound("ItemSound.quest_accept")
elif event == "31331-6.htm" :
st.playSound("ItemSound.quest_finish")
st.exitQuest(1)
return htmltext
def onTalk (self,npc,player) :
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
clan = player.getClan()
npcId = npc.getNpcId()
if player.getClan() == None or player.isClanLeader() == 0 :
st.exitQuest(1)
htmltext = "31331-0.htm"
elif player.getClan().getLevel() < 5 :
st.exitQuest(1)
htmltext = "31331-0.htm"
else :
cond = st.getInt("cond")
id = st.getState()
if id == State.CREATED and cond == 0 :
htmltext = "31331-1.htm"
elif id == State.STARTED and cond == 1 :
count = st.getQuestItemsCount(Claw)
if not count :
htmltext = "31331-4.htm"
elif count >= 1 :
htmltext = "31331-7.htm" # custom html
st.takeItems(Claw,-1)
reward = int(CLAN_POINTS_REWARD * count)
clan.setReputationScore(clan.getReputationScore()+reward,True)
player.sendPacket(SystemMessage(1777).addNumber(reward))
clan.broadcastToOnlineMembers(PledgeShowInfoUpdate(clan))
return htmltext
def onKill(self,npc,player,isPet) :
st = 0
if player.isClanLeader() :
st = player.getQuestState(qn)
else:
clan = player.getClan()
if clan:
leader=clan.getLeader()
if leader :
pleader= leader.getPlayerInstance()
if pleader :
if player.isInsideRadius(pleader, 1600, 1, 0) :
st = pleader.getQuestState(qn)
if not st : return
if st.getState() == State.STARTED :
npcId=npc.getNpcId()
if npcId in range(22215,22218) :
st.giveItems(Claw,1)
st.playSound("ItemSound.quest_itemget")
return
QUEST = Quest(510,qn,"A Clan's Reputation")
QUEST.addStartNpc(Valdis)
QUEST.addTalkId(Valdis)
for npc in range(22215,22218):
QUEST.addKillId(npc)
| 30.591837
| 151
| 0.672115
|
5d99773f934df36538b3a45b73e8f3f27a4e212b
| 9,172
|
py
|
Python
|
docs/conf.py
|
abravalheri/pyscaffoldext-pyproject
|
13cd647de47b90b67b37d8b9269a706fd1664ddb
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
abravalheri/pyscaffoldext-pyproject
|
13cd647de47b90b67b37d8b9269a706fd1664ddb
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
abravalheri/pyscaffoldext-pyproject
|
13cd647de47b90b67b37d8b9269a706fd1664ddb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
import inspect
import shutil
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.join(__location__, '../src'))
# -- Run sphinx-apidoc ------------------------------------------------------
# This hack is necessary since RTD does not issue `sphinx-apidoc` before running
# `sphinx-build -b html . _build/html`. See Issue:
# https://github.com/rtfd/readthedocs.org/issues/1139
# DON'T FORGET: Check the box "Install your project inside a virtualenv using
# setup.py install" in the RTD Advanced Settings.
# Additionally it helps us to avoid running apidoc manually
try: # for Sphinx >= 1.7
from sphinx.ext import apidoc
except ImportError:
from sphinx import apidoc
output_dir = os.path.join(__location__, "api")
module_dir = os.path.join(__location__, "../src/pyscaffoldext")
try:
shutil.rmtree(output_dir)
except FileNotFoundError:
pass
try:
import sphinx
from distutils.version import LooseVersion
cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}"
cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir)
args = cmd_line.split(" ")
if LooseVersion(sphinx.__version__) >= LooseVersion('1.7'):
args = args[1:]
apidoc.main(args)
except Exception as e:
print("Running `sphinx-apidoc` failed!\n{}".format(e))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyscaffoldext-pyproject'
copyright = u'2018, Florian Wilhelm'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '' # Is set by calling `setup.py docs`
# The full version, including alpha/beta/rc tags.
release = '' # Is set by calling `setup.py docs`
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
try:
from pyscaffoldext.pyproject import __version__ as version
except ImportError:
pass
else:
release = version
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = ""
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyproject-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'user_guide.tex', u'pyscaffoldext-pyproject Documentation',
u'Florian Wilhelm', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = ""
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- External mapping ------------------------------------------------------------
python_version = '.'.join(map(str, sys.version_info[0:2]))
intersphinx_mapping = {
'sphinx': ('http://www.sphinx-doc.org/en/stable', None),
'python': ('https://docs.python.org/' + python_version, None),
'matplotlib': ('https://matplotlib.org', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'sklearn': ('http://scikit-learn.org/stable', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
}
| 33.97037
| 85
| 0.705408
|
5a91dc202b05ee63b6769b0bbeb2d6552c010c5e
| 1,030
|
py
|
Python
|
gwinc/ifo/CE1/__init__.py
|
ark0015/pygwinc_clone
|
65396ee42e851ab7189618cabe1c12081b5d982e
|
[
"Unlicense"
] | null | null | null |
gwinc/ifo/CE1/__init__.py
|
ark0015/pygwinc_clone
|
65396ee42e851ab7189618cabe1c12081b5d982e
|
[
"Unlicense"
] | null | null | null |
gwinc/ifo/CE1/__init__.py
|
ark0015/pygwinc_clone
|
65396ee42e851ab7189618cabe1c12081b5d982e
|
[
"Unlicense"
] | 1
|
2021-06-23T04:51:20.000Z
|
2021-06-23T04:51:20.000Z
|
from gwinc.ifo.noises import *
class Newtonian(nb.Budget):
"""Newtonian Gravity
"""
name = 'Newtonian'
style = dict(
label='Newtonian',
color='#15b01a',
)
noises = [
NewtonianRayleigh,
NewtonianBody,
NewtonianInfrasound,
]
class Coating(nb.Budget):
"""Coating Thermal
"""
name = 'Coating'
style = dict(
label='Coating Thermal',
color='#fe0002',
)
noises = [
CoatingBrownian,
CoatingThermoOptic,
]
class Substrate(nb.Budget):
"""Substrate Thermal
"""
name = 'Substrate'
style = dict(
label='Substrate Thermal',
color='#fb7d07',
linestyle='--',
)
noises = [
SubstrateBrownian,
SubstrateThermoElastic,
]
class CE1(nb.Budget):
name = 'Cosmic Explorer 1'
noises = [
QuantumVacuum,
Seismic,
Newtonian,
SuspensionThermal,
Coating,
Substrate,
ExcessGas,
]
| 14.927536 | 34 | 0.529126 |
af111874f4b207262d1415e4d3f908af74bbcbc4 | 14,810 | py | Python | venv/Lib/site-packages/statsmodels/sandbox/tsa/movstat.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 6,931 | 2015-01-01T11:41:55.000Z | 2022-03-31T17:03:24.000Z | venv/Lib/site-packages/statsmodels/sandbox/tsa/movstat.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 6,137 | 2015-01-01T00:33:45.000Z | 2022-03-31T22:53:17.000Z | venv/Lib/site-packages/statsmodels/sandbox/tsa/movstat.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | ["MIT"] | 2,608 | 2015-01-02T21:32:31.000Z | 2022-03-31T07:38:30.000Z |
'''using scipy signal and numpy correlate to calculate some time series
statistics
original developer notes
see also scikits.timeseries (movstat is partially inspired by it)
added 2009-08-29
timeseries moving stats are in c, autocorrelation similar to here
(I thought I saw moving stats somewhere in python, maybe not)
TODO
moving statistics
- filters do not handle boundary conditions nicely (correctly ?)
e.g. minimum order filter uses 0 for out of bounds value
-> append and prepend with last resp. first value
- enhance for nd arrays, with axis = 0
Note: Equivalence for 1D signals
>>> np.all(signal.correlate(x,[1,1,1],'valid')==np.correlate(x,[1,1,1]))
True
>>> np.all(ndimage.filters.correlate(x,[1,1,1], origin = -1)[:-3+1]==np.correlate(x,[1,1,1]))
True
# multidimensional, but, it looks like it uses common filter across time series, no VAR
ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)
ndimage.filters.correlate(x,[1,1,1],origin = 1))
ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[1,1,1],[0,0,0]]), origin = 1)[0]==\
ndimage.filters.correlate(x,[1,1,1],origin = 1))
True
>>> np.all(ndimage.filters.correlate(np.vstack([x,x]),np.array([[0.5,0.5,0.5],[0.5,0.5,0.5]]), \
origin = 1)[0]==ndimage.filters.correlate(x,[1,1,1],origin = 1))
update
2009-09-06: cosmetic changes, rearrangements
'''
import numpy as np
from scipy import signal
from numpy.testing import assert_array_equal, assert_array_almost_equal
def expandarr(x,k):
#make it work for 2D or nD with axis
kadd = k
if np.ndim(x) == 2:
kadd = (kadd, np.shape(x)[1])
return np.r_[np.ones(kadd)*x[0],x,np.ones(kadd)*x[-1]]
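# Example (consistent with the implementation above): the array is padded on
# both sides with k copies of its edge values, and np.ones() promotes the
# result to float.
#>>> expandarr(np.array([1, 2, 3, 4]), 2)
# -> array([1., 1., 1., 2., 3., 4., 4., 4.])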
def movorder(x, order = 'med', windsize=3, lag='lagged'):
'''moving order statistics
Parameters
----------
x : ndarray
time series data
order : float or 'med', 'min', 'max'
which order statistic to calculate
windsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
filtered array
'''
#if windsize is even should it raise ValueError
if lag == 'lagged':
lead = windsize//2
elif lag == 'centered':
lead = 0
elif lag == 'leading':
lead = -windsize//2 +1
else:
raise ValueError
if np.isfinite(order): #if np.isnumber(order):
ord = order # note: ord is a builtin function
elif order == 'med':
        ord = (windsize - 1)//2   # integer rank required by signal.order_filter
elif order == 'min':
ord = 0
elif order == 'max':
ord = windsize - 1
else:
raise ValueError
#return signal.order_filter(x,np.ones(windsize),ord)[:-lead]
xext = expandarr(x, windsize)
#np.r_[np.ones(windsize)*x[0],x,np.ones(windsize)*x[-1]]
return signal.order_filter(xext,np.ones(windsize),ord)[windsize-lead:-(windsize+lead)]
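# Example, mirroring the assertion in check_movorder below: for an increasing
# sequence the lagged moving maximum reproduces the sequence itself, because
# the current value is always the largest one in the trailing window.
#>>> x = np.arange(1, 10)
#>>> np.all(movorder(x, order='max') == x)
#True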
def check_movorder():
'''graphical test for movorder'''
import matplotlib.pylab as plt
x = np.arange(1,10)
xo = movorder(x, order='max')
assert_array_equal(xo, x)
x = np.arange(10,1,-1)
xo = movorder(x, order='min')
assert_array_equal(xo, x)
assert_array_equal(movorder(x, order='min', lag='centered')[:-1], x[1:])
tt = np.linspace(0,2*np.pi,15)
x = np.sin(tt) + 1
xo = movorder(x, order='max')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max lagged')
xo = movorder(x, order='max', lag='centered')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max centered')
xo = movorder(x, order='max', lag='leading')
plt.figure()
plt.plot(tt,x,'.-',tt,xo,'.-')
plt.title('moving max leading')
# identity filter
##>>> signal.order_filter(x,np.ones(1),0)
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9.])
# median filter
##signal.medfilt(np.sin(x), kernel_size=3)
##>>> plt.figure()
##<matplotlib.figure.Figure object at 0x069BBB50>
##>>> x=np.linspace(0,3,100);plt.plot(x,np.sin(x),x,signal.medfilt(np.sin(x), kernel_size=3))
# remove old version
##def movmeanvar(x, windowsize=3, valid='same'):
## '''
## this should also work along axis or at least for columns
## '''
## n = x.shape[0]
## x = expandarr(x, windowsize - 1)
## takeslice = slice(windowsize-1, n + windowsize-1)
## avgkern = (np.ones(windowsize)/float(windowsize))
## m = np.correlate(x, avgkern, 'same')#[takeslice]
## print(m.shape)
## print(x.shape)
## xm = x - m
## v = np.correlate(x*x, avgkern, 'same') - m**2
## v1 = np.correlate(xm*xm, avgkern, valid) #not correct for var of window
###>>> np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')-np.correlate(xm*xm,np.array([1,1,1])/3.0,'valid')**2
## return m[takeslice], v[takeslice], v1
def movmean(x, windowsize=3, lag='lagged'):
'''moving window mean
Parameters
----------
x : ndarray
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : ndarray
moving mean, with same shape as x
Notes
-----
for leading and lagging the data array x is extended by the closest value of the array
'''
return movmoment(x, 1, windowsize=windowsize, lag=lag)
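# Example (values match the checks in the __main__ block below): a 3-point
# lagged moving mean of 0..9; the first two entries reflect the edge
# extension performed by expandarr.
#>>> movmean(np.arange(10), windowsize=3, lag='lagged')
# -> [0., 0.3333, 1., 2., 3., 4., 5., 6., 7., 8.]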
def movvar(x, windowsize=3, lag='lagged'):
'''moving window variance
Parameters
----------
x : ndarray
time series data
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : ndarray
moving variance, with same shape as x
'''
m1 = movmoment(x, 1, windowsize=windowsize, lag=lag)
m2 = movmoment(x, 2, windowsize=windowsize, lag=lag)
return m2 - m1*m1
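# Example, consistent with the checks in the __main__ block below: away from
# the boundary, the lagged moving variance of consecutive integers equals the
# population variance of each length-3 window, i.e. 2/3.
#>>> movvar(np.arange(10), windowsize=3, lag='lagged')[2:]
# -> eight values of 0.6667; compare [np.var(np.arange(10)[i-3:i]) for i in range(3, 10)]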
def movmoment(x, k, windowsize=3, lag='lagged'):
'''non-central moment
Parameters
----------
x : ndarray
time series data
    k : int
        order of the (non-central) moment to compute
    windowsize : int
window size
lag : 'lagged', 'centered', or 'leading'
location of window relative to current position
Returns
-------
mk : ndarray
k-th moving non-central moment, with same shape as x
Notes
-----
If data x is 2d, then moving moment is calculated for each
column.
'''
windsize = windowsize
#if windsize is even should it raise ValueError
if lag == 'lagged':
#lead = -0 + windsize #windsize//2
lead = -0# + (windsize-1) + windsize//2
sl = slice((windsize-1) or None, -2*(windsize-1) or None)
elif lag == 'centered':
lead = -windsize//2 #0#-1 #+ #(windsize-1)
sl = slice((windsize-1)+windsize//2 or None, -(windsize-1)-windsize//2 or None)
elif lag == 'leading':
#lead = -windsize +1#+1 #+ (windsize-1)#//2 +1
lead = -windsize +2 #-windsize//2 +1
sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
else:
raise ValueError
avgkern = (np.ones(windowsize)/float(windowsize))
xext = expandarr(x, windsize-1)
#Note: expandarr increases the array size by 2*(windsize-1)
#sl = slice(2*(windsize-1)+1+lead or None, -(2*(windsize-1)+lead)+1 or None)
print(sl)
if xext.ndim == 1:
return np.correlate(xext**k, avgkern, 'full')[sl]
#return np.correlate(xext**k, avgkern, 'same')[windsize-lead:-(windsize+lead)]
else:
print(xext.shape)
print(avgkern[:,None].shape)
# try first with 2d along columns, possibly ndim with axis
return signal.correlate(xext**k, avgkern[:,None], 'full')[sl,:]
#x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,[1],'full')
#x=0.5**np.arange(3);np.correlate(x,x,'same')
##>>> x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
##
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> xo
##xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> x=np.ones(10);xo=x-x.mean();a=np.correlate(xo,xo,'full')
##>>> xo=np.ones(10);d=np.correlate(xo,xo,'full')
##>>> d
##array([ 1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 9.,
## 8., 7., 6., 5., 4., 3., 2., 1.])
##def ccovf():
## pass
## #x=0.5**np.arange(10);xm=x-x.mean();a=np.correlate(xm,xo,'full')
__all__ = ['movorder', 'movmean', 'movvar', 'movmoment']
if __name__ == '__main__':
    print('\nchecking moving mean and variance')
nobs = 10
x = np.arange(nobs)
ws = 3
ave = np.array([ 0., 1/3., 1., 2., 3., 4., 5., 6., 7., 8.,
26/3., 9])
va = np.array([[ 0. , 0. ],
[ 0.22222222, 0.88888889],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.66666667, 2.66666667],
[ 0.22222222, 0.88888889],
[ 0. , 0. ]])
ave2d = np.c_[ave, 2*ave]
print(movmean(x, windowsize=ws, lag='lagged'))
print(movvar(x, windowsize=ws, lag='lagged'))
print([np.var(x[i-ws:i]) for i in range(ws, nobs)])
m1 = movmoment(x, 1, windowsize=3, lag='lagged')
m2 = movmoment(x, 2, windowsize=3, lag='lagged')
print(m1)
print(m2)
print(m2 - m1*m1)
# this implicitly also tests moment
assert_array_almost_equal(va[ws-1:,0],
movvar(x, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,0],
movvar(x, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,0],
movvar(x, windowsize=ws, lag='lagged'))
print('\nchecking moving moment for 2d (columns only)')
x2d = np.c_[x, 2*x]
print(movmoment(x2d, 1, windowsize=3, lag='centered'))
print(movmean(x2d, windowsize=ws, lag='lagged'))
print(movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(va[ws-1:,:],
movvar(x2d, windowsize=3, lag='leading'))
assert_array_almost_equal(va[ws//2:-ws//2+1,:],
movvar(x2d, windowsize=3, lag='centered'))
assert_array_almost_equal(va[:-ws+1,:],
movvar(x2d, windowsize=ws, lag='lagged'))
assert_array_almost_equal(ave2d[ws-1:],
movmoment(x2d, 1, windowsize=3, lag='leading'))
assert_array_almost_equal(ave2d[ws//2:-ws//2+1],
movmoment(x2d, 1, windowsize=3, lag='centered'))
assert_array_almost_equal(ave2d[:-ws+1],
movmean(x2d, windowsize=ws, lag='lagged'))
from scipy import ndimage
print(ndimage.filters.correlate1d(x2d, np.array([1,1,1])/3., axis=0))
#regression test check
xg = np.array([ 0. , 0.1, 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6,
4.5, 5.5, 6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5,
13.5, 14.5, 15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5,
22.5, 23.5, 24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5,
31.5, 32.5, 33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5,
40.5, 41.5, 42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5,
49.5, 50.5, 51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5,
58.5, 59.5, 60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5,
67.5, 68.5, 69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5,
76.5, 77.5, 78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5,
85.5, 86.5, 87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5,
94.5])
assert_array_almost_equal(xg, movmean(np.arange(100), 10,'lagged'))
xd = np.array([ 0.3, 0.6, 1. , 1.5, 2.1, 2.8, 3.6, 4.5, 5.5,
6.5, 7.5, 8.5, 9.5, 10.5, 11.5, 12.5, 13.5, 14.5,
15.5, 16.5, 17.5, 18.5, 19.5, 20.5, 21.5, 22.5, 23.5,
24.5, 25.5, 26.5, 27.5, 28.5, 29.5, 30.5, 31.5, 32.5,
33.5, 34.5, 35.5, 36.5, 37.5, 38.5, 39.5, 40.5, 41.5,
42.5, 43.5, 44.5, 45.5, 46.5, 47.5, 48.5, 49.5, 50.5,
51.5, 52.5, 53.5, 54.5, 55.5, 56.5, 57.5, 58.5, 59.5,
60.5, 61.5, 62.5, 63.5, 64.5, 65.5, 66.5, 67.5, 68.5,
69.5, 70.5, 71.5, 72.5, 73.5, 74.5, 75.5, 76.5, 77.5,
78.5, 79.5, 80.5, 81.5, 82.5, 83.5, 84.5, 85.5, 86.5,
87.5, 88.5, 89.5, 90.5, 91.5, 92.5, 93.5, 94.5, 95.4,
96.2, 96.9, 97.5, 98. , 98.4, 98.7, 98.9, 99. ])
assert_array_almost_equal(xd, movmean(np.arange(100), 10,'leading'))
xc = np.array([ 1.36363636, 1.90909091, 2.54545455, 3.27272727,
4.09090909, 5. , 6. , 7. ,
8. , 9. , 10. , 11. ,
12. , 13. , 14. , 15. ,
16. , 17. , 18. , 19. ,
20. , 21. , 22. , 23. ,
24. , 25. , 26. , 27. ,
28. , 29. , 30. , 31. ,
32. , 33. , 34. , 35. ,
36. , 37. , 38. , 39. ,
40. , 41. , 42. , 43. ,
44. , 45. , 46. , 47. ,
48. , 49. , 50. , 51. ,
52. , 53. , 54. , 55. ,
56. , 57. , 58. , 59. ,
60. , 61. , 62. , 63. ,
64. , 65. , 66. , 67. ,
68. , 69. , 70. , 71. ,
72. , 73. , 74. , 75. ,
76. , 77. , 78. , 79. ,
80. , 81. , 82. , 83. ,
84. , 85. , 86. , 87. ,
88. , 89. , 90. , 91. ,
92. , 93. , 94. , 94.90909091,
95.72727273, 96.45454545, 97.09090909, 97.63636364])
assert_array_almost_equal(xc, movmean(np.arange(100), 11,'centered'))
| 35.859564 | 109 | 0.505874 |
4ee0c581b41ce500b886866ec490ed4bcd5c623f | 844 | py | Python | train.py | thomasbrockmeier-ams/Open3D-ML | 1e362bbf133537668923905a12a15c540d9b689d | ["MIT"] | null | null | null | train.py | thomasbrockmeier-ams/Open3D-ML | 1e362bbf133537668923905a12a15c540d9b689d | ["MIT"] | null | null | null | train.py | thomasbrockmeier-ams/Open3D-ML | 1e362bbf133537668923905a12a15c540d9b689d | ["MIT"] | null | null | null |
import os
import ml3d as _ml3d
import ml3d.tf as ml3d
def main():
cfg_file = "ml3d/configs/randlanet_amsterdam3d.yml"
cfg = _ml3d.utils.Config.load_from_file(cfg_file)
model = ml3d.models.RandLANet(**cfg.model)
cfg.dataset['dataset_path'] = "datasets/Cyclomedia_pc_verified"
dataset = _ml3d.datasets.Amsterdam3D(cfg.dataset.pop('dataset_path', None), **cfg.dataset)
pipeline = ml3d.pipelines.SemanticSegmentation(model=model, dataset=dataset, max_epoch=200, batch_size=1, device='gpu')
ckpt_folder = "./logs/"
os.makedirs(ckpt_folder, exist_ok=True)
pipeline.cfg_tb = {
"readme": "readme",
"cmd_line": "cmd_line",
"dataset": "Amsterdam3D",
"model": "RandLaNet",
"pipeline": "Default Pipeline",
}
pipeline.run_train()
if __name__ == "__main__":
main()
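# Hypothetical follow-up (an assumption, not part of this script): Open3D-ML
# pipelines can typically restore a checkpoint and run inference after
# training. `load_ckpt`, `run_inference`, the checkpoint path, and the shape
# of `data` below are illustrative and may differ between versions.
#
#     pipeline.load_ckpt(ckpt_path="./logs/checkpoint")
#     results = pipeline.run_inference(data)  # data: dict with 'point', 'feat', 'label'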
| 30.142857 | 123 | 0.674171 |
6f3d080093e541f5b2154906bbbcc9c352512046 | 642 | py | Python | imdiff/analysis.py | jakubczaplicki/projecteuler | ab58762a65cb303f3e19cc8fc8c3d45831713719 | ["MIT"] | null | null | null | imdiff/analysis.py | jakubczaplicki/projecteuler | ab58762a65cb303f3e19cc8fc8c3d45831713719 | ["MIT"] | null | null | null | imdiff/analysis.py | jakubczaplicki/projecteuler | ab58762a65cb303f3e19cc8fc8c3d45831713719 | ["MIT"] | null | null | null |
#
# Try to tell the difference between the images
#
import cv2
import numpy
def analyse():
    inimg = cv2.imread('Lenna.png')
    outimg = cv2.imread('LennaR.png')
    # split into B, G, R channels and promote to float so the squared
    # differences below cannot wrap around in uint8 arithmetic
    bi, gi, ri = [c.astype(numpy.float64) for c in cv2.split(inimg)]
    bo, go, ro = [c.astype(numpy.float64) for c in cv2.split(outimg)]
    print(bo)
    errR = (ro - ri)**2
    errG = (go - gi)**2
    errB = (bo - bi)**2
    n = float(inimg.size) / 3  # divide by 3 to get the number of image pixels
    print(errB.sum())
    print(sum(errB))  # per-column sums, kept from the original for comparison
    MSER = errR.sum() / n
    MSEG = errG.sum() / n
    MSEB = errB.sum() / n
    print(MSER, MSEG, MSEB)
    #PSNR = 20*log10(255) - 10*log10(MSE)
analyse()
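# A small follow-on sketch (not part of the original script): the commented-out
# formula above can be evaluated directly once a mean-squared error is known.
# The helper below is illustrative; `mse` would be any of the MSER/MSEG/MSEB
# values computed in analyse().
def psnr(mse, peak=255.0):
    """Peak signal-to-noise ratio in dB: 20*log10(peak) - 10*log10(mse)."""
    return 20 * numpy.log10(peak) - 10 * numpy.log10(mse)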
| 18.882353 | 74 | 0.607477 |